// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>

#include <asm/pkru.h>
#include <asm/trapnr.h>
#include <asm/fpu/xcr.h>

#include "x86.h"
#include "svm.h"
#include "svm_ops.h"
#include "cpuid.h"
#include "trace.h"

#define GHCB_VERSION_MAX	1ULL
#define GHCB_VERSION_MIN	1ULL
#ifndef CONFIG_KVM_AMD_SEV
/*
 * When this config is not defined, SEV feature is not supported and APIs in
 * this file are not used but this file still gets compiled into the KVM AMD
 * module.
 *
 * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
 * misc_res_type {} defined in linux/misc_cgroup.h.
 *
 * Below macros allow compilation to succeed.
 */
#define MISC_CG_RES_SEV MISC_CG_RES_TYPES
#define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
#endif
#ifdef CONFIG_KVM_AMD_SEV
/* enable/disable SEV support */
static bool sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);

/* enable/disable SEV-ES support */
static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);
#else
#define sev_enabled false
#define sev_es_enabled false
#endif /* CONFIG_KVM_AMD_SEV */
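/*
 * Both knobs are read-only (0444) module parameters of kvm_amd, so they
 * can only be chosen at module load time, e.g. (illustrative invocation):
 *
 *	modprobe kvm_amd sev=1 sev_es=1
 *
 * or kvm_amd.sev=1 kvm_amd.sev_es=1 on the kernel command line.
 */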
static u8 sev_enc_bit;
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
static unsigned int nr_asids;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;
struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};
/* Called with the sev_bitmap_lock held, or on shutdown */
static int sev_flush_asids(int min_asid, int max_asid)
{
	int ret, asid, error = 0;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
	if (asid > max_asid)
		return -EBUSY;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}
static inline bool is_mirroring_enc_context(struct kvm *kvm)
{
	return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
}
/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
{
	if (sev_flush_asids(min_asid, max_asid))
		return false;

	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   nr_asids);
	bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);

	return true;
}
static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
{
	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	return misc_cg_try_charge(type, sev->misc_cg, 1);
}

static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
{
	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	misc_cg_uncharge(type, sev->misc_cg, 1);
}
static int sev_asid_new(struct kvm_sev_info *sev)
{
	int asid, min_asid, max_asid, ret;
	bool retry = true;

	WARN_ON(sev->misc_cg);
	sev->misc_cg = get_current_misc_cg();
	ret = sev_misc_cg_try_charge(sev);
	if (ret) {
		put_misc_cg(sev->misc_cg);
		sev->misc_cg = NULL;
		return ret;
	}

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
	 */
	min_asid = sev->es_active ? 1 : min_sev_asid;
	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
	if (asid > max_asid) {
		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		ret = -EBUSY;
		goto e_uncharge;
	}

	__set_bit(asid, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return asid;
e_uncharge:
	sev_misc_cg_uncharge(sev);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
	return ret;
}
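/*
 * Worked example of the ASID split above (the numbers are illustrative,
 * not from any particular part): if CPUID reports min_sev_asid == 64 and
 * max_sev_asid == 509, then SEV-ES guests allocate from [1, 63] and plain
 * SEV guests from [64, 509]. sev_asid_new() scans the shared bitmap only
 * within the half that matches sev->es_active.
 */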
static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}
static void sev_asid_free(struct kvm_sev_info *sev)
{
	struct svm_cpu_data *sd;
	int cpu;

	mutex_lock(&sev_bitmap_lock);

	__set_bit(sev->asid, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[sev->asid] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);

	sev_misc_cg_uncharge(sev);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
}
static void sev_decommission(unsigned int handle)
{
	struct sev_data_decommission decommission;

	if (!handle)
		return;

	decommission.handle = handle;
	sev_guest_decommission(&decommission, NULL);
}
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_deactivate deactivate;

	if (!handle)
		return;

	deactivate.handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(&deactivate, NULL);
	up_read(&sev_deactivate_lock);

	sev_decommission(handle);
}
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	int asid, ret;

	if (kvm->created_vcpus)
		return -EINVAL;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	sev->active = true;
	sev->es_active = argp->id == KVM_SEV_ES_INIT;
	asid = sev_asid_new(sev);
	if (asid < 0)
		goto e_no_asid;
	sev->asid = asid;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	INIT_LIST_HEAD(&sev->regions_list);

	return 0;

e_free:
	sev_asid_free(sev);
	sev->asid = 0;
e_no_asid:
	sev->es_active = false;
	sev->active = false;
	return ret;
}
static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate activate;
	int asid = sev_get_asid(kvm);
	int ret;

	/* activate ASID on the given handle */
	activate.handle = handle;
	activate.asid   = asid;
	ret = sev_guest_activate(&activate, error);

	return ret;
}
static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&start, 0, sizeof(start));

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob))
			return PTR_ERR(dh_blob);

		start.dh_cert_address = __sme_set(__pa(dh_blob));
		start.dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start.session_address = __sme_set(__pa(session_blob));
		start.session_len = params.session_len;
	}

	start.handle = params.handle;
	start.policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret) {
		sev_decommission(start.handle);
		goto e_free_session;
	}

	/* return handle to userspace */
	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start.handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
	return ret;
}
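/*
 * Illustrative userspace sketch (an assumption for documentation, not part
 * of KVM) of driving KVM_SEV_LAUNCH_START after KVM_SEV_INIT via the
 * KVM_MEMORY_ENCRYPT_OP ioctl; vm_fd is an already-created VM fd and
 * sev_fd an open /dev/sev fd:
 *
 *	struct kvm_sev_launch_start start = { .policy = 0 };
 *	struct kvm_sev_cmd cmd = {
 *		.id = KVM_SEV_LAUNCH_START,
 *		.data = (__u64)(uintptr_t)&start,
 *		.sev_fd = sev_fd,
 *	};
 *	if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) < 0)
 *		err(1, "LAUNCH_START failed, fw error %#x", cmd.error);
 *	// on success, start.handle holds the firmware guest handle
 */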
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;
	int ret;

	lockdep_assert_held(&kvm->lock);

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the user virtual address. */
	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		ret = -ENOMEM;
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		unpin_user_pages(pages, npinned);

	kvfree(pages);
	return ERR_PTR(ret);
}
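/*
 * Example of the page-count math above (illustrative values): for
 * uaddr = 0x1234 and ulen = 0x2000, first = 0x1 and last = 0x3, so
 * npages = 3 even though ulen is only two pages worth of bytes, because
 * the unaligned range straddles three distinct pages.
 */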
static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	unpin_user_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}
static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
	    pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}
static unsigned long get_num_contig_pages(unsigned long idx,
				struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (IS_ERR(inpages))
		return PTR_ERR(inpages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(inpages, npages);

	data.reserved = 0;
	data.handle = sev->handle;

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data.len = len;
		data.address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
	return ret;
}
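/*
 * Example of one loop iteration above (illustrative values): for
 * vaddr = 0x1234, size = 0x3000 and two physically contiguous pages
 * starting at inpages[i], offset = 0x234 and
 * len = min(2 * 4096 - 0x234, 0x3000) = 0x1dcc, so LAUNCH_UPDATE_DATA
 * encrypts 0x1dcc bytes beginning at offset 0x234 of inpages[i].
 */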
static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
	struct vmcb_save_area *save = &svm->vmcb->save;

	/* Check some debug related fields before encrypting the VMSA */
	if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
		return -EINVAL;

	/* Sync registers */
	save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
	save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
	save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
	save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
	save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
	save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
	save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];

	save->r8  = svm->vcpu.arch.regs[VCPU_REGS_R8];
	save->r9  = svm->vcpu.arch.regs[VCPU_REGS_R9];
	save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
	save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
	save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
	save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
	save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
	save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];

	save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];

	/* Sync some non-GPR registers before encrypting */
	save->xcr0 = svm->vcpu.arch.xcr0;
	save->pkru = svm->vcpu.arch.pkru;
	save->xss  = svm->vcpu.arch.ia32_xss;
	save->dr6  = svm->vcpu.arch.dr6;

	/*
	 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
	 * the traditional VMSA that is part of the VMCB. Copy the
	 * traditional VMSA as it has been built so far (in prep
	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
	 */
	memcpy(svm->sev_es.vmsa, save, sizeof(*save));

	return 0;
}
static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
				    int *error)
{
	struct sev_data_launch_update_vmsa vmsa;
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	/* Perform some pre-encryption checks against the VMSA */
	ret = sev_es_sync_vmsa(svm);
	if (ret)
		return ret;

	/*
	 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
	 * the VMSA memory content (i.e. it will write the same memory region
	 * with the guest's key), so invalidate it first.
	 */
	clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);

	vmsa.reserved = 0;
	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
	vmsa.address = __sme_pa(svm->sev_es.vmsa);
	vmsa.len = PAGE_SIZE;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
	if (ret)
		return ret;

	vcpu->arch.guest_state_protected = true;
	return 0;
}
static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_vcpu *vcpu;
	int i, ret;

	if (!sev_es_guest(kvm))
		return -ENOTTY;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = mutex_lock_killable(&vcpu->mutex);
		if (ret)
			return ret;

		ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);

		mutex_unlock(&vcpu->mutex);
		if (ret)
			return ret;
	}

	return 0;
}
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
	}

cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);

	/*
	 * If we query the session length, FW responded with expected data.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}
static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
}
static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	memset(&data, 0, sizeof(data));

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
	if (ret)
		return ret;

	params.policy = data.policy;
	params.state = data.state;
	params.handle = data.handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;

	return ret;
}
static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg data;

	data.reserved = 0;
	data.handle = sev->handle;
	data.dst_addr = dst;
	data.src_addr = src;
	data.len = size;

	return sev_issue_cmd(kvm,
			     enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			     &data, error);
}
static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than we are asked; the caller should ensure
	 * that the destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}
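/*
 * Example of the alignment fixup above (illustrative values): for
 * src_paddr = 0x1009 and sz = 16, offset = 9, src_paddr is rounded down
 * to 0x1000 and sz is rounded up to 32, so the firmware decrypts two
 * full 16-byte blocks and the caller's data sits at offset 9 of the
 * destination buffer.
 */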
static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* if inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr,     16) ||
	    !IS_ALIGNED(size,      16)) {
		tpage = (void *)alloc_page(GFP_KERNEL);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}
static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *vaddr,
				  unsigned long dst_paddr,
				  void __user *dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage), vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If destination buffer or length is not aligned then do read-modify-write:
	 * - decrypt destination in an intermediate buffer
	 * - copy the source buffer in an intermediate buffer
	 * - use the intermediate buffer as source buffer
	 */
	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If source is a kernel buffer then use memcpy(), otherwise
		 * copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (IS_ERR(src_p))
			return PTR_ERR(src_p);

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (IS_ERR(dst_p)) {
			sev_unpin_memory(kvm, src_p, n);
			return PTR_ERR(dst_p);
		}

		/*
		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
		 * the pages; flush the destination too so that future accesses do not
		 * see stale data.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since user buffer may not be page aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     (void __user *)dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}
static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_secret data;
	struct kvm_sev_launch_secret params;
	struct page **pages;
	void *blob, *hdr;
	unsigned long n, i;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(pages, n);

	/*
	 * The secret must be copied into a contiguous memory region; let's
	 * verify that the userspace memory pages are contiguous before we
	 * issue the command.
	 */
	if (get_num_contig_pages(0, pages, n) != n) {
		ret = -EINVAL;
		goto e_unpin_memory;
	}

	memset(&data, 0, sizeof(data));

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data.guest_address = __sme_page_pa(pages[0]) + offset;
	data.guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto e_unpin_memory;
	}

	data.trans_address = __psp_pa(blob);
	data.trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr)) {
		ret = PTR_ERR(hdr);
		goto e_free_blob;
	}
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);

	kfree(hdr);

e_free_blob:
	kfree(blob);
e_unpin_memory:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < n; i++) {
		set_page_dirty_lock(pages[i]);
		mark_page_accessed(pages[i]);
	}
	sev_unpin_memory(kvm, pages, n);
	return ret;
}
static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *report = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_attestation_report data;
	struct kvm_sev_attestation_report params;
	void __user *p;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kmalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
		memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
	}
cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
	/*
	 * If we query the session length, FW responded with expected data.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(report, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}
/* Userspace wants to query session length. */
static int
__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
				      struct kvm_sev_send_start *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	int ret;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	params->session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

	return ret;
}
static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	struct kvm_sev_send_start params;
	void *amd_certs, *session_data;
	void *pdh_cert, *plat_certs;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_start)))
		return -EFAULT;

	/* if session_len is zero, userspace wants to query the session length */
	if (!params.session_len)
		return __sev_send_start_query_session_length(kvm, argp,
							     &params);

	/* some sanity checks */
	if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
	    !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
		return -EINVAL;

	/* allocate the memory to hold the session data blob */
	session_data = kmalloc(params.session_len, GFP_KERNEL_ACCOUNT);
	if (!session_data)
		return -ENOMEM;

	/* copy the certificate blobs from userspace */
	pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
				      params.pdh_cert_len);
	if (IS_ERR(pdh_cert)) {
		ret = PTR_ERR(pdh_cert);
		goto e_free_session;
	}

	plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
					params.plat_certs_len);
	if (IS_ERR(plat_certs)) {
		ret = PTR_ERR(plat_certs);
		goto e_free_pdh;
	}

	amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
				       params.amd_certs_len);
	if (IS_ERR(amd_certs)) {
		ret = PTR_ERR(amd_certs);
		goto e_free_plat_cert;
	}

	/* populate the FW SEND_START field with system physical address */
	memset(&data, 0, sizeof(data));
	data.pdh_cert_address = __psp_pa(pdh_cert);
	data.pdh_cert_len = params.pdh_cert_len;
	data.plat_certs_address = __psp_pa(plat_certs);
	data.plat_certs_len = params.plat_certs_len;
	data.amd_certs_address = __psp_pa(amd_certs);
	data.amd_certs_len = params.amd_certs_len;
	data.session_address = __psp_pa(session_data);
	data.session_len = params.session_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
				 session_data, params.session_len)) {
		ret = -EFAULT;
		goto e_free_amd_cert;
	}

	params.policy = data.policy;
	params.session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

e_free_amd_cert:
	kfree(amd_certs);
e_free_plat_cert:
	kfree(plat_certs);
e_free_pdh:
	kfree(pdh_cert);
e_free_session:
	kfree(session_data);
	return ret;
}
/* Userspace wants to query either header or trans length. */
static int
__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
				     struct kvm_sev_send_update_data *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	int ret;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	params->hdr_len = data.hdr_len;
	params->trans_len = data.trans_len;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_update_data)))
		ret = -EFAULT;

	return ret;
}
static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	struct kvm_sev_send_update_data params;
	void *hdr, *trans_data;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_update_data)))
		return -EFAULT;

	/* userspace wants to query either header or trans length */
	if (!params.trans_len || !params.hdr_len)
		return __sev_send_update_data_query_lengths(kvm, argp, &params);

	if (!params.trans_uaddr || !params.guest_uaddr ||
	    !params.guest_len || !params.hdr_uaddr)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if (params.guest_len + offset > PAGE_SIZE)
		return -EINVAL;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 0);
	if (IS_ERR(guest_page))
		return PTR_ERR(guest_page);

	/* allocate memory for header and transport buffer */
	ret = -ENOMEM;
	hdr = kmalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
	if (!hdr)
		goto e_unpin;

	trans_data = kmalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
	if (!trans_data)
		goto e_free_hdr;

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans_data);
	data.trans_len = params.trans_len;

	/* The SEND_UPDATE_DATA command requires C-bit to be always set. */
	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
	data.guest_address |= sev_me_mask;
	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	if (ret)
		goto e_free_trans_data;

	/* copy transport buffer to user space */
	if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
			 trans_data, params.trans_len)) {
		ret = -EFAULT;
		goto e_free_trans_data;
	}

	/* Copy packet header to userspace. */
	if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
			 params.hdr_len))
		ret = -EFAULT;

e_free_trans_data:
	kfree(trans_data);
e_free_hdr:
	kfree(hdr);
e_unpin:
	sev_unpin_memory(kvm, guest_page, n);

	return ret;
}
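/*
 * Example of the C-bit composition above (illustrative values): with the
 * C-bit at physical address bit 47, sev_me_mask == 1UL << 47, so a guest
 * page at PFN 0x1000 with offset 0x10 yields
 * data.guest_address == 0x800001000010.
 */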
static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
}
static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_cancel data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
}
static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_receive_start start;
	struct kvm_sev_receive_start params;
	int *error = &argp->error;
	void *session_data;
	void *pdh_data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	/* Get parameter from the userspace */
	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_start)))
		return -EFAULT;

	/* some sanity checks */
	if (!params.pdh_uaddr || !params.pdh_len ||
	    !params.session_uaddr || !params.session_len)
		return -EINVAL;

	pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
	if (IS_ERR(pdh_data))
		return PTR_ERR(pdh_data);

	session_data = psp_copy_user_blob(params.session_uaddr,
					  params.session_len);
	if (IS_ERR(session_data)) {
		ret = PTR_ERR(session_data);
		goto e_free_pdh;
	}

	memset(&start, 0, sizeof(start));
	start.handle = params.handle;
	start.policy = params.policy;
	start.pdh_cert_address = __psp_pa(pdh_data);
	start.pdh_cert_len = params.pdh_len;
	start.session_address = __psp_pa(session_data);
	start.session_len = params.session_len;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
			      error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret) {
		sev_decommission(start.handle);
		goto e_free_session;
	}

	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data,
			 &params, sizeof(struct kvm_sev_receive_start))) {
		ret = -EFAULT;
		sev_unbind_asid(kvm, start.handle);
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_data);
e_free_pdh:
	kfree(pdh_data);

	return ret;
}
static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_receive_update_data params;
	struct sev_data_receive_update_data data;
	void *hdr = NULL, *trans = NULL;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_update_data)))
		return -EFAULT;

	if (!params.hdr_uaddr || !params.hdr_len ||
	    !params.guest_uaddr || !params.guest_len ||
	    !params.trans_uaddr || !params.trans_len)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if (params.guest_len + offset > PAGE_SIZE)
		return -EINVAL;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto e_free_hdr;
	}

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans);
	data.trans_len = params.trans_len;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 1);
	if (IS_ERR(guest_page)) {
		ret = PTR_ERR(guest_page);
		goto e_free_trans;
	}

	/*
	 * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
	 * encrypts the written data with the guest's key, and the cache may
	 * contain dirty, unencrypted data.
	 */
	sev_clflush_pages(guest_page, n);

	/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
	data.guest_address |= sev_me_mask;
	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
			    &argp->error);

	sev_unpin_memory(kvm, guest_page, n);

e_free_trans:
	kfree(trans);
e_free_hdr:
	kfree(hdr);

	return ret;
}
static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_receive_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
}
static bool is_cmd_allowed_from_mirror(u32 cmd_id)
{
	/*
	 * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
	 * on active mirror VMs. Also allow the debugging and status commands.
	 */
	if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
	    cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
	    cmd_id == KVM_SEV_DBG_ENCRYPT)
		return true;

	return false;
}
static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
	struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
	struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;

	if (dst_kvm == src_kvm)
		return -EINVAL;

	/*
	 * Bail if these VMs are already involved in a migration to avoid
	 * deadlock between two VMs trying to migrate to/from each other.
	 */
	if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
		return -EBUSY;

	if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1)) {
		atomic_set_release(&dst_sev->migration_in_progress, 0);
		return -EBUSY;
	}

	mutex_lock(&dst_kvm->lock);
	mutex_lock(&src_kvm->lock);
	return 0;
}
static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
	struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
	struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;

	mutex_unlock(&dst_kvm->lock);
	mutex_unlock(&src_kvm->lock);
	atomic_set_release(&dst_sev->migration_in_progress, 0);
	atomic_set_release(&src_sev->migration_in_progress, 0);
}
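/*
 * Example of the deadlock the cmpxchg in sev_lock_two_vms() prevents
 * (hypothetical interleaving): userspace migrates A -> B and B -> A
 * concurrently; thread 1 takes A->lock and waits on B->lock while
 * thread 2 holds B->lock and waits on A->lock. With the
 * migration_in_progress flag claimed first, the second thread bails
 * out with -EBUSY instead of blocking.
 */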
static int sev_lock_vcpus_for_migration(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i, j;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mutex_lock_killable(&vcpu->mutex))
			goto out_unlock;
	}

	return 0;

out_unlock:
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;

		mutex_unlock(&vcpu->mutex);
	}
	return -EINTR;
}
static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		mutex_unlock(&vcpu->mutex);
	}
}
static void sev_migrate_from(struct kvm_sev_info *dst,
			     struct kvm_sev_info *src)
{
	dst->active = true;
	dst->asid = src->asid;
	dst->handle = src->handle;
	dst->pages_locked = src->pages_locked;
	dst->enc_context_owner = src->enc_context_owner;

	src->asid = 0;
	src->active = false;
	src->handle = 0;
	src->pages_locked = 0;
	src->enc_context_owner = NULL;

	list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);
}
static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
{
	int i;
	struct kvm_vcpu *dst_vcpu, *src_vcpu;
	struct vcpu_svm *dst_svm, *src_svm;

	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
		return -EINVAL;

	kvm_for_each_vcpu(i, src_vcpu, src) {
		if (!src_vcpu->arch.guest_state_protected)
			return -EINVAL;
	}

	kvm_for_each_vcpu(i, src_vcpu, src) {
		src_svm = to_svm(src_vcpu);
		dst_vcpu = kvm_get_vcpu(dst, i);
		dst_svm = to_svm(dst_vcpu);

		/*
		 * Transfer VMSA and GHCB state to the destination. Nullify and
		 * clear source fields as appropriate, the state now belongs to
		 * the destination.
		 */
		memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es));
		dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
		dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
		dst_vcpu->arch.guest_state_protected = true;

		memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es));
		src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
		src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
		src_vcpu->arch.guest_state_protected = false;
	}
	to_kvm_svm(src)->sev_info.es_active = false;
	to_kvm_svm(dst)->sev_info.es_active = true;

	return 0;
}
int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
{
	struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_info *src_sev, *cg_cleanup_sev;
	struct file *source_kvm_file;
	struct kvm *source_kvm;
	bool charged = false;
	int ret;

	source_kvm_file = fget(source_fd);
	if (!file_is_kvm(source_kvm_file)) {
		ret = -EBADF;
		goto out_fput;
	}

	source_kvm = source_kvm_file->private_data;
	ret = sev_lock_two_vms(kvm, source_kvm);
	if (ret)
		goto out_fput;

	if (sev_guest(kvm) || !sev_guest(source_kvm)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	src_sev = &to_kvm_svm(source_kvm)->sev_info;
	dst_sev->misc_cg = get_current_misc_cg();
	cg_cleanup_sev = dst_sev;
	if (dst_sev->misc_cg != src_sev->misc_cg) {
		ret = sev_misc_cg_try_charge(dst_sev);
		if (ret)
			goto out_dst_cgroup;
		charged = true;
	}

	ret = sev_lock_vcpus_for_migration(kvm);
	if (ret)
		goto out_dst_cgroup;
	ret = sev_lock_vcpus_for_migration(source_kvm);
	if (ret)
		goto out_dst_vcpu;

	if (sev_es_guest(source_kvm)) {
		ret = sev_es_migrate_from(kvm, source_kvm);
		if (ret)
			goto out_source_vcpu;
	}
	sev_migrate_from(dst_sev, src_sev);
	kvm_vm_dead(source_kvm);
	cg_cleanup_sev = src_sev;
	ret = 0;

out_source_vcpu:
	sev_unlock_vcpus_for_migration(source_kvm);
out_dst_vcpu:
	sev_unlock_vcpus_for_migration(kvm);
out_dst_cgroup:
	/* Operates on the source on success, on the destination on failure. */
	if (charged)
		sev_misc_cg_uncharge(cg_cleanup_sev);
	put_misc_cg(cg_cleanup_sev->misc_cg);
	cg_cleanup_sev->misc_cg = NULL;
out_unlock:
	sev_unlock_two_vms(kvm, source_kvm);
out_fput:
	if (source_kvm_file)
		fput(source_kvm_file);
	return ret;
}
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
	int r;

	if (!sev_enabled)
		return -ENOTTY;

	if (!argp)
		return 0;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
		return -EFAULT;

	mutex_lock(&kvm->lock);

	/* Only the enc_context_owner handles some memory enc operations. */
	if (is_mirroring_enc_context(kvm) &&
	    !is_cmd_allowed_from_mirror(sev_cmd.id)) {
		r = -EINVAL;
		goto out;
	}

	switch (sev_cmd.id) {
	case KVM_SEV_ES_INIT:
		if (!sev_es_enabled) {
			r = -ENOTTY;
			goto out;
		}
		fallthrough;
	case KVM_SEV_INIT:
		r = sev_guest_init(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_START:
		r = sev_launch_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_DATA:
		r = sev_launch_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_VMSA:
		r = sev_launch_update_vmsa(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_MEASURE:
		r = sev_launch_measure(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_FINISH:
		r = sev_launch_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_GUEST_STATUS:
		r = sev_guest_status(kvm, &sev_cmd);
		break;
	case KVM_SEV_DBG_DECRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, true);
		break;
	case KVM_SEV_DBG_ENCRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, false);
		break;
	case KVM_SEV_LAUNCH_SECRET:
		r = sev_launch_secret(kvm, &sev_cmd);
		break;
	case KVM_SEV_GET_ATTESTATION_REPORT:
		r = sev_get_attestation_report(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_START:
		r = sev_send_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_UPDATE_DATA:
		r = sev_send_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_FINISH:
		r = sev_send_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_CANCEL:
		r = sev_send_cancel(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_START:
		r = sev_receive_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_UPDATE_DATA:
		r = sev_receive_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_FINISH:
		r = sev_receive_finish(kvm, &sev_cmd);
		break;
	default:
		r = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
		r = -EFAULT;

out:
	mutex_unlock(&kvm->lock);
	return r;
}
int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct enc_region *region;
	int ret = 0;

	if (!sev_guest(kvm))
		return -ENOTTY;

	/* If kvm is mirroring encryption context it isn't responsible for it */
	if (is_mirroring_enc_context(kvm))
		return -EINVAL;

	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
	if (!region)
		return -ENOMEM;

	mutex_lock(&kvm->lock);
	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
	if (IS_ERR(region->pages)) {
		ret = PTR_ERR(region->pages);
		mutex_unlock(&kvm->lock);
		goto e_free;
	}

	region->uaddr = range->addr;
	region->size = range->size;

	list_add_tail(&region->list, &sev->regions_list);
	mutex_unlock(&kvm->lock);

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Let's make sure caches are
	 * flushed to ensure that guest data gets written into memory with
	 * the correct C-bit.
	 */
	sev_clflush_pages(region->pages, region->npages);

	return ret;

e_free:
	kfree(region);
	return ret;
}
static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct enc_region *i;

	list_for_each_entry(i, head, list) {
		if (i->uaddr == range->addr &&
		    i->size == range->size)
			return i;
	}

	return NULL;
}
static void __unregister_enc_region_locked(struct kvm *kvm,
					   struct enc_region *region)
{
	sev_unpin_memory(kvm, region->pages, region->npages);
	list_del(&region->list);
	kfree(region);
}
int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range)
{
	struct enc_region *region;
	int ret;

	/* If kvm is mirroring encryption context it isn't responsible for it */
	if (is_mirroring_enc_context(kvm))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (!sev_guest(kvm)) {
		ret = -ENOTTY;
		goto failed;
	}

	region = find_enc_region(kvm, range);
	if (!region) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	__unregister_enc_region_locked(kvm, region);

	mutex_unlock(&kvm->lock);
	return 0;

failed:
	mutex_unlock(&kvm->lock);
	return ret;
}
int svm_vm_copy_asid_from(struct kvm *kvm, unsigned int source_fd)
{
	struct file *source_kvm_file;
	struct kvm *source_kvm;
	struct kvm_sev_info source_sev, *mirror_sev;
	int ret;

	source_kvm_file = fget(source_fd);
	if (!file_is_kvm(source_kvm_file)) {
		ret = -EBADF;
		goto e_source_put;
	}

	source_kvm = source_kvm_file->private_data;
	mutex_lock(&source_kvm->lock);

	if (!sev_guest(source_kvm)) {
		ret = -EINVAL;
		goto e_source_unlock;
	}

	/* Mirrors of mirrors should work, but let's not get silly */
	if (is_mirroring_enc_context(source_kvm) || source_kvm == kvm) {
		ret = -EINVAL;
		goto e_source_unlock;
	}

	memcpy(&source_sev, &to_kvm_svm(source_kvm)->sev_info,
	       sizeof(source_sev));

	/*
	 * The mirror kvm holds an enc_context_owner ref so its asid can't
	 * disappear until we're done with it
	 */
	kvm_get_kvm(source_kvm);

	fput(source_kvm_file);
	mutex_unlock(&source_kvm->lock);
	mutex_lock(&kvm->lock);

	/*
	 * Disallow out-of-band SEV/SEV-ES init if the target is already an
	 * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
	 * created after SEV/SEV-ES initialization, e.g. to init intercepts.
	 */
	if (sev_guest(kvm) || kvm->created_vcpus) {
		ret = -EINVAL;
		goto e_mirror_unlock;
	}

	/* Set enc_context_owner and copy its encryption context over */
	mirror_sev = &to_kvm_svm(kvm)->sev_info;
	mirror_sev->enc_context_owner = source_kvm;
	mirror_sev->active = true;
	mirror_sev->asid = source_sev.asid;
	mirror_sev->fd = source_sev.fd;
	mirror_sev->es_active = source_sev.es_active;
	mirror_sev->handle = source_sev.handle;
	INIT_LIST_HEAD(&mirror_sev->regions_list);
	/*
	 * Do not copy ap_jump_table. Since the mirror does not share the same
	 * KVM contexts as the original, they may have a different memory map
	 * (i.e. the ap_jump_table address may differ).
	 */

	mutex_unlock(&kvm->lock);
	return 0;

e_mirror_unlock:
	mutex_unlock(&kvm->lock);
	kvm_put_kvm(source_kvm);
	return ret;
e_source_unlock:
	mutex_unlock(&source_kvm->lock);
e_source_put:
	if (source_kvm_file)
		fput(source_kvm_file);
	return ret;
}
void sev_vm_destroy(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct list_head *pos, *q;

	if (!sev_guest(kvm))
		return;

	/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
	if (is_mirroring_enc_context(kvm)) {
		kvm_put_kvm(sev->enc_context_owner);
		return;
	}

	mutex_lock(&kvm->lock);

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	/*
	 * If userspace was terminated before unregistering the memory regions,
	 * then let's unpin all the registered memory.
	 */
	if (!list_empty(head)) {
		list_for_each_safe(pos, q, head) {
			__unregister_enc_region_locked(kvm,
				list_entry(pos, struct enc_region, list));
			cond_resched();
		}
	}

	mutex_unlock(&kvm->lock);

	sev_unbind_asid(kvm, sev->handle);
	sev_asid_free(sev);
}
void __init sev_set_cpu_caps(void)
{
	if (!sev_enabled)
		kvm_cpu_cap_clear(X86_FEATURE_SEV);
	if (!sev_es_enabled)
		kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
}
void __init sev_hardware_setup(void)
{
#ifdef CONFIG_KVM_AMD_SEV
	unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
	bool sev_es_supported = false;
	bool sev_supported = false;

	if (!sev_enabled || !npt_enabled)
		goto out;

	/* Does the CPU support SEV? */
	if (!boot_cpu_has(X86_FEATURE_SEV))
		goto out;

	/* Retrieve SEV CPUID information */
	cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);

	/* Set encryption bit location for SEV-ES guests */
	sev_enc_bit = ebx & 0x3f;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = ecx;
	if (!max_sev_asid)
		goto out;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = edx;
	sev_me_mask = 1UL << (ebx & 0x3f);

	/*
	 * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
	 * even though it's never used, so that the bitmap is indexed by the
	 * actual ASID.
	 */
	nr_asids = max_sev_asid + 1;
	sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
	if (!sev_asid_bitmap)
		goto out;

	sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
	if (!sev_reclaim_asid_bitmap) {
		bitmap_free(sev_asid_bitmap);
		sev_asid_bitmap = NULL;
		goto out;
	}

	sev_asid_count = max_sev_asid - min_sev_asid + 1;
	if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
		goto out;

	pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
	sev_supported = true;

	/* SEV-ES support requested? */
	if (!sev_es_enabled)
		goto out;

	/* Does the CPU support SEV-ES? */
	if (!boot_cpu_has(X86_FEATURE_SEV_ES))
		goto out;

	/* Has the system been allocated ASIDs for SEV-ES? */
	if (min_sev_asid == 1)
		goto out;

	sev_es_asid_count = min_sev_asid - 1;
	if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
		goto out;

	pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
	sev_es_supported = true;

out:
	sev_enabled = sev_supported;
	sev_es_enabled = sev_es_supported;
#endif
}
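/*
 * Example decode of CPUID 0x8000001f (illustrative values; see the AMD
 * APM for the authoritative layout): EBX[5:0] = 51 puts the C-bit at
 * physical address bit 51 (sev_me_mask = 1UL << 51), ECX = 509 gives
 * max_sev_asid = 509, and EDX = 64 gives min_sev_asid = 64, leaving
 * ASIDs 1-63 for SEV-ES guests.
 */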
void sev_hardware_teardown(void)
{
	if (!sev_enabled)
		return;

	/* No need to take sev_bitmap_lock, all VMs have been destroyed. */
	sev_flush_asids(1, max_sev_asid);

	bitmap_free(sev_asid_bitmap);
	bitmap_free(sev_reclaim_asid_bitmap);

	misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
	misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
}
int sev_cpu_init(struct svm_cpu_data *sd)
{
	if (!sev_enabled)
		return 0;

	sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
	if (!sd->sev_vmcbs)
		return -ENOMEM;

	return 0;
}
/*
 * Pages used by hardware to hold guest encrypted state must be flushed before
 * returning them to the system.
 */
static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
				   unsigned long len)
{
	/*
	 * If hardware enforced cache coherency for encrypted mappings of the
	 * same physical page is supported, nothing to do.
	 */
	if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
		return;

	/*
	 * If the VM Page Flush MSR is supported, use it to flush the page
	 * (using the page virtual address and the guest ASID).
	 */
	if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
		struct kvm_sev_info *sev;
		unsigned long va_start;
		u64 start, stop;

		/* Align start and stop to page boundaries. */
		va_start = (unsigned long)va;
		start = (u64)va_start & PAGE_MASK;
		stop = PAGE_ALIGN((u64)va_start + len);

		if (start < stop) {
			sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;

			while (start < stop) {
				wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
				       start | sev->asid);

				start += PAGE_SIZE;
			}

			return;
		}

		WARN(1, "Address overflow, using WBINVD\n");
	}

	/*
	 * Hardware should always have one of the above features,
	 * but if not, use WBINVD and issue a warning.
	 */
	WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
	wbinvd_on_all_cpus();
}
void sev_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;

	if (!sev_es_guest(vcpu->kvm))
		return;

	svm = to_svm(vcpu);

	if (vcpu->arch.guest_state_protected)
		sev_flush_guest_memory(svm, svm->sev_es.vmsa, PAGE_SIZE);
	__free_page(virt_to_page(svm->sev_es.vmsa));

	if (svm->sev_es.ghcb_sa_free)
		kfree(svm->sev_es.ghcb_sa);
}
static void dump_ghcb(struct vcpu_svm *svm)
{
	struct ghcb *ghcb = svm->sev_es.ghcb;
	unsigned int nbits;

	/* Re-use the dump_invalid_vmcb module parameter */
	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	nbits = sizeof(ghcb->save.valid_bitmap) * 8;

	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
}
static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct ghcb *ghcb = svm->sev_es.ghcb;

	/*
	 * The GHCB protocol so far allows for the following data
	 * to be returned:
	 *   GPRs RAX, RBX, RCX, RDX
	 *
	 * Copy their values, even if they may not have been written during the
	 * VM-Exit. It's the guest's responsibility to not consume random data.
	 */
	ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
	ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
	ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
	ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}
static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct ghcb *ghcb = svm->sev_es.ghcb;
	u64 exit_code;

	/*
	 * The GHCB protocol so far allows for the following data
	 * to be supplied:
	 *   GPRs RAX, RBX, RCX, RDX
	 *   XCR0
	 *   CPL
	 *
	 * VMMCALL allows the guest to provide extra registers. KVM also
	 * expects RSI for hypercalls, so include that, too.
	 *
	 * Copy their values to the appropriate location if supplied.
	 */
	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));

	vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
	vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);

	svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);

	if (ghcb_xcr0_is_valid(ghcb)) {
		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
		kvm_update_cpuid_runtime(vcpu);
	}

	/* Copy the GHCB exit information into the VMCB fields */
	exit_code = ghcb_get_sw_exit_code(ghcb);
	control->exit_code = lower_32_bits(exit_code);
	control->exit_code_hi = upper_32_bits(exit_code);
	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);

	/* Clear the valid entries fields */
	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu;
	struct ghcb *ghcb;
	u64 exit_code;

	ghcb = svm->sev_es.ghcb;

	/* Only GHCB Usage code 0 is supported */
	if (ghcb->ghcb_usage)
		goto vmgexit_err;

	/*
	 * Retrieve the exit code now even though it may not be marked valid
	 * as it could help with debugging.
	 */
	exit_code = ghcb_get_sw_exit_code(ghcb);

	if (!ghcb_sw_exit_code_is_valid(ghcb) ||
	    !ghcb_sw_exit_info_1_is_valid(ghcb) ||
	    !ghcb_sw_exit_info_2_is_valid(ghcb))
		goto vmgexit_err;

	switch (ghcb_get_sw_exit_code(ghcb)) {
	case SVM_EXIT_READ_DR7:
		break;
	case SVM_EXIT_WRITE_DR7:
		if (!ghcb_rax_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSC:
		break;
	case SVM_EXIT_RDPMC:
		if (!ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_CPUID:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		if (ghcb_get_rax(ghcb) == 0xd)
			if (!ghcb_xcr0_is_valid(ghcb))
				goto vmgexit_err;
		break;
	case SVM_EXIT_INVD:
		break;
	case SVM_EXIT_IOIO:
		if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
			if (!ghcb_sw_scratch_is_valid(ghcb))
				goto vmgexit_err;
		} else {
			if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
				if (!ghcb_rax_is_valid(ghcb))
					goto vmgexit_err;
		}
		break;
	case SVM_EXIT_MSR:
		if (!ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		if (ghcb_get_sw_exit_info_1(ghcb)) {
			if (!ghcb_rax_is_valid(ghcb) ||
			    !ghcb_rdx_is_valid(ghcb))
				goto vmgexit_err;
		}
		break;
	case SVM_EXIT_VMMCALL:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_cpl_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSCP:
		break;
	case SVM_EXIT_WBINVD:
		break;
	case SVM_EXIT_MONITOR:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_rcx_is_valid(ghcb) ||
		    !ghcb_rdx_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_EXIT_MWAIT:
		if (!ghcb_rax_is_valid(ghcb) ||
		    !ghcb_rcx_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_MMIO_READ:
	case SVM_VMGEXIT_MMIO_WRITE:
		if (!ghcb_sw_scratch_is_valid(ghcb))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_NMI_COMPLETE:
	case SVM_VMGEXIT_AP_HLT_LOOP:
	case SVM_VMGEXIT_AP_JUMP_TABLE:
	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
		break;
	default:
		goto vmgexit_err;
	}

	return 0;

vmgexit_err:
	vcpu = &svm->vcpu;

	if (ghcb->ghcb_usage) {
		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
			    ghcb->ghcb_usage);
	} else {
		vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
			    exit_code);
		dump_ghcb(svm);
	}

	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
	vcpu->run->internal.ndata = 2;
	vcpu->run->internal.data[0] = exit_code;
	vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;

	return -EINVAL;
}
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{
	if (!svm->sev_es.ghcb)
		return;

	if (svm->sev_es.ghcb_sa_free) {
		/*
		 * The scratch area lives outside the GHCB, so there is a
		 * buffer that, depending on the operation performed, may
		 * need to be synced, then freed.
		 */
		if (svm->sev_es.ghcb_sa_sync) {
			kvm_write_guest(svm->vcpu.kvm,
					ghcb_get_sw_scratch(svm->sev_es.ghcb),
					svm->sev_es.ghcb_sa,
					svm->sev_es.ghcb_sa_len);
			svm->sev_es.ghcb_sa_sync = false;
		}

		kfree(svm->sev_es.ghcb_sa);
		svm->sev_es.ghcb_sa = NULL;
		svm->sev_es.ghcb_sa_free = false;
	}

	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);

	sev_es_sync_to_ghcb(svm);

	kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
	svm->sev_es.ghcb = NULL;
}
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	int asid = sev_get_asid(svm->vcpu.kvm);

	/* Assign the asid allocated with this SEV guest */
	svm->asid = asid;

	/*
	 * Flush guest TLB:
	 *
	 * 1) when a different VMCB for the same ASID is to be run on the same host CPU.
	 * 2) or this VMCB was executed on a different host CPU in previous VMRUNs.
	 */
	if (sd->sev_vmcbs[asid] == svm->vmcb &&
	    svm->vcpu.arch.last_vmentry_cpu == cpu)
		return;

	sd->sev_vmcbs[asid] = svm->vmcb;
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
}
#define GHCB_SCRATCH_AREA_LIMIT	(16ULL * PAGE_SIZE)
static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct ghcb *ghcb = svm->sev_es.ghcb;
	u64 ghcb_scratch_beg, ghcb_scratch_end;
	u64 scratch_gpa_beg, scratch_gpa_end;
	void *scratch_va;

	scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
	if (!scratch_gpa_beg) {
		pr_err("vmgexit: scratch gpa not provided\n");
		return false;
	}

	scratch_gpa_end = scratch_gpa_beg + len;
	if (scratch_gpa_end < scratch_gpa_beg) {
		pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
		       len, scratch_gpa_beg);
		return false;
	}

	if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
		/* Scratch area begins within GHCB */
		ghcb_scratch_beg = control->ghcb_gpa +
				   offsetof(struct ghcb, shared_buffer);
		ghcb_scratch_end = control->ghcb_gpa +
				   offsetof(struct ghcb, reserved_1);

		/*
		 * If the scratch area begins within the GHCB, it must be
		 * completely contained in the GHCB shared buffer area.
		 */
		if (scratch_gpa_beg < ghcb_scratch_beg ||
		    scratch_gpa_end > ghcb_scratch_end) {
			pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
			       scratch_gpa_beg, scratch_gpa_end);
			return false;
		}

		scratch_va = (void *)svm->sev_es.ghcb;
		scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
	} else {
		/*
		 * The guest memory must be read into a kernel buffer, so
		 * limit the size.
		 */
		if (len > GHCB_SCRATCH_AREA_LIMIT) {
			pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
			       len, GHCB_SCRATCH_AREA_LIMIT);
			return false;
		}
		scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
		if (!scratch_va)
			return false;

		if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
			/* Unable to copy scratch area from guest */
			pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
			kfree(scratch_va);
			return false;
		}

		/*
		 * The scratch area is outside the GHCB. The operation will
		 * dictate whether the buffer needs to be synced before running
		 * the vCPU next time (i.e. a read was requested so the data
		 * must be written back to the guest memory).
		 */
		svm->sev_es.ghcb_sa_sync = sync;
		svm->sev_es.ghcb_sa_free = true;
	}

	svm->sev_es.ghcb_sa = scratch_va;
	svm->sev_es.ghcb_sa_len = len;

	return true;
}

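/*
 * Callers pass sync == true when the host will write into the scratch
 * buffer (e.g. an MMIO read or string INS), so that sev_es_unmap_ghcb()
 * knows to copy the buffer back to guest memory before the vCPU runs
 * again; sync == false suffices for host-read-only uses such as MMIO
 * writes and string OUTS.
 */
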
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
			      unsigned int pos)
{
	svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
	svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
}

static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
{
	return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
}

static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
{
	svm->vmcb->control.ghcb_gpa = value;
}

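/*
 * Per the GHCB specification, the GHCB MSR doubles as the mailbox for the
 * MSR-based protocol: bits 11:0 (GHCBInfo) carry the request/response code
 * and bits 63:12 (GHCBData) carry the payload. The helpers above read and
 * write individual fields of that encoding, e.g. (illustrative):
 *
 *	set_ghcb_msr_bits(svm, cpuid_value, GHCB_MSR_CPUID_VALUE_MASK,
 *			  GHCB_MSR_CPUID_VALUE_POS);
 */
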
static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u64 ghcb_info;
	int ret = 1;

	ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;

	trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
					     control->ghcb_gpa);

	switch (ghcb_info) {
	case GHCB_MSR_SEV_INFO_REQ:
		set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
						    GHCB_VERSION_MIN,
						    sev_enc_bit));
		break;
	case GHCB_MSR_CPUID_REQ: {
		u64 cpuid_fn, cpuid_reg, cpuid_value;

		cpuid_fn = get_ghcb_msr_bits(svm,
					     GHCB_MSR_CPUID_FUNC_MASK,
					     GHCB_MSR_CPUID_FUNC_POS);

		/* Initialize the registers needed by the CPUID intercept */
		vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
		vcpu->arch.regs[VCPU_REGS_RCX] = 0;

		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
		if (!ret) {
			ret = -EINVAL;
			break;
		}

		cpuid_reg = get_ghcb_msr_bits(svm,
					      GHCB_MSR_CPUID_REG_MASK,
					      GHCB_MSR_CPUID_REG_POS);
		if (cpuid_reg == 0)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
		else if (cpuid_reg == 1)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
		else if (cpuid_reg == 2)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
		else
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];

		set_ghcb_msr_bits(svm, cpuid_value,
				  GHCB_MSR_CPUID_VALUE_MASK,
				  GHCB_MSR_CPUID_VALUE_POS);

		set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
				  GHCB_MSR_INFO_MASK,
				  GHCB_MSR_INFO_POS);
		break;
	}
	case GHCB_MSR_TERM_REQ: {
		u64 reason_set, reason_code;

		reason_set = get_ghcb_msr_bits(svm,
					       GHCB_MSR_TERM_REASON_SET_MASK,
					       GHCB_MSR_TERM_REASON_SET_POS);
		reason_code = get_ghcb_msr_bits(svm,
						GHCB_MSR_TERM_REASON_MASK,
						GHCB_MSR_TERM_REASON_POS);
		pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
			reason_set, reason_code);
		fallthrough;
	}
	default:
		ret = -EINVAL;
	}

	trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
					    control->ghcb_gpa, ret);

	return ret;
}

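/*
 * The MSR-based protocol above is the narrow fallback path, typically used
 * early in guest boot before a GHCB page has been set up: the whole
 * exchange is funneled through the GHCB MSR, so only requests whose
 * payload fits in the GHCBData bits (SEV info, a single CPUID register,
 * termination) are possible.
 */
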
int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	u64 ghcb_gpa, exit_code;
	struct ghcb *ghcb;
	int ret;

	/* Validate the GHCB */
	ghcb_gpa = control->ghcb_gpa;
	if (ghcb_gpa & GHCB_MSR_INFO_MASK)
		return sev_handle_vmgexit_msr_protocol(svm);

	if (!ghcb_gpa) {
		vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
		return -EINVAL;
	}

	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
		/* Unable to map GHCB from guest */
		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
			    ghcb_gpa);
		return -EINVAL;
	}

	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
	ghcb = svm->sev_es.ghcb_map.hva;

	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);

	exit_code = ghcb_get_sw_exit_code(ghcb);

	ret = sev_es_validate_vmgexit(svm);
	if (ret)
		return ret;

	sev_es_sync_from_ghcb(svm);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	ret = -EINVAL;
	switch (exit_code) {
	case SVM_VMGEXIT_MMIO_READ:
		if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
			break;

		ret = kvm_sev_es_mmio_read(vcpu,
					   control->exit_info_1,
					   control->exit_info_2,
					   svm->sev_es.ghcb_sa);
		break;
	case SVM_VMGEXIT_MMIO_WRITE:
		if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
			break;

		ret = kvm_sev_es_mmio_write(vcpu,
					    control->exit_info_1,
					    control->exit_info_2,
					    svm->sev_es.ghcb_sa);
		break;
	case SVM_VMGEXIT_NMI_COMPLETE:
		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
		break;
	case SVM_VMGEXIT_AP_HLT_LOOP:
		ret = kvm_emulate_ap_reset_hold(vcpu);
		break;
	case SVM_VMGEXIT_AP_JUMP_TABLE: {
		struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;

		switch (control->exit_info_1) {
		case 0:
			/* Set AP jump table address */
			sev->ap_jump_table = control->exit_info_2;
			break;
		case 1:
			/* Get AP jump table address */
			ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
			break;
		default:
			pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
			       control->exit_info_1);
			ghcb_set_sw_exit_info_1(ghcb, 1);
			ghcb_set_sw_exit_info_2(ghcb,
						X86_TRAP_UD |
						SVM_EVTINJ_TYPE_EXEPT |
						SVM_EVTINJ_VALID);
		}

		ret = 1;
		break;
	}
	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
		vcpu_unimpl(vcpu,
			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
			    control->exit_info_1, control->exit_info_2);
		break;
	default:
		ret = svm_invoke_exit_handler(vcpu, exit_code);
	}

	return ret;
}

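/*
 * For exit codes not handled inline above, svm_invoke_exit_handler() is
 * reused so that a VMGEXIT for, e.g., CPUID or MSR access flows through
 * the same handler as an ordinary intercepted exit; the results are copied
 * back into the GHCB by sev_es_sync_to_ghcb(), via sev_es_unmap_ghcb(),
 * before the next VMRUN.
 */
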
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
{
	int count;
	int bytes;

	if (svm->vmcb->control.exit_info_2 > INT_MAX)
		return -EINVAL;

	count = svm->vmcb->control.exit_info_2;
	if (unlikely(check_mul_overflow(count, size, &bytes)))
		return -EINVAL;

	if (!setup_vmgexit_scratch(svm, in, bytes))
		return -EINVAL;

	return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
				    count, in);
}

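/*
 * Both checks above guard the count * size multiplication: exit_info_2 is
 * a guest-supplied 64-bit rep count, so it is first clamped to INT_MAX and
 * the multiplication is then done with explicit overflow detection rather
 * than trusting the guest not to overflow the 32-bit byte count.
 */
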
void sev_es_init_vmcb(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;

	/*
	 * An SEV-ES guest requires a VMSA area that is separate from the
	 * VMCB page. Do not include the encryption mask on the VMSA physical
	 * address since hardware will access it using the guest key.
	 */
	svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);

	/* Can't intercept CR register access, HV can't modify CR registers */
	svm_clr_intercept(svm, INTERCEPT_CR0_READ);
	svm_clr_intercept(svm, INTERCEPT_CR4_READ);
	svm_clr_intercept(svm, INTERCEPT_CR8_READ);
	svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
	svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
	svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);

	svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);

	/* Track EFER/CR register changes */
	svm_set_intercept(svm, TRAP_EFER_WRITE);
	svm_set_intercept(svm, TRAP_CR0_WRITE);
	svm_set_intercept(svm, TRAP_CR4_WRITE);
	svm_set_intercept(svm, TRAP_CR8_WRITE);

	/* No support for enable_vmware_backdoor */
	clr_exception_intercept(svm, GP_VECTOR);

	/* Can't intercept XSETBV, HV can't modify XCR0 directly */
	svm_clr_intercept(svm, INTERCEPT_XSETBV);

	/* Clear intercepts on selected MSRs */
	set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

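/*
 * The pattern above follows from SEV-ES encrypting the register state:
 * state the hypervisor can no longer read or write (CRs, XCR0) must not
 * use read/write intercepts, while the TRAP_* intercept variants give KVM
 * a post-write notification without the ability to modify the new value.
 */
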
void sev_es_vcpu_reset(struct vcpu_svm *svm)
{
	/*
	 * Set the GHCB MSR value as per the GHCB specification when emulating
	 * vCPU RESET for an SEV-ES guest.
	 */
	set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
					    GHCB_VERSION_MIN,
					    sev_enc_bit));
}

void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	struct vmcb_save_area *hostsa;

	/*
	 * As an SEV-ES guest, hardware will restore the host state on VMEXIT,
	 * of which one step is to perform a VMLOAD. Since hardware does not
	 * perform a VMSAVE on VMRUN, the host save area must be updated.
	 */
	vmsave(__sme_page_pa(sd->save_area));

	/* XCR0 is restored on VMEXIT, save the current host value */
	hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
	hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);

	/* PKRU is restored on VMEXIT, save the current host value */
	hostsa->pkru = read_pkru();

	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
	hostsa->xss = host_xss;
}

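/*
 * The 0x400 offset above is where the save area starts within a VMCB page
 * (the control area occupies the first 0x400 bytes), i.e. where VMSAVE
 * stores its state; the fields VMSAVE does not cover (XCR0, PKRU, XSS) are
 * filled in by hand so that the hardware restore on #VMEXIT picks them up
 * as well.
 */
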
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* First SIPI: Use the values as initially set by the VMM */
	if (!svm->sev_es.received_first_sipi) {
		svm->sev_es.received_first_sipi = true;
		return;
	}

	/*
	 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
	 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a
	 * non-zero value.
	 */
	if (!svm->sev_es.ghcb)
		return;

	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
}
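
/*
 * Rationale: with SEV-ES the hypervisor cannot write CS:IP into the
 * encrypted VMSA, so the usual INIT/SIPI sequence cannot redirect an AP.
 * Instead the AP parks in an AP Reset Hold VMGEXIT and, on a subsequent
 * SIPI, is released with a non-zero SW_EXIT_INFO_2; the guest then jumps
 * to the address it published via the AP jump table.
 */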