1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
7 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10 #include <linux/kvm_types.h>
11 #include <linux/kvm_host.h>
12 #include <linux/kernel.h>
13 #include <linux/highmem.h>
14 #include <linux/psp-sev.h>
15 #include <linux/pagemap.h>
16 #include <linux/swap.h>
17 #include <linux/misc_cgroup.h>
18 #include <linux/processor.h>
19 #include <linux/trace_events.h>
20 #include <asm/fpu/internal.h>
22 #include <asm/trapnr.h>
30 #define __ex(x) __kvm_handle_fault_on_reboot(x)
32 #ifndef CONFIG_KVM_AMD_SEV
34 * When this config is not defined, the SEV feature is not supported and the APIs in
35 * this file are not used, but this file still gets compiled into the KVM AMD
38 * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
39 * misc_res_type {} defined in linux/misc_cgroup.h.
41 * The macros below allow compilation to succeed.
43 #define MISC_CG_RES_SEV MISC_CG_RES_TYPES
44 #define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
47 static u8 sev_enc_bit;
48 static int sev_flush_asids(void);
49 static DECLARE_RWSEM(sev_deactivate_lock);
50 static DEFINE_MUTEX(sev_bitmap_lock);
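/*
 * ASID bookkeeping: plain SEV guests use ASIDs from min_sev_asid to
 * max_sev_asid, while SEV-ES guests use the ASIDs below min_sev_asid.
 * sev_asid_bitmap tracks ASIDs in use; sev_reclaim_asid_bitmap tracks freed
 * ASIDs that need a DF_FLUSH before they can be handed out again.
 */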
51 unsigned int max_sev_asid;
52 static unsigned int min_sev_asid;
53 static unsigned long *sev_asid_bitmap;
54 static unsigned long *sev_reclaim_asid_bitmap;
57 struct list_head list;
64 static int sev_flush_asids(void)
69 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
70 * so it must be guarded.
72 down_write(&sev_deactivate_lock);
75 ret = sev_guest_df_flush(&error);
77 up_write(&sev_deactivate_lock);
80 pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
85 /* Must be called with the sev_bitmap_lock held */
86 static bool __sev_recycle_asids(int min_asid, int max_asid)
90 /* Check if there are any ASIDs to reclaim before performing a flush */
91 pos = find_next_bit(sev_reclaim_asid_bitmap, max_sev_asid, min_asid);
95 if (sev_flush_asids())
98 /* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
99 bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
101 bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
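/*
 * Allocate an ASID for a new SEV/SEV-ES guest.  The ASID is first charged
 * against the corresponding misc cgroup resource so that cgroup limits can
 * bound the number of SEV guests; the charge is released on failure.
 */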
106 static int sev_asid_new(struct kvm_sev_info *sev)
108 int pos, min_asid, max_asid, ret;
110 enum misc_res_type type;
112 type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
113 WARN_ON(sev->misc_cg);
114 sev->misc_cg = get_current_misc_cg();
115 ret = misc_cg_try_charge(type, sev->misc_cg, 1);
117 put_misc_cg(sev->misc_cg);
122 mutex_lock(&sev_bitmap_lock);
125 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
126 * SEV-ES-enabled guests can use ASIDs from 1 to min_sev_asid - 1.
128 min_asid = sev->es_active ? 0 : min_sev_asid - 1;
129 max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
131 pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_asid);
132 if (pos >= max_asid) {
133 if (retry && __sev_recycle_asids(min_asid, max_asid)) {
137 mutex_unlock(&sev_bitmap_lock);
142 __set_bit(pos, sev_asid_bitmap);
144 mutex_unlock(&sev_bitmap_lock);
148 misc_cg_uncharge(type, sev->misc_cg, 1);
149 put_misc_cg(sev->misc_cg);
154 static int sev_get_asid(struct kvm *kvm)
156 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
161 static void sev_asid_free(struct kvm_sev_info *sev)
163 struct svm_cpu_data *sd;
165 enum misc_res_type type;
167 mutex_lock(&sev_bitmap_lock);
170 __set_bit(pos, sev_reclaim_asid_bitmap);
172 for_each_possible_cpu(cpu) {
173 sd = per_cpu(svm_data, cpu);
174 sd->sev_vmcbs[pos] = NULL;
177 mutex_unlock(&sev_bitmap_lock);
179 type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
180 misc_cg_uncharge(type, sev->misc_cg, 1);
181 put_misc_cg(sev->misc_cg);
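/*
 * Unbind a firmware handle from its ASID: DEACTIVATE detaches the ASID
 * (taking sev_deactivate_lock to avoid racing with DF_FLUSH during ASID
 * recycling), then DECOMMISSION releases the firmware context.
 */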
185 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
187 struct sev_data_decommission *decommission;
188 struct sev_data_deactivate *data;
193 data = kzalloc(sizeof(*data), GFP_KERNEL);
197 /* deactivate handle */
198 data->handle = handle;
200 /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
201 down_read(&sev_deactivate_lock);
202 sev_guest_deactivate(data, NULL);
203 up_read(&sev_deactivate_lock);
207 decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
211 /* decommission handle */
212 decommission->handle = handle;
213 sev_guest_decommission(decommission, NULL);
218 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
220 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
224 if (unlikely(sev->active))
227 asid = sev_asid_new(sev);
232 ret = sev_platform_init(&argp->error);
237 INIT_LIST_HEAD(&sev->regions_list);
247 static int sev_es_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
252 to_kvm_svm(kvm)->sev_info.es_active = true;
254 return sev_guest_init(kvm, argp);
257 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
259 struct sev_data_activate *data;
260 int asid = sev_get_asid(kvm);
263 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
267 /* activate ASID on the given handle */
268 data->handle = handle;
270 ret = sev_guest_activate(data, error);
276 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
285 ret = sev_issue_cmd_external_user(f.file, id, data, error);
291 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
293 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
295 return __sev_issue_cmd(sev->fd, id, data, error);
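/*
 * KVM_SEV_LAUNCH_START: create the SEV firmware context, optionally passing
 * the DH certificate and session blobs provided by userspace, bind the VM's
 * ASID to the returned handle and copy the handle back to userspace.
 */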
298 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
300 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
301 struct sev_data_launch_start *start;
302 struct kvm_sev_launch_start params;
303 void *dh_blob, *session_blob;
304 int *error = &argp->error;
310 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
313 start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
318 if (params.dh_uaddr) {
319 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
320 if (IS_ERR(dh_blob)) {
321 ret = PTR_ERR(dh_blob);
325 start->dh_cert_address = __sme_set(__pa(dh_blob));
326 start->dh_cert_len = params.dh_len;
330 if (params.session_uaddr) {
331 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
332 if (IS_ERR(session_blob)) {
333 ret = PTR_ERR(session_blob);
337 start->session_address = __sme_set(__pa(session_blob));
338 start->session_len = params.session_len;
341 start->handle = params.handle;
342 start->policy = params.policy;
344 /* create memory encryption context */
345 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
349 /* Bind ASID to this guest */
350 ret = sev_bind_asid(kvm, start->handle, error);
354 /* return handle to userspace */
355 params.handle = start->handle;
356 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
357 sev_unbind_asid(kvm, start->handle);
362 sev->handle = start->handle;
363 sev->fd = argp->sev_fd;
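/*
 * Pin a userspace address range so the PSP can operate on it.  The pinned
 * page count is charged against RLIMIT_MEMLOCK via sev->pages_locked.
 */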
374 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
375 unsigned long ulen, unsigned long *n,
378 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
379 unsigned long npages, size;
381 unsigned long locked, lock_limit;
383 unsigned long first, last;
386 lockdep_assert_held(&kvm->lock);
388 if (ulen == 0 || uaddr + ulen < uaddr)
389 return ERR_PTR(-EINVAL);
391 /* Calculate number of pages. */
392 first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
393 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
394 npages = (last - first + 1);
396 locked = sev->pages_locked + npages;
397 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
398 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
399 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
400 return ERR_PTR(-ENOMEM);
403 if (WARN_ON_ONCE(npages > INT_MAX))
404 return ERR_PTR(-EINVAL);
406 /* Avoid using vmalloc for smaller buffers. */
407 size = npages * sizeof(struct page *);
408 if (size > PAGE_SIZE)
409 pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
411 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
414 return ERR_PTR(-ENOMEM);
416 /* Pin the user virtual address. */
417 npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
418 if (npinned != npages) {
419 pr_err("SEV: Failure locking %lu pages.\n", npages);
425 sev->pages_locked = locked;
431 unpin_user_pages(pages, npinned);
437 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
438 unsigned long npages)
440 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
442 unpin_user_pages(pages, npages);
444 sev->pages_locked -= npages;
447 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
449 uint8_t *page_virtual;
452 if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
456 for (i = 0; i < npages; i++) {
457 page_virtual = kmap_atomic(pages[i]);
458 clflush_cache_range(page_virtual, PAGE_SIZE);
459 kunmap_atomic(page_virtual);
463 static unsigned long get_num_contig_pages(unsigned long idx,
464 struct page **inpages, unsigned long npages)
466 unsigned long paddr, next_paddr;
467 unsigned long i = idx + 1, pages = 1;
469 /* find the number of contiguous pages starting from idx */
470 paddr = __sme_page_pa(inpages[idx]);
472 next_paddr = __sme_page_pa(inpages[i++]);
473 if ((paddr + PAGE_SIZE) == next_paddr) {
484 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
486 unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
487 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
488 struct kvm_sev_launch_update_data params;
489 struct sev_data_launch_update_data *data;
490 struct page **inpages;
496 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
499 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
503 vaddr = params.uaddr;
505 vaddr_end = vaddr + size;
507 /* Lock the user memory. */
508 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
509 if (IS_ERR(inpages)) {
510 ret = PTR_ERR(inpages);
515 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
516 * place; the cache may contain the data that was written unencrypted.
518 sev_clflush_pages(inpages, npages);
520 for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
524 * If the user buffer is not page-aligned, calculate the offset
527 offset = vaddr & (PAGE_SIZE - 1);
529 /* Calculate the number of pages that can be encrypted in one go. */
530 pages = get_num_contig_pages(i, inpages, npages);
532 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
534 data->handle = sev->handle;
536 data->address = __sme_page_pa(inpages[i]) + offset;
537 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
542 next_vaddr = vaddr + len;
546 /* content of memory is updated, mark pages dirty */
547 for (i = 0; i < npages; i++) {
548 set_page_dirty_lock(inpages[i]);
549 mark_page_accessed(inpages[i]);
551 /* unlock the user pages */
552 sev_unpin_memory(kvm, inpages, npages);
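/*
 * Copy the current vCPU register state into the per-vCPU VMSA page, which
 * LAUNCH_UPDATE_VMSA will then encrypt in place as the initial SEV-ES
 * guest state.
 */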
558 static int sev_es_sync_vmsa(struct vcpu_svm *svm)
560 struct vmcb_save_area *save = &svm->vmcb->save;
562 /* Check some debug related fields before encrypting the VMSA */
563 if (svm->vcpu.guest_debug || (save->dr7 & ~DR7_FIXED_1))
566 /* Sync registers */
567 save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
568 save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
569 save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
570 save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
571 save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
572 save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
573 save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
574 save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];
576 save->r8 = svm->vcpu.arch.regs[VCPU_REGS_R8];
577 save->r9 = svm->vcpu.arch.regs[VCPU_REGS_R9];
578 save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
579 save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
580 save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
581 save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
582 save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
583 save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];
585 save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];
587 /* Sync some non-GPR registers before encrypting */
588 save->xcr0 = svm->vcpu.arch.xcr0;
589 save->pkru = svm->vcpu.arch.pkru;
590 save->xss = svm->vcpu.arch.ia32_xss;
593 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
594 * the traditional VMSA that is part of the VMCB. Copy the
595 * traditional VMSA as it has been built so far (in prep
596 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
598 memcpy(svm->vmsa, save, sizeof(*save));
603 static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
605 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
606 struct sev_data_launch_update_vmsa *vmsa;
609 if (!sev_es_guest(kvm))
612 vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL);
616 for (i = 0; i < kvm->created_vcpus; i++) {
617 struct vcpu_svm *svm = to_svm(kvm->vcpus[i]);
619 /* Perform some pre-encryption checks against the VMSA */
620 ret = sev_es_sync_vmsa(svm);
625 * The LAUNCH_UPDATE_VMSA command will perform in-place
626 * encryption of the VMSA memory content (i.e., it will write
627 * the same memory region with the guest's key), so invalidate
630 clflush_cache_range(svm->vmsa, PAGE_SIZE);
632 vmsa->handle = sev->handle;
633 vmsa->address = __sme_pa(svm->vmsa);
634 vmsa->len = PAGE_SIZE;
635 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, vmsa,
640 svm->vcpu.arch.guest_state_protected = true;
648 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
650 void __user *measure = (void __user *)(uintptr_t)argp->data;
651 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
652 struct sev_data_launch_measure *data;
653 struct kvm_sev_launch_measure params;
654 void __user *p = NULL;
661 if (copy_from_user(&params, measure, sizeof(params)))
664 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
668 /* User wants to query the blob length */
672 p = (void __user *)(uintptr_t)params.uaddr;
674 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
680 blob = kmalloc(params.len, GFP_KERNEL);
684 data->address = __psp_pa(blob);
685 data->len = params.len;
689 data->handle = sev->handle;
690 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
693 * If we were only querying the blob length, the FW responded with the expected data.
702 if (copy_to_user(p, blob, params.len))
707 params.len = data->len;
708 if (copy_to_user(measure, &params, sizeof(params)))
717 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
719 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
720 struct sev_data_launch_finish *data;
726 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
730 data->handle = sev->handle;
731 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
737 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
739 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
740 struct kvm_sev_guest_status params;
741 struct sev_data_guest_status *data;
747 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
751 data->handle = sev->handle;
752 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
756 params.policy = data->policy;
757 params.state = data->state;
758 params.handle = data->handle;
760 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
767 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
768 unsigned long dst, int size,
769 int *error, bool enc)
771 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
772 struct sev_data_dbg *data;
775 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
779 data->handle = sev->handle;
780 data->dst_addr = dst;
781 data->src_addr = src;
784 ret = sev_issue_cmd(kvm,
785 enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
791 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
792 unsigned long dst_paddr, int sz, int *err)
797 * It's safe to read more than we are asked; the caller should ensure that the
798 * destination has enough space.
800 offset = src_paddr & 15;
801 src_paddr = round_down(src_paddr, 16);
802 sz = round_up(sz + offset, 16);
804 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
807 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
808 unsigned long __user dst_uaddr,
809 unsigned long dst_paddr,
812 struct page *tpage = NULL;
815 /* If the inputs are not 16-byte aligned then use an intermediate buffer */
816 if (!IS_ALIGNED(dst_paddr, 16) ||
817 !IS_ALIGNED(paddr, 16) ||
818 !IS_ALIGNED(size, 16)) {
819 tpage = (void *)alloc_page(GFP_KERNEL);
823 dst_paddr = __sme_page_pa(tpage);
826 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
832 if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
833 page_address(tpage) + offset, size))
844 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
845 unsigned long __user vaddr,
846 unsigned long dst_paddr,
847 unsigned long __user dst_vaddr,
848 int size, int *error)
850 struct page *src_tpage = NULL;
851 struct page *dst_tpage = NULL;
854 /* If source buffer is not aligned then use an intermediate buffer */
855 if (!IS_ALIGNED(vaddr, 16)) {
856 src_tpage = alloc_page(GFP_KERNEL);
860 if (copy_from_user(page_address(src_tpage),
861 (void __user *)(uintptr_t)vaddr, size)) {
862 __free_page(src_tpage);
866 paddr = __sme_page_pa(src_tpage);
870 * If destination buffer or length is not aligned then do read-modify-write:
871 * - decrypt destination in an intermediate buffer
872 * - copy the source buffer in an intermediate buffer
873 * - use the intermediate buffer as source buffer
875 if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
878 dst_tpage = alloc_page(GFP_KERNEL);
884 ret = __sev_dbg_decrypt(kvm, dst_paddr,
885 __sme_page_pa(dst_tpage), size, error);
890 * If the source is a kernel buffer then use memcpy(); otherwise
893 dst_offset = dst_paddr & 15;
896 memcpy(page_address(dst_tpage) + dst_offset,
897 page_address(src_tpage), size);
899 if (copy_from_user(page_address(dst_tpage) + dst_offset,
900 (void __user *)(uintptr_t)vaddr, size)) {
906 paddr = __sme_page_pa(dst_tpage);
907 dst_paddr = round_down(dst_paddr, 16);
908 len = round_up(size, 16);
911 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
915 __free_page(src_tpage);
917 __free_page(dst_tpage);
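/*
 * Common handler for KVM_SEV_DBG_DECRYPT and KVM_SEV_DBG_ENCRYPT: walk the
 * source range page by page, pin the source and destination pages, flush
 * caches and issue DBG_DECRYPT or DBG_ENCRYPT for each chunk.
 */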
921 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
923 unsigned long vaddr, vaddr_end, next_vaddr;
924 unsigned long dst_vaddr;
925 struct page **src_p, **dst_p;
926 struct kvm_sev_dbg debug;
934 if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
937 if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
939 if (!debug.dst_uaddr)
942 vaddr = debug.src_uaddr;
944 vaddr_end = vaddr + size;
945 dst_vaddr = debug.dst_uaddr;
947 for (; vaddr < vaddr_end; vaddr = next_vaddr) {
948 int len, s_off, d_off;
950 /* lock userspace source and destination page */
951 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
953 return PTR_ERR(src_p);
955 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
957 sev_unpin_memory(kvm, src_p, n);
958 return PTR_ERR(dst_p);
962 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
963 * the pages; flush the destination too so that future accesses do not
966 sev_clflush_pages(src_p, 1);
967 sev_clflush_pages(dst_p, 1);
970 * Since user buffer may not be page aligned, calculate the
971 * offset within the page.
973 s_off = vaddr & ~PAGE_MASK;
974 d_off = dst_vaddr & ~PAGE_MASK;
975 len = min_t(size_t, (PAGE_SIZE - s_off), size);
978 ret = __sev_dbg_decrypt_user(kvm,
979 __sme_page_pa(src_p[0]) + s_off,
981 __sme_page_pa(dst_p[0]) + d_off,
984 ret = __sev_dbg_encrypt_user(kvm,
985 __sme_page_pa(src_p[0]) + s_off,
987 __sme_page_pa(dst_p[0]) + d_off,
991 sev_unpin_memory(kvm, src_p, n);
992 sev_unpin_memory(kvm, dst_p, n);
997 next_vaddr = vaddr + len;
998 dst_vaddr = dst_vaddr + len;
1005 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
1007 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1008 struct sev_data_launch_secret *data;
1009 struct kvm_sev_launch_secret params;
1010 struct page **pages;
1015 if (!sev_guest(kvm))
1018 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
1021 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
1023 return PTR_ERR(pages);
1026 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
1027 * place; the cache may contain the data that was written unencrypted.
1029 sev_clflush_pages(pages, n);
1032 * The secret must be copied into a contiguous memory region; verify
1033 * that the userspace memory pages are contiguous before issuing the command.
1035 if (get_num_contig_pages(0, pages, n) != n) {
1037 goto e_unpin_memory;
1041 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
1043 goto e_unpin_memory;
1045 offset = params.guest_uaddr & (PAGE_SIZE - 1);
1046 data->guest_address = __sme_page_pa(pages[0]) + offset;
1047 data->guest_len = params.guest_len;
1049 blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
1051 ret = PTR_ERR(blob);
1055 data->trans_address = __psp_pa(blob);
1056 data->trans_len = params.trans_len;
1058 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
1063 data->hdr_address = __psp_pa(hdr);
1064 data->hdr_len = params.hdr_len;
1066 data->handle = sev->handle;
1067 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
1076 /* content of memory is updated, mark pages dirty */
1077 for (i = 0; i < n; i++) {
1078 set_page_dirty_lock(pages[i]);
1079 mark_page_accessed(pages[i]);
1081 sev_unpin_memory(kvm, pages, n);
1085 static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
1087 void __user *report = (void __user *)(uintptr_t)argp->data;
1088 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1089 struct sev_data_attestation_report *data;
1090 struct kvm_sev_attestation_report params;
1095 if (!sev_guest(kvm))
1098 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
1101 data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
1105 /* User wants to query the blob length */
1109 p = (void __user *)(uintptr_t)params.uaddr;
1111 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
1117 blob = kmalloc(params.len, GFP_KERNEL);
1121 data->address = __psp_pa(blob);
1122 data->len = params.len;
1123 memcpy(data->mnonce, params.mnonce, sizeof(params.mnonce));
1126 data->handle = sev->handle;
1127 ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &argp->error);
1129 * If we were only querying the blob length, the FW responded with the expected data.
1138 if (copy_to_user(p, blob, params.len))
1143 params.len = data->len;
1144 if (copy_to_user(report, &params, sizeof(params)))
1153 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
1155 struct kvm_sev_cmd sev_cmd;
1158 if (!svm_sev_enabled() || !sev)
1164 if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
1167 mutex_lock(&kvm->lock);
1169 switch (sev_cmd.id) {
1171 r = sev_guest_init(kvm, &sev_cmd);
1173 case KVM_SEV_ES_INIT:
1174 r = sev_es_guest_init(kvm, &sev_cmd);
1176 case KVM_SEV_LAUNCH_START:
1177 r = sev_launch_start(kvm, &sev_cmd);
1179 case KVM_SEV_LAUNCH_UPDATE_DATA:
1180 r = sev_launch_update_data(kvm, &sev_cmd);
1182 case KVM_SEV_LAUNCH_UPDATE_VMSA:
1183 r = sev_launch_update_vmsa(kvm, &sev_cmd);
1185 case KVM_SEV_LAUNCH_MEASURE:
1186 r = sev_launch_measure(kvm, &sev_cmd);
1188 case KVM_SEV_LAUNCH_FINISH:
1189 r = sev_launch_finish(kvm, &sev_cmd);
1191 case KVM_SEV_GUEST_STATUS:
1192 r = sev_guest_status(kvm, &sev_cmd);
1194 case KVM_SEV_DBG_DECRYPT:
1195 r = sev_dbg_crypt(kvm, &sev_cmd, true);
1197 case KVM_SEV_DBG_ENCRYPT:
1198 r = sev_dbg_crypt(kvm, &sev_cmd, false);
1200 case KVM_SEV_LAUNCH_SECRET:
1201 r = sev_launch_secret(kvm, &sev_cmd);
1203 case KVM_SEV_GET_ATTESTATION_REPORT:
1204 r = sev_get_attestation_report(kvm, &sev_cmd);
1211 if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
1215 mutex_unlock(&kvm->lock);
1219 int svm_register_enc_region(struct kvm *kvm,
1220 struct kvm_enc_region *range)
1222 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1223 struct enc_region *region;
1226 if (!sev_guest(kvm))
1229 if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
1232 region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
1236 mutex_lock(&kvm->lock);
1237 region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
1238 if (IS_ERR(region->pages)) {
1239 ret = PTR_ERR(region->pages);
1240 mutex_unlock(&kvm->lock);
1244 region->uaddr = range->addr;
1245 region->size = range->size;
1247 list_add_tail(&region->list, &sev->regions_list);
1248 mutex_unlock(&kvm->lock);
1251 * The guest may change the memory encryption attribute from C=0 -> C=1
1252 * or vice versa for this memory range. Let's make sure caches are
1253 * flushed to ensure that guest data gets written into memory with
1256 sev_clflush_pages(region->pages, region->npages);
1265 static struct enc_region *
1266 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1268 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1269 struct list_head *head = &sev->regions_list;
1270 struct enc_region *i;
1272 list_for_each_entry(i, head, list) {
1273 if (i->uaddr == range->addr &&
1274 i->size == range->size)
1281 static void __unregister_enc_region_locked(struct kvm *kvm,
1282 struct enc_region *region)
1284 sev_unpin_memory(kvm, region->pages, region->npages);
1285 list_del(&region->list);
1289 int svm_unregister_enc_region(struct kvm *kvm,
1290 struct kvm_enc_region *range)
1292 struct enc_region *region;
1295 mutex_lock(&kvm->lock);
1297 if (!sev_guest(kvm)) {
1302 region = find_enc_region(kvm, range);
1309 * Ensure that all guest tagged cache entries are flushed before
1310 * releasing the pages back to the system for use. CLFLUSH will
1311 * not do this, so issue a WBINVD.
1313 wbinvd_on_all_cpus();
1315 __unregister_enc_region_locked(kvm, region);
1317 mutex_unlock(&kvm->lock);
1321 mutex_unlock(&kvm->lock);
1325 void sev_vm_destroy(struct kvm *kvm)
1327 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1328 struct list_head *head = &sev->regions_list;
1329 struct list_head *pos, *q;
1331 if (!sev_guest(kvm))
1334 mutex_lock(&kvm->lock);
1337 * Ensure that all guest tagged cache entries are flushed before
1338 * releasing the pages back to the system for use. CLFLUSH will
1339 * not do this, so issue a WBINVD.
1341 wbinvd_on_all_cpus();
1344 * If userspace was terminated before unregistering the memory regions,
1345 * unpin all the registered memory.
1347 if (!list_empty(head)) {
1348 list_for_each_safe(pos, q, head) {
1349 __unregister_enc_region_locked(kvm,
1350 list_entry(pos, struct enc_region, list));
1355 mutex_unlock(&kvm->lock);
1357 sev_unbind_asid(kvm, sev->handle);
1361 void __init sev_hardware_setup(void)
1363 unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
1364 bool sev_es_supported = false;
1365 bool sev_supported = false;
1367 /* Does the CPU support SEV? */
1368 if (!boot_cpu_has(X86_FEATURE_SEV))
1371 /* Retrieve SEV CPUID information */
1372 cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
1374 /* Set encryption bit location for SEV-ES guests */
1375 sev_enc_bit = ebx & 0x3f;
1377 /* Maximum number of encrypted guests supported simultaneously */
1380 if (!svm_sev_enabled())
1383 /* Minimum ASID value that should be used for SEV guest */
1386 /* Initialize SEV ASID bitmaps */
1387 sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1388 if (!sev_asid_bitmap)
1391 sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1392 if (!sev_reclaim_asid_bitmap)
1395 sev_asid_count = max_sev_asid - min_sev_asid + 1;
1396 if (misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count))
1399 pr_info("SEV supported: %u ASIDs\n", sev_asid_count);
1400 sev_supported = true;
1402 /* SEV-ES support requested? */
1406 /* Does the CPU support SEV-ES? */
1407 if (!boot_cpu_has(X86_FEATURE_SEV_ES))
1410 /* Has the system been allocated ASIDs for SEV-ES? */
1411 if (min_sev_asid == 1)
1414 sev_es_asid_count = min_sev_asid - 1;
1415 if (misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count))
1418 pr_info("SEV-ES supported: %u ASIDs\n", sev_es_asid_count);
1419 sev_es_supported = true;
1422 sev = sev_supported;
1423 sev_es = sev_es_supported;
1426 void sev_hardware_teardown(void)
1428 if (!svm_sev_enabled())
1431 bitmap_free(sev_asid_bitmap);
1432 bitmap_free(sev_reclaim_asid_bitmap);
1433 misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
1434 misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
1440 * Pages used by hardware to hold guest encrypted state must be flushed before
1441 * returning them to the system.
1443 static void sev_flush_guest_memory(struct vcpu_svm *svm, void *va,
1447 * If hardware enforced cache coherency for encrypted mappings of the
1448 * same physical page is supported, nothing to do.
1450 if (boot_cpu_has(X86_FEATURE_SME_COHERENT))
1454 * If the VM Page Flush MSR is supported, use it to flush the page
1455 * (using the page virtual address and the guest ASID).
1457 if (boot_cpu_has(X86_FEATURE_VM_PAGE_FLUSH)) {
1458 struct kvm_sev_info *sev;
1459 unsigned long va_start;
1462 /* Align start and stop to page boundaries. */
1463 va_start = (unsigned long)va;
1464 start = (u64)va_start & PAGE_MASK;
1465 stop = PAGE_ALIGN((u64)va_start + len);
1468 sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
1470 while (start < stop) {
1471 wrmsrl(MSR_AMD64_VM_PAGE_FLUSH,
1480 WARN(1, "Address overflow, using WBINVD\n");
1484 * Hardware should always have one of the above features,
1485 * but if not, use WBINVD and issue a warning.
1487 WARN_ONCE(1, "Using WBINVD to flush guest memory\n");
1488 wbinvd_on_all_cpus();
1491 void sev_free_vcpu(struct kvm_vcpu *vcpu)
1493 struct vcpu_svm *svm;
1495 if (!sev_es_guest(vcpu->kvm))
1500 if (vcpu->arch.guest_state_protected)
1501 sev_flush_guest_memory(svm, svm->vmsa, PAGE_SIZE);
1502 __free_page(virt_to_page(svm->vmsa));
1504 if (svm->ghcb_sa_free)
1505 kfree(svm->ghcb_sa);
1508 static void dump_ghcb(struct vcpu_svm *svm)
1510 struct ghcb *ghcb = svm->ghcb;
1513 /* Re-use the dump_invalid_vmcb module parameter */
1514 if (!dump_invalid_vmcb) {
1515 pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
1519 nbits = sizeof(ghcb->save.valid_bitmap) * 8;
1521 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
1522 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
1523 ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
1524 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
1525 ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
1526 pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
1527 ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
1528 pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
1529 ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
1530 pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
1533 static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
1535 struct kvm_vcpu *vcpu = &svm->vcpu;
1536 struct ghcb *ghcb = svm->ghcb;
1539 * The GHCB protocol so far allows for the following data
1541 * GPRs RAX, RBX, RCX, RDX
1543 * Copy their values, even if they may not have been written during the
1544 * VM-Exit. It's the guest's responsibility to not consume random data.
1546 ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
1547 ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
1548 ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
1549 ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
1552 static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
1554 struct vmcb_control_area *control = &svm->vmcb->control;
1555 struct kvm_vcpu *vcpu = &svm->vcpu;
1556 struct ghcb *ghcb = svm->ghcb;
1560 * The GHCB protocol so far allows for the following data
1562 * GPRs RAX, RBX, RCX, RDX
1566 * VMMCALL allows the guest to provide extra registers. KVM also
1567 * expects RSI for hypercalls, so include that, too.
1569 * Copy their values to the appropriate location if supplied.
1571 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
1573 vcpu->arch.regs[VCPU_REGS_RAX] = ghcb_get_rax_if_valid(ghcb);
1574 vcpu->arch.regs[VCPU_REGS_RBX] = ghcb_get_rbx_if_valid(ghcb);
1575 vcpu->arch.regs[VCPU_REGS_RCX] = ghcb_get_rcx_if_valid(ghcb);
1576 vcpu->arch.regs[VCPU_REGS_RDX] = ghcb_get_rdx_if_valid(ghcb);
1577 vcpu->arch.regs[VCPU_REGS_RSI] = ghcb_get_rsi_if_valid(ghcb);
1579 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb);
1581 if (ghcb_xcr0_is_valid(ghcb)) {
1582 vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
1583 kvm_update_cpuid_runtime(vcpu);
1586 /* Copy the GHCB exit information into the VMCB fields */
1587 exit_code = ghcb_get_sw_exit_code(ghcb);
1588 control->exit_code = lower_32_bits(exit_code);
1589 control->exit_code_hi = upper_32_bits(exit_code);
1590 control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
1591 control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
1593 /* Clear the valid entries fields */
1594 memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
1597 static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
1599 struct kvm_vcpu *vcpu;
1605 /* Only GHCB Usage code 0 is supported */
1606 if (ghcb->ghcb_usage)
1610 * Retrieve the exit code now even though it may not be marked valid
1611 * as it could help with debugging.
1613 exit_code = ghcb_get_sw_exit_code(ghcb);
1615 if (!ghcb_sw_exit_code_is_valid(ghcb) ||
1616 !ghcb_sw_exit_info_1_is_valid(ghcb) ||
1617 !ghcb_sw_exit_info_2_is_valid(ghcb))
1620 switch (ghcb_get_sw_exit_code(ghcb)) {
1621 case SVM_EXIT_READ_DR7:
1623 case SVM_EXIT_WRITE_DR7:
1624 if (!ghcb_rax_is_valid(ghcb))
1627 case SVM_EXIT_RDTSC:
1629 case SVM_EXIT_RDPMC:
1630 if (!ghcb_rcx_is_valid(ghcb))
1633 case SVM_EXIT_CPUID:
1634 if (!ghcb_rax_is_valid(ghcb) ||
1635 !ghcb_rcx_is_valid(ghcb))
1637 if (ghcb_get_rax(ghcb) == 0xd)
1638 if (!ghcb_xcr0_is_valid(ghcb))
1644 if (ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_STR_MASK) {
1645 if (!ghcb_sw_scratch_is_valid(ghcb))
1648 if (!(ghcb_get_sw_exit_info_1(ghcb) & SVM_IOIO_TYPE_MASK))
1649 if (!ghcb_rax_is_valid(ghcb))
1654 if (!ghcb_rcx_is_valid(ghcb))
1656 if (ghcb_get_sw_exit_info_1(ghcb)) {
1657 if (!ghcb_rax_is_valid(ghcb) ||
1658 !ghcb_rdx_is_valid(ghcb))
1662 case SVM_EXIT_VMMCALL:
1663 if (!ghcb_rax_is_valid(ghcb) ||
1664 !ghcb_cpl_is_valid(ghcb))
1667 case SVM_EXIT_RDTSCP:
1669 case SVM_EXIT_WBINVD:
1671 case SVM_EXIT_MONITOR:
1672 if (!ghcb_rax_is_valid(ghcb) ||
1673 !ghcb_rcx_is_valid(ghcb) ||
1674 !ghcb_rdx_is_valid(ghcb))
1677 case SVM_EXIT_MWAIT:
1678 if (!ghcb_rax_is_valid(ghcb) ||
1679 !ghcb_rcx_is_valid(ghcb))
1682 case SVM_VMGEXIT_MMIO_READ:
1683 case SVM_VMGEXIT_MMIO_WRITE:
1684 if (!ghcb_sw_scratch_is_valid(ghcb))
1687 case SVM_VMGEXIT_NMI_COMPLETE:
1688 case SVM_VMGEXIT_AP_HLT_LOOP:
1689 case SVM_VMGEXIT_AP_JUMP_TABLE:
1690 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
1701 if (ghcb->ghcb_usage) {
1702 vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
1705 vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
1710 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1711 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
1712 vcpu->run->internal.ndata = 2;
1713 vcpu->run->internal.data[0] = exit_code;
1714 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
1719 static void pre_sev_es_run(struct vcpu_svm *svm)
1724 if (svm->ghcb_sa_free) {
1726 * The scratch area lives outside the GHCB, so there is a
1727 * buffer that, depending on the operation performed, may
1728 * need to be synced, then freed.
1730 if (svm->ghcb_sa_sync) {
1731 kvm_write_guest(svm->vcpu.kvm,
1732 ghcb_get_sw_scratch(svm->ghcb),
1733 svm->ghcb_sa, svm->ghcb_sa_len);
1734 svm->ghcb_sa_sync = false;
1737 kfree(svm->ghcb_sa);
1738 svm->ghcb_sa = NULL;
1739 svm->ghcb_sa_free = false;
1742 trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->ghcb);
1744 sev_es_sync_to_ghcb(svm);
1746 kvm_vcpu_unmap(&svm->vcpu, &svm->ghcb_map, true);
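/*
 * Per-VMRUN setup for an SEV guest: let SEV-ES release the GHCB mapping,
 * then install the guest's ASID in the VMCB and request an ASID TLB flush
 * unless this VMCB already ran with this ASID on this CPU.
 */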
1750 void pre_sev_run(struct vcpu_svm *svm, int cpu)
1752 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
1753 int asid = sev_get_asid(svm->vcpu.kvm);
1755 /* Perform any SEV-ES pre-run actions */
1756 pre_sev_es_run(svm);
1758 /* Assign the ASID allocated to this SEV guest */
1764 * 1) a different VMCB for the same ASID is to be run on the same host CPU, or
1765 * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
1767 if (sd->sev_vmcbs[asid] == svm->vmcb &&
1768 svm->vcpu.arch.last_vmentry_cpu == cpu)
1771 sd->sev_vmcbs[asid] = svm->vmcb;
1772 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
1773 vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
1776 #define GHCB_SCRATCH_AREA_LIMIT (16ULL * PAGE_SIZE)
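/*
 * Resolve the scratch area referenced by the GHCB sw_scratch field: it must
 * either lie entirely within the GHCB shared buffer, or point at separate
 * guest memory that is copied into a kernel buffer (bounded by
 * GHCB_SCRATCH_AREA_LIMIT) and, if requested, written back before the vCPU
 * runs again.
 */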
1777 static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
1779 struct vmcb_control_area *control = &svm->vmcb->control;
1780 struct ghcb *ghcb = svm->ghcb;
1781 u64 ghcb_scratch_beg, ghcb_scratch_end;
1782 u64 scratch_gpa_beg, scratch_gpa_end;
1785 scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
1786 if (!scratch_gpa_beg) {
1787 pr_err("vmgexit: scratch gpa not provided\n");
1791 scratch_gpa_end = scratch_gpa_beg + len;
1792 if (scratch_gpa_end < scratch_gpa_beg) {
1793 pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
1794 len, scratch_gpa_beg);
1798 if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
1799 /* Scratch area begins within GHCB */
1800 ghcb_scratch_beg = control->ghcb_gpa +
1801 offsetof(struct ghcb, shared_buffer);
1802 ghcb_scratch_end = control->ghcb_gpa +
1803 offsetof(struct ghcb, reserved_1);
1806 * If the scratch area begins within the GHCB, it must be
1807 * completely contained in the GHCB shared buffer area.
1809 if (scratch_gpa_beg < ghcb_scratch_beg ||
1810 scratch_gpa_end > ghcb_scratch_end) {
1811 pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
1812 scratch_gpa_beg, scratch_gpa_end);
1816 scratch_va = (void *)svm->ghcb;
1817 scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
1820 * The guest memory must be read into a kernel buffer, so
1823 if (len > GHCB_SCRATCH_AREA_LIMIT) {
1824 pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
1825 len, GHCB_SCRATCH_AREA_LIMIT);
1828 scratch_va = kzalloc(len, GFP_KERNEL);
1832 if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
1833 /* Unable to copy scratch area from guest */
1834 pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
1841 * The scratch area is outside the GHCB. The operation will
1842 * dictate whether the buffer needs to be synced before running
1843 * the vCPU next time (i.e. a read was requested so the data
1844 * must be written back to the guest memory).
1846 svm->ghcb_sa_sync = sync;
1847 svm->ghcb_sa_free = true;
1850 svm->ghcb_sa = scratch_va;
1851 svm->ghcb_sa_len = len;
1856 static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
1859 svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
1860 svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
1863 static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
1865 return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
1868 static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
1870 svm->vmcb->control.ghcb_gpa = value;
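/*
 * Handle a VMGEXIT issued via the GHCB MSR protocol, used before the guest
 * has a GHCB page mapped: SEV info, CPUID and termination requests are
 * encoded directly in the GHCB MSR value.
 */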
1873 static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
1875 struct vmcb_control_area *control = &svm->vmcb->control;
1876 struct kvm_vcpu *vcpu = &svm->vcpu;
1880 ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;
1882 trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
1885 switch (ghcb_info) {
1886 case GHCB_MSR_SEV_INFO_REQ:
1887 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
1891 case GHCB_MSR_CPUID_REQ: {
1892 u64 cpuid_fn, cpuid_reg, cpuid_value;
1894 cpuid_fn = get_ghcb_msr_bits(svm,
1895 GHCB_MSR_CPUID_FUNC_MASK,
1896 GHCB_MSR_CPUID_FUNC_POS);
1898 /* Initialize the registers needed by the CPUID intercept */
1899 vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
1900 vcpu->arch.regs[VCPU_REGS_RCX] = 0;
1902 ret = svm_invoke_exit_handler(svm, SVM_EXIT_CPUID);
1908 cpuid_reg = get_ghcb_msr_bits(svm,
1909 GHCB_MSR_CPUID_REG_MASK,
1910 GHCB_MSR_CPUID_REG_POS);
1912 cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
1913 else if (cpuid_reg == 1)
1914 cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
1915 else if (cpuid_reg == 2)
1916 cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
1918 cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];
1920 set_ghcb_msr_bits(svm, cpuid_value,
1921 GHCB_MSR_CPUID_VALUE_MASK,
1922 GHCB_MSR_CPUID_VALUE_POS);
1924 set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
1929 case GHCB_MSR_TERM_REQ: {
1930 u64 reason_set, reason_code;
1932 reason_set = get_ghcb_msr_bits(svm,
1933 GHCB_MSR_TERM_REASON_SET_MASK,
1934 GHCB_MSR_TERM_REASON_SET_POS);
1935 reason_code = get_ghcb_msr_bits(svm,
1936 GHCB_MSR_TERM_REASON_MASK,
1937 GHCB_MSR_TERM_REASON_POS);
1938 pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
1939 reason_set, reason_code);
1946 trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
1947 control->ghcb_gpa, ret);
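/*
 * Top-level #VMGEXIT handler: service the MSR protocol if the GHCB GPA
 * carries protocol information bits, otherwise map and validate the guest's
 * GHCB and dispatch the requested exit code.
 */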
1952 int sev_handle_vmgexit(struct vcpu_svm *svm)
1954 struct vmcb_control_area *control = &svm->vmcb->control;
1955 u64 ghcb_gpa, exit_code;
1959 /* Validate the GHCB */
1960 ghcb_gpa = control->ghcb_gpa;
1961 if (ghcb_gpa & GHCB_MSR_INFO_MASK)
1962 return sev_handle_vmgexit_msr_protocol(svm);
1965 vcpu_unimpl(&svm->vcpu, "vmgexit: GHCB gpa is not set\n");
1969 if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->ghcb_map)) {
1970 /* Unable to map GHCB from guest */
1971 vcpu_unimpl(&svm->vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
1976 svm->ghcb = svm->ghcb_map.hva;
1977 ghcb = svm->ghcb_map.hva;
1979 trace_kvm_vmgexit_enter(svm->vcpu.vcpu_id, ghcb);
1981 exit_code = ghcb_get_sw_exit_code(ghcb);
1983 ret = sev_es_validate_vmgexit(svm);
1987 sev_es_sync_from_ghcb(svm);
1988 ghcb_set_sw_exit_info_1(ghcb, 0);
1989 ghcb_set_sw_exit_info_2(ghcb, 0);
1992 switch (exit_code) {
1993 case SVM_VMGEXIT_MMIO_READ:
1994 if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
1997 ret = kvm_sev_es_mmio_read(&svm->vcpu,
1998 control->exit_info_1,
1999 control->exit_info_2,
2002 case SVM_VMGEXIT_MMIO_WRITE:
2003 if (!setup_vmgexit_scratch(svm, false, control->exit_info_2))
2006 ret = kvm_sev_es_mmio_write(&svm->vcpu,
2007 control->exit_info_1,
2008 control->exit_info_2,
2011 case SVM_VMGEXIT_NMI_COMPLETE:
2012 ret = svm_invoke_exit_handler(svm, SVM_EXIT_IRET);
2014 case SVM_VMGEXIT_AP_HLT_LOOP:
2015 ret = kvm_emulate_ap_reset_hold(&svm->vcpu);
2017 case SVM_VMGEXIT_AP_JUMP_TABLE: {
2018 struct kvm_sev_info *sev = &to_kvm_svm(svm->vcpu.kvm)->sev_info;
2020 switch (control->exit_info_1) {
2022 /* Set AP jump table address */
2023 sev->ap_jump_table = control->exit_info_2;
2026 /* Get AP jump table address */
2027 ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
2030 pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
2031 control->exit_info_1);
2032 ghcb_set_sw_exit_info_1(ghcb, 1);
2033 ghcb_set_sw_exit_info_2(ghcb,
2035 SVM_EVTINJ_TYPE_EXEPT |
2042 case SVM_VMGEXIT_UNSUPPORTED_EVENT:
2043 vcpu_unimpl(&svm->vcpu,
2044 "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
2045 control->exit_info_1, control->exit_info_2);
2048 ret = svm_invoke_exit_handler(svm, exit_code);
2054 int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
2056 if (!setup_vmgexit_scratch(svm, in, svm->vmcb->control.exit_info_2))
2059 return kvm_sev_es_string_io(&svm->vcpu, size, port,
2060 svm->ghcb_sa, svm->ghcb_sa_len, in);
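/*
 * SEV-ES specific VMCB setup: point the VMCB at the separate VMSA page
 * (without the encryption mask, since hardware accesses it with the guest
 * key) and replace register intercepts the hypervisor can no longer service
 * with the SEV-ES trap intercepts.
 */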
2063 void sev_es_init_vmcb(struct vcpu_svm *svm)
2065 struct kvm_vcpu *vcpu = &svm->vcpu;
2067 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
2068 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
2071 * An SEV-ES guest requires a VMSA area that is separate from the
2072 * VMCB page. Do not include the encryption mask on the VMSA physical
2073 * address since hardware will access it using the guest key.
2075 svm->vmcb->control.vmsa_pa = __pa(svm->vmsa);
2077 /* Can't intercept CR register access, HV can't modify CR registers */
2078 svm_clr_intercept(svm, INTERCEPT_CR0_READ);
2079 svm_clr_intercept(svm, INTERCEPT_CR4_READ);
2080 svm_clr_intercept(svm, INTERCEPT_CR8_READ);
2081 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
2082 svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
2083 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
2085 svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);
2087 /* Track EFER/CR register changes */
2088 svm_set_intercept(svm, TRAP_EFER_WRITE);
2089 svm_set_intercept(svm, TRAP_CR0_WRITE);
2090 svm_set_intercept(svm, TRAP_CR4_WRITE);
2091 svm_set_intercept(svm, TRAP_CR8_WRITE);
2093 /* No support for enable_vmware_backdoor */
2094 clr_exception_intercept(svm, GP_VECTOR);
2096 /* Can't intercept XSETBV, HV can't modify XCR0 directly */
2097 svm_clr_intercept(svm, INTERCEPT_XSETBV);
2099 /* Clear intercepts on selected MSRs */
2100 set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
2101 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
2102 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
2103 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
2104 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
2105 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
2108 void sev_es_create_vcpu(struct vcpu_svm *svm)
2111 * Set the GHCB MSR value as per the GHCB specification when creating
2112 * a vCPU for an SEV-ES guest.
2114 set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
2119 void sev_es_prepare_guest_switch(struct vcpu_svm *svm, unsigned int cpu)
2121 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
2122 struct vmcb_save_area *hostsa;
2125 * For an SEV-ES guest, hardware will restore the host state on VMEXIT,
2126 * of which one step is to perform a VMLOAD. Since hardware does not
2127 * perform a VMSAVE on VMRUN, the host savearea must be updated.
2129 vmsave(__sme_page_pa(sd->save_area));
2131 /* XCR0 is restored on VMEXIT, save the current host value */
2132 hostsa = (struct vmcb_save_area *)(page_address(sd->save_area) + 0x400);
2133 hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
2135 /* PKRU is restored on VMEXIT, save the current host value */
2136 hostsa->pkru = read_pkru();
2138 /* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
2139 hostsa->xss = host_xss;
2142 void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
2144 struct vcpu_svm *svm = to_svm(vcpu);
2146 /* First SIPI: Use the values as initially set by the VMM */
2147 if (!svm->received_first_sipi) {
2148 svm->received_first_sipi = true;
2153 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
2154 * the guest will set the CS and RIP. Set SW_EXIT_INFO_2 to a non-zero value.
2157 ghcb_set_sw_exit_info_2(svm->ghcb, 1);