// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include "x86.h"
#include "svm.h"
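/*
 * SEV ASID state: hardware supports ASIDs 1..max_sev_asid for encrypted
 * guests, and plain SEV guests must use min_sev_asid..max_sev_asid.
 * Allocated ASIDs live in sev_asid_bitmap; ASIDs freed by dead guests sit
 * in sev_reclaim_asid_bitmap until a DF_FLUSH makes them safe to reuse.
 */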
static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
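/* A pinned memory region registered via KVM_MEMORY_ENCRYPT_REG_REGION. */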
struct enc_region {
        struct list_head list;
        unsigned long npages;
        struct page **pages;
        unsigned long uaddr;
        unsigned long size;
};
static int sev_flush_asids(void)
{
        int ret, error = 0;

        /*
         * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
         * so it must be guarded.
         */
        down_write(&sev_deactivate_lock);

        wbinvd_on_all_cpus();
        ret = sev_guest_df_flush(&error);

        up_write(&sev_deactivate_lock);

        if (ret)
                pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

        return ret;
}
/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(void)
{
        int pos;

        /* Check if there are any ASIDs to reclaim before performing a flush */
        pos = find_next_bit(sev_reclaim_asid_bitmap,
                            max_sev_asid, min_sev_asid - 1);
        if (pos >= max_sev_asid)
                return false;

        if (sev_flush_asids())
                return false;

        bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
                   max_sev_asid);
        bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);

        return true;
}
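/*
 * Allocate a fresh ASID in [min_sev_asid, max_sev_asid].  If the bitmap is
 * exhausted, try once to recycle ASIDs freed by dead guests (which requires
 * the WBINVD + DF_FLUSH in __sev_recycle_asids()) before giving up.  The
 * returned ASID is 1-based (bit position + 1).
 */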
static int sev_asid_new(void)
{
        bool retry = true;
        int pos;

        mutex_lock(&sev_bitmap_lock);

        /*
         * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
         */
again:
        pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
        if (pos >= max_sev_asid) {
                if (retry && __sev_recycle_asids()) {
                        retry = false;
                        goto again;
                }
                mutex_unlock(&sev_bitmap_lock);
                return -EBUSY;
        }

        __set_bit(pos, sev_asid_bitmap);

        mutex_unlock(&sev_bitmap_lock);

        return pos + 1;
}
static int sev_get_asid(struct kvm *kvm)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->asid;
}
static void sev_asid_free(int asid)
{
        struct svm_cpu_data *sd;
        int cpu, pos;

        mutex_lock(&sev_bitmap_lock);

        pos = asid - 1;
        __set_bit(pos, sev_reclaim_asid_bitmap);

        for_each_possible_cpu(cpu) {
                sd = per_cpu(svm_data, cpu);
                sd->sev_vmcbs[pos] = NULL;
        }

        mutex_unlock(&sev_bitmap_lock);
}
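/*
 * Tear down the firmware state for a guest handle: DEACTIVATE detaches the
 * handle from its ASID, then DECOMMISSION releases the firmware context.
 * DEACTIVATE is taken under sev_deactivate_lock so it cannot race the
 * WBINVD/DF_FLUSH sequence used when recycling ASIDs.
 */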
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
        struct sev_data_decommission *decommission;
        struct sev_data_deactivate *data;

        if (!handle)
                return;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return;

        /* deactivate handle */
        data->handle = handle;

        /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
        down_read(&sev_deactivate_lock);
        sev_guest_deactivate(data, NULL);
        up_read(&sev_deactivate_lock);

        kfree(data);

        decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
        if (!decommission)
                return;

        /* decommission handle */
        decommission->handle = handle;
        sev_guest_decommission(decommission, NULL);

        kfree(decommission);
}
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        int asid, ret;

        ret = -EBUSY;
        if (unlikely(sev->active))
                return ret;

        asid = sev_asid_new();
        if (asid < 0)
                return ret;

        ret = sev_platform_init(&argp->error);
        if (ret)
                goto e_free;

        sev->active = true;
        sev->asid = asid;
        INIT_LIST_HEAD(&sev->regions_list);

        return 0;

e_free:
        sev_asid_free(asid);
        return ret;
}
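/*
 * ACTIVATE binds the firmware handle returned by LAUNCH_START to the
 * hardware ASID allocated in sev_guest_init(); subsequent SEV commands and
 * VMRUNs for this guest use that association.
 */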
static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
        struct sev_data_activate *data;
        int asid = sev_get_asid(kvm);
        int ret;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        /* activate ASID on the given handle */
        data->handle = handle;
        data->asid   = asid;
        ret = sev_guest_activate(data, error);
        kfree(data);

        return ret;
}
static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
        struct fd f;
        int ret;

        f = fdget(fd);
        if (!f.file)
                return -EBADF;

        ret = sev_issue_cmd_external_user(f.file, id, data, error);

        fdput(f);
        return ret;
}
static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return __sev_issue_cmd(sev->fd, id, data, error);
}
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_start *start;
        struct kvm_sev_launch_start params;
        void *dh_blob, *session_blob;
        int *error = &argp->error;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
        if (!start)
                return -ENOMEM;

        dh_blob = NULL;
        if (params.dh_uaddr) {
                dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
                if (IS_ERR(dh_blob)) {
                        ret = PTR_ERR(dh_blob);
                        goto e_free;
                }

                start->dh_cert_address = __sme_set(__pa(dh_blob));
                start->dh_cert_len = params.dh_len;
        }

        session_blob = NULL;
        if (params.session_uaddr) {
                session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
                if (IS_ERR(session_blob)) {
                        ret = PTR_ERR(session_blob);
                        goto e_free_dh;
                }

                start->session_address = __sme_set(__pa(session_blob));
                start->session_len = params.session_len;
        }

        start->handle = params.handle;
        start->policy = params.policy;

        /* create memory encryption context */
        ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
        if (ret)
                goto e_free_session;

        /* Bind ASID to this guest */
        ret = sev_bind_asid(kvm, start->handle, error);
        if (ret)
                goto e_free_session;

        /* return handle to userspace */
        params.handle = start->handle;
        if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
                sev_unbind_asid(kvm, start->handle);
                ret = -EFAULT;
                goto e_free_session;
        }

        sev->handle = start->handle;
        sev->fd = argp->sev_fd;

e_free_session:
        kfree(session_blob);
e_free_dh:
        kfree(dh_blob);
e_free:
        kfree(start);
        return ret;
}
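/*
 * Pin the user pages backing [uaddr, uaddr + ulen) and return the page
 * array (kmalloc'd or vmalloc'd depending on size).  Pinned pages are
 * charged against RLIMIT_MEMLOCK via sev->pages_locked; callers must
 * release them with sev_unpin_memory().
 */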
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
                                    unsigned long ulen, unsigned long *n,
                                    int write)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        unsigned long npages, npinned, size;
        unsigned long locked, lock_limit;
        struct page **pages;
        unsigned long first, last;

        if (ulen == 0 || uaddr + ulen < uaddr)
                return NULL;

        /* Calculate number of pages. */
        first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
        last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
        npages = (last - first + 1);

        locked = sev->pages_locked + npages;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
                pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
                return NULL;
        }

        /* Avoid using vmalloc for smaller buffers. */
        size = npages * sizeof(struct page *);
        if (size > PAGE_SIZE)
                pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
                                  PAGE_KERNEL);
        else
                pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

        if (!pages)
                return NULL;

        /* Pin the user virtual address. */
        npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
        if (npinned != npages) {
                pr_err("SEV: Failure locking %lu pages.\n", npages);
                goto err;
        }

        *n = npages;
        sev->pages_locked = locked;

        return pages;

err:
        if (npinned > 0)
                release_pages(pages, npinned);

        kvfree(pages);
        return NULL;
}
static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
                             unsigned long npages)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        release_pages(pages, npages);
        kvfree(pages);
        sev->pages_locked -= npages;
}
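/*
 * Flush every cache line of each page.  The pages may be highmem, so each
 * one is temporarily mapped with kmap_atomic() before CLFLUSH.  This evicts
 * any stale cache lines tagged with the wrong C-bit before the PSP or the
 * guest touches the memory.
 */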
static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
        unsigned long i;
        uint8_t *page_virtual;

        if (npages == 0 || pages == NULL)
                return;

        for (i = 0; i < npages; i++) {
                page_virtual = kmap_atomic(pages[i]);
                clflush_cache_range(page_virtual, PAGE_SIZE);
                kunmap_atomic(page_virtual);
        }
}
static unsigned long get_num_contig_pages(unsigned long idx,
                                struct page **inpages, unsigned long npages)
{
        unsigned long paddr, next_paddr;
        unsigned long i = idx + 1, pages = 1;

        /* find the number of contiguous pages starting from idx */
        paddr = __sme_page_pa(inpages[idx]);
        while (i < npages) {
                next_paddr = __sme_page_pa(inpages[i++]);
                if ((paddr + PAGE_SIZE) == next_paddr) {
                        pages++;
                        paddr = next_paddr;
                        continue;
                }
                break;
        }

        return pages;
}
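/*
 * Example: for pinned pages at system-physical 0x1000, 0x2000 and 0x5000
 * (ignoring the C-bit), get_num_contig_pages(0, inpages, 3) returns 2,
 * since 0x1000 and 0x2000 are adjacent but 0x5000 is not.
 */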
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_launch_update_data params;
        struct sev_data_launch_update_data *data;
        struct page **inpages;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        vaddr = params.uaddr;
        size = params.len;
        vaddr_end = vaddr + size;

        /* Lock the user memory. */
        inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
        if (!inpages) {
                ret = -ENOMEM;
                goto e_free;
        }

        /*
         * The LAUNCH_UPDATE command will perform in-place encryption of the
         * memory content (i.e. it will write the same memory region with C=1).
         * It's possible that the cache may contain the data with C=0, i.e.,
         * unencrypted, so invalidate it first.
         */
        sev_clflush_pages(inpages, npages);

        for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
                int offset, len;

                /*
                 * If the user buffer is not page-aligned, calculate the offset
                 * within the page.
                 */
                offset = vaddr & (PAGE_SIZE - 1);

                /* Calculate the number of pages that can be encrypted in one go. */
                pages = get_num_contig_pages(i, inpages, npages);

                len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

                data->handle = sev->handle;
                data->len = len;
                data->address = __sme_page_pa(inpages[i]) + offset;
                ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
                if (ret)
                        goto e_unpin;

                size -= len;
                next_vaddr = vaddr + len;
        }

e_unpin:
        /* content of memory is updated, mark pages dirty */
        for (i = 0; i < npages; i++) {
                set_page_dirty_lock(inpages[i]);
                mark_page_accessed(inpages[i]);
        }
        /* unlock the user pages */
        sev_unpin_memory(kvm, inpages, npages);
e_free:
        kfree(data);
        return ret;
}
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        void __user *measure = (void __user *)(uintptr_t)argp->data;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_measure *data;
        struct kvm_sev_launch_measure params;
        void __user *p = NULL;
        void *blob = NULL;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, measure, sizeof(params)))
                return -EFAULT;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        /* User wants to query the blob length */
        if (!params.len)
                goto cmd;

        p = (void __user *)(uintptr_t)params.uaddr;
        if (p) {
                if (params.len > SEV_FW_BLOB_MAX_SIZE) {
                        ret = -EINVAL;
                        goto e_free;
                }

                ret = -ENOMEM;
                blob = kmalloc(params.len, GFP_KERNEL);
                if (!blob)
                        goto e_free;

                data->address = __psp_pa(blob);
                data->len = params.len;
        }

cmd:
        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);

        /*
         * If we queried only the blob length, the firmware responded with the
         * expected data.
         */
        if (!params.len)
                goto done;

        if (ret)
                goto e_free_blob;

        if (blob) {
                if (copy_to_user(p, blob, params.len))
                        ret = -EFAULT;
        }

done:
        params.len = data->len;
        if (copy_to_user(measure, &params, sizeof(params)))
                ret = -EFAULT;
e_free_blob:
        kfree(blob);
e_free:
        kfree(data);
        return ret;
}
static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_finish *data;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);

        kfree(data);
        return ret;
}
static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_guest_status params;
        struct sev_data_guest_status *data;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
        if (ret)
                goto e_free;

        params.policy = data->policy;
        params.state = data->state;
        params.handle = data->handle;

        if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
                ret = -EFAULT;

e_free:
        kfree(data);
        return ret;
}
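/*
 * Issue DBG_DECRYPT or DBG_ENCRYPT for a source/destination pair of system
 * physical addresses.  The firmware operates on 16-byte aligned regions, so
 * the wrappers below take care of alignment and intermediate buffers.
 */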
static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
                               unsigned long dst, int size,
                               int *error, bool enc)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_dbg *data;
        int ret;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        data->handle = sev->handle;
        data->dst_addr = dst;
        data->src_addr = src;
        data->len = size;

        ret = sev_issue_cmd(kvm,
                            enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
                            data, error);
        kfree(data);
        return ret;
}
static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
                             unsigned long dst_paddr, int sz, int *err)
{
        int offset;

        /*
         * It's safe to read more than we are asked; the caller should ensure
         * that the destination has enough space.
         */
        offset = src_paddr & 15;
        src_paddr = round_down(src_paddr, 16);
        sz = round_up(sz + offset, 16);

        return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}
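/*
 * Example: src_paddr = 0x1008 and sz = 20 gives offset = 8, a source rounded
 * down to 0x1000, and a transfer length of round_up(28, 16) = 32 bytes; the
 * caller's destination buffer must therefore have room for the overread.
 */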
static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
                                  unsigned long __user dst_uaddr,
                                  unsigned long dst_paddr,
                                  int size, int *err)
{
        struct page *tpage = NULL;
        int ret, offset;

        /* if the inputs are not 16-byte aligned then use an intermediate buffer */
        if (!IS_ALIGNED(dst_paddr, 16) ||
            !IS_ALIGNED(paddr,     16) ||
            !IS_ALIGNED(size,      16)) {
                tpage = (void *)alloc_page(GFP_KERNEL);
                if (!tpage)
                        return -ENOMEM;

                dst_paddr = __sme_page_pa(tpage);
        }

        ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
        if (ret)
                goto e_free;

        if (tpage) {
                offset = paddr & 15;
                if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
                                 page_address(tpage) + offset, size))
                        ret = -EFAULT;
        }

e_free:
        if (tpage)
                __free_page(tpage);

        return ret;
}
static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
                                  unsigned long __user vaddr,
                                  unsigned long dst_paddr,
                                  unsigned long __user dst_vaddr,
                                  int size, int *error)
{
        struct page *src_tpage = NULL;
        struct page *dst_tpage = NULL;
        int ret, len = size;

        /* If source buffer is not aligned then use an intermediate buffer */
        if (!IS_ALIGNED(vaddr, 16)) {
                src_tpage = alloc_page(GFP_KERNEL);
                if (!src_tpage)
                        return -ENOMEM;

                if (copy_from_user(page_address(src_tpage),
                                   (void __user *)(uintptr_t)vaddr, size)) {
                        __free_page(src_tpage);
                        return -EFAULT;
                }

                paddr = __sme_page_pa(src_tpage);
        }

        /*
         * If destination buffer or length is not aligned then do read-modify-write:
         * - decrypt destination in an intermediate buffer
         * - copy the source buffer in an intermediate buffer
         * - use the intermediate buffer as source buffer
         */
        if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
                int dst_offset;

                dst_tpage = alloc_page(GFP_KERNEL);
                if (!dst_tpage) {
                        ret = -ENOMEM;
                        goto e_free;
                }

                ret = __sev_dbg_decrypt(kvm, dst_paddr,
                                        __sme_page_pa(dst_tpage), size, error);
                if (ret)
                        goto e_free;

                /*
                 * If source is a kernel buffer then use memcpy(), otherwise
                 * copy_from_user().
                 */
                dst_offset = dst_paddr & 15;

                if (src_tpage)
                        memcpy(page_address(dst_tpage) + dst_offset,
                               page_address(src_tpage), size);
                else {
                        if (copy_from_user(page_address(dst_tpage) + dst_offset,
                                           (void __user *)(uintptr_t)vaddr, size)) {
                                ret = -EFAULT;
                                goto e_free;
                        }
                }

                paddr = __sme_page_pa(dst_tpage);
                dst_paddr = round_down(dst_paddr, 16);
                len = round_up(size, 16);
        }

        ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
        if (src_tpage)
                __free_page(src_tpage);
        if (dst_tpage)
                __free_page(dst_tpage);
        return ret;
}
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
        unsigned long vaddr, vaddr_end, next_vaddr;
        unsigned long dst_vaddr;
        struct page **src_p, **dst_p;
        struct kvm_sev_dbg debug;
        unsigned long n;
        unsigned int size;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
                return -EFAULT;

        if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
                return -EINVAL;
        if (!debug.dst_uaddr)
                return -EINVAL;

        vaddr = debug.src_uaddr;
        size = debug.len;
        vaddr_end = vaddr + size;
        dst_vaddr = debug.dst_uaddr;

        for (; vaddr < vaddr_end; vaddr = next_vaddr) {
                int len, s_off, d_off;

                /* lock userspace source and destination page */
                src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
                if (!src_p)
                        return -EFAULT;

                dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
                if (!dst_p) {
                        sev_unpin_memory(kvm, src_p, n);
                        return -EFAULT;
                }

                /*
                 * The DBG_{DE,EN}CRYPT commands will perform {de,en}cryption of the
                 * memory content (i.e. it will write the same memory region with C=1).
                 * It's possible that the cache may contain the data with C=0, i.e.,
                 * unencrypted, so invalidate it first.
                 */
                sev_clflush_pages(src_p, 1);
                sev_clflush_pages(dst_p, 1);

                /*
                 * Since the user buffer may not be page aligned, calculate the
                 * offset within the page.
                 */
                s_off = vaddr & ~PAGE_MASK;
                d_off = dst_vaddr & ~PAGE_MASK;
                len = min_t(size_t, (PAGE_SIZE - s_off), size);

                if (dec)
                        ret = __sev_dbg_decrypt_user(kvm,
                                                     __sme_page_pa(src_p[0]) + s_off,
                                                     dst_vaddr,
                                                     __sme_page_pa(dst_p[0]) + d_off,
                                                     len, &argp->error);
                else
                        ret = __sev_dbg_encrypt_user(kvm,
                                                     __sme_page_pa(src_p[0]) + s_off,
                                                     vaddr,
                                                     __sme_page_pa(dst_p[0]) + d_off,
                                                     dst_vaddr,
                                                     len, &argp->error);

                sev_unpin_memory(kvm, src_p, n);
                sev_unpin_memory(kvm, dst_p, n);

                if (ret)
                        goto err;

                next_vaddr = vaddr + len;
                dst_vaddr = dst_vaddr + len;
                size -= len;
        }
err:
        return ret;
}
static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_secret *data;
        struct kvm_sev_launch_secret params;
        struct page **pages;
        void *blob, *hdr;
        unsigned long n;
        int ret, offset;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
        if (!pages)
                return -ENOMEM;

        /*
         * The secret must be copied into a contiguous memory region; let's
         * verify that the userspace memory pages are contiguous before we
         * issue the command.
         */
        if (get_num_contig_pages(0, pages, n) != n) {
                ret = -EINVAL;
                goto e_unpin_memory;
        }

        ret = -ENOMEM;
        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                goto e_unpin_memory;

        offset = params.guest_uaddr & (PAGE_SIZE - 1);
        data->guest_address = __sme_page_pa(pages[0]) + offset;
        data->guest_len = params.guest_len;

        blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
        if (IS_ERR(blob)) {
                ret = PTR_ERR(blob);
                goto e_free;
        }

        data->trans_address = __psp_pa(blob);
        data->trans_len = params.trans_len;

        hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
        if (IS_ERR(hdr)) {
                ret = PTR_ERR(hdr);
                goto e_free_blob;
        }
        data->hdr_address = __psp_pa(hdr);
        data->hdr_len = params.hdr_len;

        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);

        kfree(hdr);
e_free_blob:
        kfree(blob);
e_free:
        kfree(data);
e_unpin_memory:
        sev_unpin_memory(kvm, pages, n);
        return ret;
}
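/*
 * All of the command handlers above are reached from userspace through the
 * KVM_MEMORY_ENCRYPT_OP ioctl on the VM fd.  A minimal sketch (illustrative
 * only; "vm_fd" and "sev_fd" are example names, fields as in <linux/kvm.h>):
 *
 *	struct kvm_sev_cmd cmd = {
 *		.id     = KVM_SEV_INIT,
 *		.sev_fd = sev_fd,	// open fd of /dev/sev
 *	};
 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 */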
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
        struct kvm_sev_cmd sev_cmd;
        int r;

        if (!svm_sev_enabled())
                return -ENOTTY;

        if (!argp)
                return 0;

        if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
                return -EFAULT;

        mutex_lock(&kvm->lock);

        switch (sev_cmd.id) {
        case KVM_SEV_INIT:
                r = sev_guest_init(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_START:
                r = sev_launch_start(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_UPDATE_DATA:
                r = sev_launch_update_data(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_MEASURE:
                r = sev_launch_measure(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_FINISH:
                r = sev_launch_finish(kvm, &sev_cmd);
                break;
        case KVM_SEV_GUEST_STATUS:
                r = sev_guest_status(kvm, &sev_cmd);
                break;
        case KVM_SEV_DBG_DECRYPT:
                r = sev_dbg_crypt(kvm, &sev_cmd, true);
                break;
        case KVM_SEV_DBG_ENCRYPT:
                r = sev_dbg_crypt(kvm, &sev_cmd, false);
                break;
        case KVM_SEV_LAUNCH_SECRET:
                r = sev_launch_secret(kvm, &sev_cmd);
                break;
        default:
                r = -EINVAL;
                goto out;
        }

        if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
                r = -EFAULT;

out:
        mutex_unlock(&kvm->lock);
        return r;
}
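/*
 * KVM_MEMORY_ENCRYPT_REG_REGION: pin a userspace range for the lifetime of
 * the VM and remember it on sev->regions_list so it can be unpinned on
 * unregister or VM destruction.  The pages must stay put because SEV
 * encrypts memory with a physical-address-based tweak, so encrypted guest
 * pages cannot be migrated or swapped underneath the guest.
 */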
int svm_register_enc_region(struct kvm *kvm,
                            struct kvm_enc_region *range)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct enc_region *region;
        int ret = 0;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
                return -EINVAL;

        region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
        if (!region)
                return -ENOMEM;

        region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
        if (!region->pages) {
                ret = -ENOMEM;
                goto e_free;
        }

        /*
         * The guest may change the memory encryption attribute from C=0 -> C=1
         * or vice versa for this memory range.  Let's make sure caches are
         * flushed to ensure that guest data gets written into memory with the
         * correct C-bit.
         */
        sev_clflush_pages(region->pages, region->npages);

        region->uaddr = range->addr;
        region->size = range->size;

        mutex_lock(&kvm->lock);
        list_add_tail(&region->list, &sev->regions_list);
        mutex_unlock(&kvm->lock);

        return ret;

e_free:
        kfree(region);
        return ret;
}
static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct list_head *head = &sev->regions_list;
        struct enc_region *i;

        list_for_each_entry(i, head, list) {
                if (i->uaddr == range->addr &&
                    i->size == range->size)
                        return i;
        }

        return NULL;
}
static void __unregister_enc_region_locked(struct kvm *kvm,
                                           struct enc_region *region)
{
        sev_unpin_memory(kvm, region->pages, region->npages);
        list_del(&region->list);
        kfree(region);
}
int svm_unregister_enc_region(struct kvm *kvm,
                              struct kvm_enc_region *range)
{
        struct enc_region *region;
        int ret;

        mutex_lock(&kvm->lock);

        if (!sev_guest(kvm)) {
                ret = -ENOTTY;
                goto failed;
        }

        region = find_enc_region(kvm, range);
        if (!region) {
                ret = -EINVAL;
                goto failed;
        }

        /*
         * Ensure that all guest tagged cache entries are flushed before
         * releasing the pages back to the system for use.  CLFLUSH will
         * not do this, so issue a WBINVD.
         */
        wbinvd_on_all_cpus();

        __unregister_enc_region_locked(kvm, region);

        mutex_unlock(&kvm->lock);
        return 0;

failed:
        mutex_unlock(&kvm->lock);
        return ret;
}
void sev_vm_destroy(struct kvm *kvm)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct list_head *head = &sev->regions_list;
        struct list_head *pos, *q;

        if (!sev_guest(kvm))
                return;

        mutex_lock(&kvm->lock);

        /*
         * Ensure that all guest tagged cache entries are flushed before
         * releasing the pages back to the system for use.  CLFLUSH will
         * not do this, so issue a WBINVD.
         */
        wbinvd_on_all_cpus();

        /*
         * If userspace was terminated before unregistering the memory regions
         * then let's unpin all the registered memory.
         */
        if (!list_empty(head)) {
                list_for_each_safe(pos, q, head) {
                        __unregister_enc_region_locked(kvm,
                                list_entry(pos, struct enc_region, list));
                }
        }

        mutex_unlock(&kvm->lock);

        sev_unbind_asid(kvm, sev->handle);
        sev_asid_free(sev->asid);
}
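/*
 * CPUID 0x8000001F reports the SEV ASID space: ECX gives the total number
 * of ASIDs available for encrypted guests (the maximum ASID), and EDX gives
 * the smallest ASID usable by a plain SEV guest.
 */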
int __init sev_hardware_setup(void)
{
        struct sev_user_data_status *status;
        int rc;

        /* Maximum number of encrypted guests supported simultaneously */
        max_sev_asid = cpuid_ecx(0x8000001F);

        if (!svm_sev_enabled())
                return 1;

        /* Minimum ASID value that should be used for SEV guest */
        min_sev_asid = cpuid_edx(0x8000001F);

        /* Initialize SEV ASID bitmaps */
        sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
        if (!sev_asid_bitmap)
                return 1;

        sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
        if (!sev_reclaim_asid_bitmap)
                return 1;

        status = kmalloc(sizeof(*status), GFP_KERNEL);
        if (!status)
                return 1;

        /*
         * Check SEV platform status.
         *
         * PLATFORM_STATUS can be called in any state; if we fail to query
         * the platform status, then either the PSP firmware does not support
         * the SEV feature or the SEV firmware is dead.
         */
        rc = sev_platform_status(status, NULL);
        if (rc)
                goto err;

        pr_info("SEV supported\n");

err:
        kfree(status);
        return rc;
}
void sev_hardware_teardown(void)
{
        if (!svm_sev_enabled())
                return;

        bitmap_free(sev_asid_bitmap);
        bitmap_free(sev_reclaim_asid_bitmap);

        sev_flush_asids();
}
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
        int asid = sev_get_asid(svm->vcpu.kvm);

        /* Assign the asid allocated with this SEV guest */
        svm->vmcb->control.asid = asid;

        /*
         * Flush guest TLB:
         *
         * 1) when different VMCB for the same ASID is to be run on the same
         *    host CPU.
         * 2) or this VMCB was executed on different host CPU in previous
         *    VMRUNs.
         */
        if (sd->sev_vmcbs[asid] == svm->vmcb &&
            svm->last_cpu == cpu)
                return;

        svm->last_cpu = cpu;
        sd->sev_vmcbs[asid] = svm->vmcb;
        svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
        mark_dirty(svm->vmcb, VMCB_ASID);
}