// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include "x86.h"
#include "svm.h"

static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};
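
/*
 * Flush guest-tagged cache lines on all CPUs and then issue a DF_FLUSH so
 * that deactivated ASIDs can safely be reused for new guests.
 */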

static int sev_flush_asids(void)
{
	int ret, error = 0;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);

	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);

	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(void)
{
	int pos;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	pos = find_next_bit(sev_reclaim_asid_bitmap,
			    max_sev_asid, min_sev_asid - 1);
	if (pos >= max_sev_asid)
		return false;

	if (sev_flush_asids())
		return false;

	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   max_sev_asid);
	bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);

	return true;
}
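
/*
 * Allocate a free SEV ASID from the bitmap, recycling reclaimed ASIDs
 * (after a DF_FLUSH) when none are immediately available.
 */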

static int sev_asid_new(void)
{
	bool retry = true;
	int pos;

	mutex_lock(&sev_bitmap_lock);

	/*
	 * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
	 */
again:
	pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
	if (pos >= max_sev_asid) {
		if (retry && __sev_recycle_asids()) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		return -EBUSY;
	}

	__set_bit(pos, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return pos + 1;
}

static int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

static void sev_asid_free(int asid)
{
	struct svm_cpu_data *sd;
	int cpu, pos;

	mutex_lock(&sev_bitmap_lock);

	pos = asid - 1;
	__set_bit(pos, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu(svm_data, cpu);
		sd->sev_vmcbs[pos] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);
}
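
/*
 * DEACTIVATE the firmware handle and then DECOMMISSION it, releasing the
 * guest context owned by the PSP firmware.
 */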

static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_decommission *decommission;
	struct sev_data_deactivate *data;

	if (!handle)
		return;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return;

	/* deactivate handle */
	data->handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(data, NULL);
	up_read(&sev_deactivate_lock);

	kfree(data);

	decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
	if (!decommission)
		return;

	/* decommission handle */
	decommission->handle = handle;
	sev_guest_decommission(decommission, NULL);

	kfree(decommission);
}
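
/* KVM_SEV_INIT: allocate an ASID for the guest and initialize the SEV platform. */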

static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	int asid, ret;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	asid = sev_asid_new();
	if (asid < 0)
		return ret;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	sev->active = true;
	sev->asid = asid;
	INIT_LIST_HEAD(&sev->regions_list);

	return 0;

e_free:
	sev_asid_free(asid);
	return ret;
}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	struct sev_data_activate *data;
	int asid = sev_get_asid(kvm);
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* activate ASID on the given handle */
	data->handle = handle;
	data->asid = asid;
	ret = sev_guest_activate(data, error);

	kfree(data);
	return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}
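
/*
 * KVM_SEV_LAUNCH_START: create the memory encryption context with the PSP
 * firmware and bind the returned handle to this VM's ASID.
 */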

static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start *start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
	if (!start)
		return -ENOMEM;

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob)) {
			ret = PTR_ERR(dh_blob);
			goto e_free;
		}

		start->dh_cert_address = __sme_set(__pa(dh_blob));
		start->dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start->session_address = __sme_set(__pa(session_blob));
		start->session_len = params.session_len;
	}

	start->handle = params.handle;
	start->policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start->handle, error);
	if (ret)
		goto e_free_session;

	/* return handle to userspace */
	params.handle = start->handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start->handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start->handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
e_free:
	kfree(start);
	return ret;
}
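
/*
 * Pin a userspace range in memory, charging the pages against the task's
 * RLIMIT_MEMLOCK, and return the array of pinned pages.
 */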

static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, npinned, size;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;

	if (ulen == 0 || uaddr + ulen < uaddr)
		return NULL;

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return NULL;
	}

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return NULL;

	/* Pin the user virtual address. */
	npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		release_pages(pages, npinned);

	kvfree(pages);
	return NULL;
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	release_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (npages == 0 || pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
	}
}
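
/*
 * Return the number of physically contiguous pages starting at @idx so that
 * a single firmware command can cover them in one go.
 */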

static unsigned long get_num_contig_pages(unsigned long idx,
					  struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}
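
/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: encrypt guest memory in place, one physically
 * contiguous run of pinned pages at a time.
 */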

static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data *data;
	struct page **inpages;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (!inpages) {
		ret = -ENOMEM;
		goto e_free;
	}

	/*
	 * The LAUNCH_UPDATE command will perform in-place encryption of the
	 * memory content (i.e., it will write the same memory region with C=1).
	 * It's possible that the cache may contain the data with C=0, i.e.,
	 * unencrypted, so invalidate it first.
	 */
	sev_clflush_pages(inpages, npages);

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data->handle = sev->handle;
		data->len = len;
		data->address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
e_free:
	kfree(data);
	return ret;
}
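
/*
 * KVM_SEV_LAUNCH_MEASURE: retrieve the launch measurement blob; a zero-length
 * query returns only the required blob size.
 */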

static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure *data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE) {
			ret = -EINVAL;
			goto e_free;
		}

		ret = -ENOMEM;
		blob = kmalloc(params.len, GFP_KERNEL);
		if (!blob)
			goto e_free;

		data->address = __psp_pa(blob);
		data->len = params.len;
	}

cmd:
	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);

	/*
	 * If we only queried the session length, the FW responded with the
	 * expected data.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data->len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
	return ret;
}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish *data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);

	kfree(data);
	return ret;
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status *data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
	if (ret)
		goto e_free;

	params.policy = data->policy;
	params.state = data->state;
	params.handle = data->handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;
e_free:
	kfree(data);
	return ret;
}

static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		return -ENOMEM;

	data->handle = sev->handle;
	data->dst_addr = dst;
	data->src_addr = src;
	data->len = size;

	ret = sev_issue_cmd(kvm,
			    enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			    data, error);
	kfree(data);
	return ret;
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than we are asked; the caller should ensure
	 * that the destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}
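
/*
 * Decrypt guest memory into a userspace buffer, bouncing through an
 * intermediate page when the addresses or the size are not 16-byte aligned.
 */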

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* if inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr, 16) ||
	    !IS_ALIGNED(size, 16)) {
		tpage = (void *)alloc_page(GFP_KERNEL);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
				 page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  unsigned long __user vaddr,
				  unsigned long dst_paddr,
				  unsigned long __user dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If the source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED(vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage),
				   (void __user *)(uintptr_t)vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If the destination buffer or length is not aligned, then do a
	 * read-modify-write:
	 * - decrypt the destination into an intermediate buffer
	 * - copy the source buffer into the intermediate buffer
	 * - use the intermediate buffer as the source buffer
	 */
	if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If the source is a kernel buffer then use memcpy(),
		 * otherwise copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   (void __user *)(uintptr_t)vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}
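
/*
 * Common handler for KVM_SEV_DBG_DECRYPT and KVM_SEV_DBG_ENCRYPT: walk the
 * source range a page at a time, pinning the source and destination pages
 * and issuing the debug command for each chunk.
 */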

static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (!src_p)
			return -EFAULT;

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (!dst_p) {
			sev_unpin_memory(kvm, src_p, n);
			return -EFAULT;
		}

		/*
		 * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of
		 * the memory content (i.e., it will write the same memory region
		 * with C=1). It's possible that the cache may contain the data
		 * with C=0, i.e., unencrypted, so invalidate it first.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since the user buffer may not be page aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}
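
/*
 * KVM_SEV_LAUNCH_SECRET: inject a secret into the guest using the packaged
 * header and transport blobs provided by userspace.
 */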

static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_secret *data;
	struct kvm_sev_launch_secret params;
	struct page **pages;
	void *blob, *hdr;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
	if (!pages)
		return -ENOMEM;

	/*
	 * The secret must be copied into a contiguous memory region; verify
	 * that the userspace memory pages are contiguous before issuing the
	 * command.
	 */
	if (get_num_contig_pages(0, pages, n) != n) {
		ret = -EINVAL;
		goto e_unpin_memory;
	}

	ret = -ENOMEM;
	data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
	if (!data)
		goto e_unpin_memory;

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data->guest_address = __sme_page_pa(pages[0]) + offset;
	data->guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto e_free;
	}

	data->trans_address = __psp_pa(blob);
	data->trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr)) {
		ret = PTR_ERR(hdr);
		goto e_free_blob;
	}
	data->hdr_address = __psp_pa(hdr);
	data->hdr_len = params.hdr_len;

	data->handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);

	kfree(hdr);

e_free_blob:
	kfree(blob);
e_free:
	kfree(data);
e_unpin_memory:
	sev_unpin_memory(kvm, pages, n);
	return ret;
}
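
/* Dispatch a KVM_MEMORY_ENCRYPT_OP ioctl to the matching SEV command handler. */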

int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
	int r;

	if (!svm_sev_enabled())
		return -ENOTTY;

	if (!argp)
		return 0;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
		return -EFAULT;

	mutex_lock(&kvm->lock);

	switch (sev_cmd.id) {
	case KVM_SEV_INIT:
		r = sev_guest_init(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_START:
		r = sev_launch_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_DATA:
		r = sev_launch_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_MEASURE:
		r = sev_launch_measure(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_FINISH:
		r = sev_launch_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_GUEST_STATUS:
		r = sev_guest_status(kvm, &sev_cmd);
		break;
	case KVM_SEV_DBG_DECRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, true);
		break;
	case KVM_SEV_DBG_ENCRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, false);
		break;
	case KVM_SEV_LAUNCH_SECRET:
		r = sev_launch_secret(kvm, &sev_cmd);
		break;
	default:
		r = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
		r = -EFAULT;

out:
	mutex_unlock(&kvm->lock);
	return r;
}
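
/*
 * KVM_MEMORY_ENCRYPT_REG_REGION: pin the userspace range and track it on the
 * VM's regions_list so that it stays resident for the life of the guest.
 */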

int svm_register_enc_region(struct kvm *kvm,
			    struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct enc_region *region;
	int ret = 0;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
	if (!region)
		return -ENOMEM;

	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
	if (!region->pages) {
		ret = -ENOMEM;
		goto e_free;
	}

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Make sure caches are flushed so
	 * that guest data gets written into memory with the correct C-bit.
	 */
	sev_clflush_pages(region->pages, region->npages);

	region->uaddr = range->addr;
	region->size = range->size;

	mutex_lock(&kvm->lock);
	list_add_tail(&region->list, &sev->regions_list);
	mutex_unlock(&kvm->lock);

	return ret;

e_free:
	kfree(region);
	return ret;
}

static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct enc_region *i;

	list_for_each_entry(i, head, list) {
		if (i->uaddr == range->addr &&
		    i->size == range->size)
			return i;
	}

	return NULL;
}

static void __unregister_enc_region_locked(struct kvm *kvm,
					   struct enc_region *region)
{
	sev_unpin_memory(kvm, region->pages, region->npages);
	list_del(&region->list);
	kfree(region);
}

int svm_unregister_enc_region(struct kvm *kvm,
			      struct kvm_enc_region *range)
{
	struct enc_region *region;
	int ret;

	mutex_lock(&kvm->lock);

	if (!sev_guest(kvm)) {
		ret = -ENOTTY;
		goto failed;
	}

	region = find_enc_region(kvm, range);
	if (!region) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	__unregister_enc_region_locked(kvm, region);

	mutex_unlock(&kvm->lock);
	return 0;

failed:
	mutex_unlock(&kvm->lock);
	return ret;
}

void sev_vm_destroy(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct list_head *pos, *q;

	if (!sev_guest(kvm))
		return;

	mutex_lock(&kvm->lock);

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	/*
	 * If userspace was terminated before unregistering the memory regions,
	 * then unpin all the registered memory.
	 */
	if (!list_empty(head)) {
		list_for_each_safe(pos, q, head) {
			__unregister_enc_region_locked(kvm,
				list_entry(pos, struct enc_region, list));
		}
	}

	mutex_unlock(&kvm->lock);

	sev_unbind_asid(kvm, sev->handle);
	sev_asid_free(sev->asid);
}
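
/* Probe SEV support from CPUID and firmware, and set up the ASID bitmaps. */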

int __init sev_hardware_setup(void)
{
	struct sev_user_data_status *status;
	int rc;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = cpuid_ecx(0x8000001F);

	if (!svm_sev_enabled())
		return 1;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = cpuid_edx(0x8000001F);

	/* Initialize SEV ASID bitmaps */
	sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	if (!sev_asid_bitmap)
		return 1;

	sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
	if (!sev_reclaim_asid_bitmap)
		return 1;

	status = kmalloc(sizeof(*status), GFP_KERNEL);
	if (!status)
		return 1;

	/*
	 * Check SEV platform status.
	 *
	 * PLATFORM_STATUS can be called in any state. If we failed to query
	 * the platform status, then either the PSP firmware does not support
	 * the SEV feature or the SEV firmware is dead.
	 */
	rc = sev_platform_status(status, NULL);
	if (rc)
		goto err;

	pr_info("SEV supported\n");

err:
	kfree(status);
	return rc;
}

void sev_hardware_teardown(void)
{
	if (!svm_sev_enabled())
		return;

	bitmap_free(sev_asid_bitmap);
	bitmap_free(sev_reclaim_asid_bitmap);

	sev_flush_asids();
}
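
/*
 * Called before VMRUN: program the guest's ASID into the VMCB and request an
 * ASID TLB flush when this ASID/VMCB pairing last ran elsewhere.
 */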

void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	int asid = sev_get_asid(svm->vcpu.kvm);

	/* Assign the asid allocated with this SEV guest */
	svm->vmcb->control.asid = asid;

	/*
	 * Flush guest TLB:
	 *
	 * 1) when different VMCB for the same ASID is to be run on the same host CPU.
	 * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
	 */
	if (sd->sev_vmcbs[asid] == svm->vmcb &&
	    svm->last_cpu == cpu)
		return;

	svm->last_cpu = cpu;
	sd->sev_vmcbs[asid] = svm->vmcb;
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	mark_dirty(svm->vmcb, VMCB_ASID);
}