kvm: x86: Move last_cpu into kvm_vcpu_arch as last_vmentry_cpu
[linux-2.6-microblaze.git] arch/x86/kvm/svm/sev.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * AMD SVM-SEV support
6  *
7  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
8  */
9
10 #include <linux/kvm_types.h>
11 #include <linux/kvm_host.h>
12 #include <linux/kernel.h>
13 #include <linux/highmem.h>
14 #include <linux/psp-sev.h>
15 #include <linux/pagemap.h>
16 #include <linux/swap.h>
17
18 #include "x86.h"
19 #include "svm.h"
20
21 static int sev_flush_asids(void);
22 static DECLARE_RWSEM(sev_deactivate_lock);
23 static DEFINE_MUTEX(sev_bitmap_lock);
24 unsigned int max_sev_asid;
25 static unsigned int min_sev_asid;
26 static unsigned long *sev_asid_bitmap;
27 static unsigned long *sev_reclaim_asid_bitmap;
28 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
29
30 struct enc_region {
31         struct list_head list;
32         unsigned long npages;
33         struct page **pages;
34         unsigned long uaddr;
35         unsigned long size;
36 };
37
38 static int sev_flush_asids(void)
39 {
40         int ret, error = 0;
41
42         /*
43          * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
44          * so it must be guarded.
45          */
46         down_write(&sev_deactivate_lock);
47
48         wbinvd_on_all_cpus();
49         ret = sev_guest_df_flush(&error);
50
51         up_write(&sev_deactivate_lock);
52
53         if (ret)
54                 pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
55
56         return ret;
57 }
58
59 /* Must be called with the sev_bitmap_lock held */
60 static bool __sev_recycle_asids(void)
61 {
62         int pos;
63
64         /* Check if there are any ASIDs to reclaim before performing a flush */
65         pos = find_next_bit(sev_reclaim_asid_bitmap,
66                             max_sev_asid, min_sev_asid - 1);
67         if (pos >= max_sev_asid)
68                 return false;
69
70         if (sev_flush_asids())
71                 return false;
72
73         bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
74                    max_sev_asid);
75         bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
76
77         return true;
78 }
79
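/*
 * Allocate an ASID for a new SEV guest from the [min_sev_asid, max_sev_asid]
 * range. If the range is exhausted, try recycling reclaimed ASIDs once
 * (which requires a WBINVD + DF_FLUSH) before giving up with -EBUSY.
 * Returns the 1-based ASID on success.
 */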
80 static int sev_asid_new(void)
81 {
82         bool retry = true;
83         int pos;
84
85         mutex_lock(&sev_bitmap_lock);
86
87         /*
88          * SEV-enabled guests must use an ASID from min_sev_asid to max_sev_asid.
89          */
90 again:
91         pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
92         if (pos >= max_sev_asid) {
93                 if (retry && __sev_recycle_asids()) {
94                         retry = false;
95                         goto again;
96                 }
97                 mutex_unlock(&sev_bitmap_lock);
98                 return -EBUSY;
99         }
100
101         __set_bit(pos, sev_asid_bitmap);
102
103         mutex_unlock(&sev_bitmap_lock);
104
105         return pos + 1;
106 }
107
108 static int sev_get_asid(struct kvm *kvm)
109 {
110         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
111
112         return sev->asid;
113 }
114
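/*
 * Freeing is deferred: the ASID is only marked for reclaim here and becomes
 * allocatable again after the flush performed by __sev_recycle_asids().
 * The corresponding cached per-CPU VMCB pointers are dropped as well.
 */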
115 static void sev_asid_free(int asid)
116 {
117         struct svm_cpu_data *sd;
118         int cpu, pos;
119
120         mutex_lock(&sev_bitmap_lock);
121
122         pos = asid - 1;
123         __set_bit(pos, sev_reclaim_asid_bitmap);
124
125         for_each_possible_cpu(cpu) {
126                 sd = per_cpu(svm_data, cpu);
127                 sd->sev_vmcbs[pos] = NULL;
128         }
129
130         mutex_unlock(&sev_bitmap_lock);
131 }
132
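/*
 * Tear down the firmware association for a guest handle: DEACTIVATE the
 * handle (serialized against DF_FLUSH via sev_deactivate_lock) and then
 * DECOMMISSION it.
 */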
133 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
134 {
135         struct sev_data_decommission *decommission;
136         struct sev_data_deactivate *data;
137
138         if (!handle)
139                 return;
140
141         data = kzalloc(sizeof(*data), GFP_KERNEL);
142         if (!data)
143                 return;
144
145         /* deactivate handle */
146         data->handle = handle;
147
148         /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
149         down_read(&sev_deactivate_lock);
150         sev_guest_deactivate(data, NULL);
151         up_read(&sev_deactivate_lock);
152
153         kfree(data);
154
155         decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
156         if (!decommission)
157                 return;
158
159         /* decommission handle */
160         decommission->handle = handle;
161         sev_guest_decommission(decommission, NULL);
162
163         kfree(decommission);
164 }
165
166 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
167 {
168         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
169         int asid, ret;
170
171         ret = -EBUSY;
172         if (unlikely(sev->active))
173                 return ret;
174
175         asid = sev_asid_new();
176         if (asid < 0)
177                 return ret;
178
179         ret = sev_platform_init(&argp->error);
180         if (ret)
181                 goto e_free;
182
183         sev->active = true;
184         sev->asid = asid;
185         INIT_LIST_HEAD(&sev->regions_list);
186
187         return 0;
188
189 e_free:
190         sev_asid_free(asid);
191         return ret;
192 }
193
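/* Issue ACTIVATE to bind the guest's ASID to the firmware handle. */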
194 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
195 {
196         struct sev_data_activate *data;
197         int asid = sev_get_asid(kvm);
198         int ret;
199
200         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
201         if (!data)
202                 return -ENOMEM;
203
204         /* activate ASID on the given handle */
205         data->handle = handle;
206         data->asid   = asid;
207         ret = sev_guest_activate(data, error);
208         kfree(data);
209
210         return ret;
211 }
212
213 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
214 {
215         struct fd f;
216         int ret;
217
218         f = fdget(fd);
219         if (!f.file)
220                 return -EBADF;
221
222         ret = sev_issue_cmd_external_user(f.file, id, data, error);
223
224         fdput(f);
225         return ret;
226 }
227
228 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
229 {
230         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
231
232         return __sev_issue_cmd(sev->fd, id, data, error);
233 }
234
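/*
 * KVM_SEV_LAUNCH_START: copy the optional DH certificate and session blobs
 * from userspace, ask the firmware to create the memory encryption context,
 * bind the guest's ASID to the returned handle and report the handle back
 * to userspace.
 */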
235 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
236 {
237         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
238         struct sev_data_launch_start *start;
239         struct kvm_sev_launch_start params;
240         void *dh_blob, *session_blob;
241         int *error = &argp->error;
242         int ret;
243
244         if (!sev_guest(kvm))
245                 return -ENOTTY;
246
247         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
248                 return -EFAULT;
249
250         start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
251         if (!start)
252                 return -ENOMEM;
253
254         dh_blob = NULL;
255         if (params.dh_uaddr) {
256                 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
257                 if (IS_ERR(dh_blob)) {
258                         ret = PTR_ERR(dh_blob);
259                         goto e_free;
260                 }
261
262                 start->dh_cert_address = __sme_set(__pa(dh_blob));
263                 start->dh_cert_len = params.dh_len;
264         }
265
266         session_blob = NULL;
267         if (params.session_uaddr) {
268                 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
269                 if (IS_ERR(session_blob)) {
270                         ret = PTR_ERR(session_blob);
271                         goto e_free_dh;
272                 }
273
274                 start->session_address = __sme_set(__pa(session_blob));
275                 start->session_len = params.session_len;
276         }
277
278         start->handle = params.handle;
279         start->policy = params.policy;
280
281         /* create memory encryption context */
282         ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
283         if (ret)
284                 goto e_free_session;
285
286         /* Bind ASID to this guest */
287         ret = sev_bind_asid(kvm, start->handle, error);
288         if (ret)
289                 goto e_free_session;
290
291         /* return handle to userspace */
292         params.handle = start->handle;
293         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
294                 sev_unbind_asid(kvm, start->handle);
295                 ret = -EFAULT;
296                 goto e_free_session;
297         }
298
299         sev->handle = start->handle;
300         sev->fd = argp->sev_fd;
301
302 e_free_session:
303         kfree(session_blob);
304 e_free_dh:
305         kfree(dh_blob);
306 e_free:
307         kfree(start);
308         return ret;
309 }
310
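/*
 * Pin a userspace address range and return the array of pinned pages. The
 * pinned pages are charged against RLIMIT_MEMLOCK and tracked in
 * sev->pages_locked; an ERR_PTR() is returned on failure.
 */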
311 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
312                                     unsigned long ulen, unsigned long *n,
313                                     int write)
314 {
315         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
316         unsigned long npages, size;
317         int npinned;
318         unsigned long locked, lock_limit;
319         struct page **pages;
320         unsigned long first, last;
321
322         if (ulen == 0 || uaddr + ulen < uaddr)
323                 return ERR_PTR(-EINVAL);
324
325         /* Calculate number of pages. */
326         first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
327         last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
328         npages = (last - first + 1);
329
330         locked = sev->pages_locked + npages;
331         lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
332         if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
333                 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
334                 return ERR_PTR(-ENOMEM);
335         }
336
337         if (WARN_ON_ONCE(npages > INT_MAX))
338                 return ERR_PTR(-EINVAL);
339
340         /* Avoid using vmalloc for smaller buffers. */
341         size = npages * sizeof(struct page *);
342         if (size > PAGE_SIZE)
343                 pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
344         else
345                 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
346
347         if (!pages)
348                 return ERR_PTR(-ENOMEM);
349
350         /* Pin the user virtual address. */
351         npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
352         if (npinned != npages) {
353                 pr_err("SEV: Failure locking %lu pages.\n", npages);
354                 goto err;
355         }
356
357         *n = npages;
358         sev->pages_locked = locked;
359
360         return pages;
361
362 err:
363         if (npinned > 0) {
364                 unpin_user_pages(pages, npinned);
365                 npinned = -ENOMEM;
366         }
367
368         kvfree(pages);
369         return ERR_PTR(npinned);
370 }
371
372 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
373                              unsigned long npages)
374 {
375         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
376
377         unpin_user_pages(pages, npages);
378         kvfree(pages);
379         sev->pages_locked -= npages;
380 }
381
382 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
383 {
384         uint8_t *page_virtual;
385         unsigned long i;
386
387         if (npages == 0 || pages == NULL)
388                 return;
389
390         for (i = 0; i < npages; i++) {
391                 page_virtual = kmap_atomic(pages[i]);
392                 clflush_cache_range(page_virtual, PAGE_SIZE);
393                 kunmap_atomic(page_virtual);
394         }
395 }
396
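/*
 * Return the number of physically contiguous pages in @inpages starting at
 * @idx, so that contiguous ranges can be handed to the firmware in a single
 * command.
 */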
397 static unsigned long get_num_contig_pages(unsigned long idx,
398                                 struct page **inpages, unsigned long npages)
399 {
400         unsigned long paddr, next_paddr;
401         unsigned long i = idx + 1, pages = 1;
402
403         /* find the number of contiguous pages starting from idx */
404         paddr = __sme_page_pa(inpages[idx]);
405         while (i < npages) {
406                 next_paddr = __sme_page_pa(inpages[i++]);
407                 if ((paddr + PAGE_SIZE) == next_paddr) {
408                         pages++;
409                         paddr = next_paddr;
410                         continue;
411                 }
412                 break;
413         }
414
415         return pages;
416 }
417
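/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: pin the userspace buffer, flush it from the
 * caches and have the firmware encrypt it in place, batching physically
 * contiguous pages into single LAUNCH_UPDATE_DATA commands.
 */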
418 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
419 {
420         unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
421         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
422         struct kvm_sev_launch_update_data params;
423         struct sev_data_launch_update_data *data;
424         struct page **inpages;
425         int ret;
426
427         if (!sev_guest(kvm))
428                 return -ENOTTY;
429
430         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
431                 return -EFAULT;
432
433         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
434         if (!data)
435                 return -ENOMEM;
436
437         vaddr = params.uaddr;
438         size = params.len;
439         vaddr_end = vaddr + size;
440
441         /* Lock the user memory. */
442         inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
443         if (IS_ERR(inpages)) {
444                 ret = PTR_ERR(inpages);
445                 goto e_free;
446         }
447
448         /*
449          * The LAUNCH_UPDATE command will perform in-place encryption of the
450          * memory content (i.e. it will write the same memory region with C=1).
451          * It's possible that the cache may contain the data with C=0, i.e.,
452          * unencrypted, so invalidate it first.
453          */
454         sev_clflush_pages(inpages, npages);
455
456         for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
457                 int offset, len;
458
459                 /*
460                  * If the user buffer is not page-aligned, calculate the offset
461                  * within the page.
462                  */
463                 offset = vaddr & (PAGE_SIZE - 1);
464
465                 /* Calculate the number of pages that can be encrypted in one go. */
466                 pages = get_num_contig_pages(i, inpages, npages);
467
468                 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
469
470                 data->handle = sev->handle;
471                 data->len = len;
472                 data->address = __sme_page_pa(inpages[i]) + offset;
473                 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
474                 if (ret)
475                         goto e_unpin;
476
477                 size -= len;
478                 next_vaddr = vaddr + len;
479         }
480
481 e_unpin:
482         /* the memory content was updated, mark the pages dirty */
483         for (i = 0; i < npages; i++) {
484                 set_page_dirty_lock(inpages[i]);
485                 mark_page_accessed(inpages[i]);
486         }
487         /* unlock the user pages */
488         sev_unpin_memory(kvm, inpages, npages);
489 e_free:
490         kfree(data);
491         return ret;
492 }
493
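/*
 * KVM_SEV_LAUNCH_MEASURE: retrieve the launch measurement. A zero params.len
 * queries the required blob length instead of copying the measurement out.
 */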
494 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
495 {
496         void __user *measure = (void __user *)(uintptr_t)argp->data;
497         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
498         struct sev_data_launch_measure *data;
499         struct kvm_sev_launch_measure params;
500         void __user *p = NULL;
501         void *blob = NULL;
502         int ret;
503
504         if (!sev_guest(kvm))
505                 return -ENOTTY;
506
507         if (copy_from_user(&params, measure, sizeof(params)))
508                 return -EFAULT;
509
510         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
511         if (!data)
512                 return -ENOMEM;
513
514         /* User wants to query the blob length */
515         if (!params.len)
516                 goto cmd;
517
518         p = (void __user *)(uintptr_t)params.uaddr;
519         if (p) {
520                 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
521                         ret = -EINVAL;
522                         goto e_free;
523                 }
524
525                 ret = -ENOMEM;
526                 blob = kmalloc(params.len, GFP_KERNEL);
527                 if (!blob)
528                         goto e_free;
529
530                 data->address = __psp_pa(blob);
531                 data->len = params.len;
532         }
533
534 cmd:
535         data->handle = sev->handle;
536         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
537
538         /*
539          * If userspace only queried the blob length, the firmware has already filled in the required length in data->len.
540          */
541         if (!params.len)
542                 goto done;
543
544         if (ret)
545                 goto e_free_blob;
546
547         if (blob) {
548                 if (copy_to_user(p, blob, params.len))
549                         ret = -EFAULT;
550         }
551
552 done:
553         params.len = data->len;
554         if (copy_to_user(measure, &params, sizeof(params)))
555                 ret = -EFAULT;
556 e_free_blob:
557         kfree(blob);
558 e_free:
559         kfree(data);
560         return ret;
561 }
562
563 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
564 {
565         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
566         struct sev_data_launch_finish *data;
567         int ret;
568
569         if (!sev_guest(kvm))
570                 return -ENOTTY;
571
572         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
573         if (!data)
574                 return -ENOMEM;
575
576         data->handle = sev->handle;
577         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
578
579         kfree(data);
580         return ret;
581 }
582
583 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
584 {
585         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
586         struct kvm_sev_guest_status params;
587         struct sev_data_guest_status *data;
588         int ret;
589
590         if (!sev_guest(kvm))
591                 return -ENOTTY;
592
593         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
594         if (!data)
595                 return -ENOMEM;
596
597         data->handle = sev->handle;
598         ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
599         if (ret)
600                 goto e_free;
601
602         params.policy = data->policy;
603         params.state = data->state;
604         params.handle = data->handle;
605
606         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
607                 ret = -EFAULT;
608 e_free:
609         kfree(data);
610         return ret;
611 }
612
613 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
614                                unsigned long dst, int size,
615                                int *error, bool enc)
616 {
617         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
618         struct sev_data_dbg *data;
619         int ret;
620
621         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
622         if (!data)
623                 return -ENOMEM;
624
625         data->handle = sev->handle;
626         data->dst_addr = dst;
627         data->src_addr = src;
628         data->len = size;
629
630         ret = sev_issue_cmd(kvm,
631                             enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
632                             data, error);
633         kfree(data);
634         return ret;
635 }
636
637 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
638                              unsigned long dst_paddr, int sz, int *err)
639 {
640         int offset;
641
642         /*
643          * It's safe to read more than we were asked for; the caller should ensure
644          * that the destination has enough space.
645          */
646         src_paddr = round_down(src_paddr, 16);
647         offset = src_paddr & 15;
648         sz = round_up(sz + offset, 16);
649
650         return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
651 }
652
653 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
654                                   unsigned long __user dst_uaddr,
655                                   unsigned long dst_paddr,
656                                   int size, int *err)
657 {
658         struct page *tpage = NULL;
659         int ret, offset;
660
661         /* if the inputs are not 16-byte aligned then use an intermediate buffer */
662         if (!IS_ALIGNED(dst_paddr, 16) ||
663             !IS_ALIGNED(paddr,     16) ||
664             !IS_ALIGNED(size,      16)) {
665                 tpage = (void *)alloc_page(GFP_KERNEL);
666                 if (!tpage)
667                         return -ENOMEM;
668
669                 dst_paddr = __sme_page_pa(tpage);
670         }
671
672         ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
673         if (ret)
674                 goto e_free;
675
676         if (tpage) {
677                 offset = paddr & 15;
678                 if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
679                                  page_address(tpage) + offset, size))
680                         ret = -EFAULT;
681         }
682
683 e_free:
684         if (tpage)
685                 __free_page(tpage);
686
687         return ret;
688 }
689
690 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
691                                   unsigned long __user vaddr,
692                                   unsigned long dst_paddr,
693                                   unsigned long __user dst_vaddr,
694                                   int size, int *error)
695 {
696         struct page *src_tpage = NULL;
697         struct page *dst_tpage = NULL;
698         int ret, len = size;
699
700         /* If the source buffer is not 16-byte aligned then use an intermediate buffer */
701         if (!IS_ALIGNED(vaddr, 16)) {
702                 src_tpage = alloc_page(GFP_KERNEL);
703                 if (!src_tpage)
704                         return -ENOMEM;
705
706                 if (copy_from_user(page_address(src_tpage),
707                                 (void __user *)(uintptr_t)vaddr, size)) {
708                         __free_page(src_tpage);
709                         return -EFAULT;
710                 }
711
712                 paddr = __sme_page_pa(src_tpage);
713         }
714
715         /*
716          *  If the destination buffer or length is not 16-byte aligned, do a read-modify-write:
717          *   - decrypt the destination into an intermediate buffer
718          *   - copy the source buffer into the intermediate buffer
719          *   - use the intermediate buffer as the source buffer
720          */
721         if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
722                 int dst_offset;
723
724                 dst_tpage = alloc_page(GFP_KERNEL);
725                 if (!dst_tpage) {
726                         ret = -ENOMEM;
727                         goto e_free;
728                 }
729
730                 ret = __sev_dbg_decrypt(kvm, dst_paddr,
731                                         __sme_page_pa(dst_tpage), size, error);
732                 if (ret)
733                         goto e_free;
734
735                 /*
736          *  If the source is a kernel buffer then use memcpy(), otherwise
737                  *  copy_from_user().
738                  */
739                 dst_offset = dst_paddr & 15;
740
741                 if (src_tpage)
742                         memcpy(page_address(dst_tpage) + dst_offset,
743                                page_address(src_tpage), size);
744                 else {
745                         if (copy_from_user(page_address(dst_tpage) + dst_offset,
746                                            (void __user *)(uintptr_t)vaddr, size)) {
747                                 ret = -EFAULT;
748                                 goto e_free;
749                         }
750                 }
751
752                 paddr = __sme_page_pa(dst_tpage);
753                 dst_paddr = round_down(dst_paddr, 16);
754                 len = round_up(size, 16);
755         }
756
757         ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
758
759 e_free:
760         if (src_tpage)
761                 __free_page(src_tpage);
762         if (dst_tpage)
763                 __free_page(dst_tpage);
764         return ret;
765 }
766
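/*
 * KVM_SEV_DBG_{DECRYPT,ENCRYPT}: walk the source range one page at a time,
 * pinning the source and destination pages and using the bounce-buffer
 * helpers above to handle unaligned addresses and sizes.
 */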
767 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
768 {
769         unsigned long vaddr, vaddr_end, next_vaddr;
770         unsigned long dst_vaddr;
771         struct page **src_p, **dst_p;
772         struct kvm_sev_dbg debug;
773         unsigned long n;
774         unsigned int size;
775         int ret;
776
777         if (!sev_guest(kvm))
778                 return -ENOTTY;
779
780         if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
781                 return -EFAULT;
782
783         if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
784                 return -EINVAL;
785         if (!debug.dst_uaddr)
786                 return -EINVAL;
787
788         vaddr = debug.src_uaddr;
789         size = debug.len;
790         vaddr_end = vaddr + size;
791         dst_vaddr = debug.dst_uaddr;
792
793         for (; vaddr < vaddr_end; vaddr = next_vaddr) {
794                 int len, s_off, d_off;
795
796                 /* lock the userspace source and destination pages */
797                 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
798                 if (IS_ERR(src_p))
799                         return PTR_ERR(src_p);
800
801                 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
802                 if (IS_ERR(dst_p)) {
803                         sev_unpin_memory(kvm, src_p, n);
804                         return PTR_ERR(dst_p);
805                 }
806
807                 /*
808                  * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
809                  * memory content (i.e. it will write the same memory region with C=1).
810                  * It's possible that the cache may contain the data with C=0, i.e.,
811                  * unencrypted, so invalidate it first.
812                  */
813                 sev_clflush_pages(src_p, 1);
814                 sev_clflush_pages(dst_p, 1);
815
816                 /*
817                  * Since the user buffer may not be page-aligned, calculate the
818                  * offset within the page.
819                  */
820                 s_off = vaddr & ~PAGE_MASK;
821                 d_off = dst_vaddr & ~PAGE_MASK;
822                 len = min_t(size_t, (PAGE_SIZE - s_off), size);
823
824                 if (dec)
825                         ret = __sev_dbg_decrypt_user(kvm,
826                                                      __sme_page_pa(src_p[0]) + s_off,
827                                                      dst_vaddr,
828                                                      __sme_page_pa(dst_p[0]) + d_off,
829                                                      len, &argp->error);
830                 else
831                         ret = __sev_dbg_encrypt_user(kvm,
832                                                      __sme_page_pa(src_p[0]) + s_off,
833                                                      vaddr,
834                                                      __sme_page_pa(dst_p[0]) + d_off,
835                                                      dst_vaddr,
836                                                      len, &argp->error);
837
838                 sev_unpin_memory(kvm, src_p, n);
839                 sev_unpin_memory(kvm, dst_p, n);
840
841                 if (ret)
842                         goto err;
843
844                 next_vaddr = vaddr + len;
845                 dst_vaddr = dst_vaddr + len;
846                 size -= len;
847         }
848 err:
849         return ret;
850 }
851
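/*
 * KVM_SEV_LAUNCH_SECRET: inject a secret into the guest. The guest pages
 * must be physically contiguous; the header and transport blobs are copied
 * in from userspace before issuing LAUNCH_UPDATE_SECRET.
 */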
852 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
853 {
854         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
855         struct sev_data_launch_secret *data;
856         struct kvm_sev_launch_secret params;
857         struct page **pages;
858         void *blob, *hdr;
859         unsigned long n;
860         int ret, offset;
861
862         if (!sev_guest(kvm))
863                 return -ENOTTY;
864
865         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
866                 return -EFAULT;
867
868         pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
869         if (IS_ERR(pages))
870                 return PTR_ERR(pages);
871
872         /*
873          * The secret must be copied into a contiguous memory region, so verify
874          * that the userspace memory pages are contiguous before issuing the command.
875          */
876         if (get_num_contig_pages(0, pages, n) != n) {
877                 ret = -EINVAL;
878                 goto e_unpin_memory;
879         }
880
881         ret = -ENOMEM;
882         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
883         if (!data)
884                 goto e_unpin_memory;
885
886         offset = params.guest_uaddr & (PAGE_SIZE - 1);
887         data->guest_address = __sme_page_pa(pages[0]) + offset;
888         data->guest_len = params.guest_len;
889
890         blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
891         if (IS_ERR(blob)) {
892                 ret = PTR_ERR(blob);
893                 goto e_free;
894         }
895
896         data->trans_address = __psp_pa(blob);
897         data->trans_len = params.trans_len;
898
899         hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
900         if (IS_ERR(hdr)) {
901                 ret = PTR_ERR(hdr);
902                 goto e_free_blob;
903         }
904         data->hdr_address = __psp_pa(hdr);
905         data->hdr_len = params.hdr_len;
906
907         data->handle = sev->handle;
908         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
909
910         kfree(hdr);
911
912 e_free_blob:
913         kfree(blob);
914 e_free:
915         kfree(data);
916 e_unpin_memory:
917         sev_unpin_memory(kvm, pages, n);
918         return ret;
919 }
920
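/*
 * Top-level handler for the KVM_MEMORY_ENCRYPT_OP ioctl: dispatch the SEV
 * command under kvm->lock and copy the command structure (including any
 * firmware error code) back to userspace.
 */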
921 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
922 {
923         struct kvm_sev_cmd sev_cmd;
924         int r;
925
926         if (!svm_sev_enabled())
927                 return -ENOTTY;
928
929         if (!argp)
930                 return 0;
931
932         if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
933                 return -EFAULT;
934
935         mutex_lock(&kvm->lock);
936
937         switch (sev_cmd.id) {
938         case KVM_SEV_INIT:
939                 r = sev_guest_init(kvm, &sev_cmd);
940                 break;
941         case KVM_SEV_LAUNCH_START:
942                 r = sev_launch_start(kvm, &sev_cmd);
943                 break;
944         case KVM_SEV_LAUNCH_UPDATE_DATA:
945                 r = sev_launch_update_data(kvm, &sev_cmd);
946                 break;
947         case KVM_SEV_LAUNCH_MEASURE:
948                 r = sev_launch_measure(kvm, &sev_cmd);
949                 break;
950         case KVM_SEV_LAUNCH_FINISH:
951                 r = sev_launch_finish(kvm, &sev_cmd);
952                 break;
953         case KVM_SEV_GUEST_STATUS:
954                 r = sev_guest_status(kvm, &sev_cmd);
955                 break;
956         case KVM_SEV_DBG_DECRYPT:
957                 r = sev_dbg_crypt(kvm, &sev_cmd, true);
958                 break;
959         case KVM_SEV_DBG_ENCRYPT:
960                 r = sev_dbg_crypt(kvm, &sev_cmd, false);
961                 break;
962         case KVM_SEV_LAUNCH_SECRET:
963                 r = sev_launch_secret(kvm, &sev_cmd);
964                 break;
965         default:
966                 r = -EINVAL;
967                 goto out;
968         }
969
970         if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
971                 r = -EFAULT;
972
973 out:
974         mutex_unlock(&kvm->lock);
975         return r;
976 }
977
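/*
 * Pin a userspace memory region and track it on sev->regions_list so that
 * its pages stay resident (and can later be unpinned) for the lifetime of
 * the encrypted guest.
 */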
978 int svm_register_enc_region(struct kvm *kvm,
979                             struct kvm_enc_region *range)
980 {
981         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
982         struct enc_region *region;
983         int ret = 0;
984
985         if (!sev_guest(kvm))
986                 return -ENOTTY;
987
988         if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
989                 return -EINVAL;
990
991         region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
992         if (!region)
993                 return -ENOMEM;
994
995         region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
996         if (IS_ERR(region->pages)) {
997                 ret = PTR_ERR(region->pages);
998                 goto e_free;
999         }
1000
1001         /*
1002          * The guest may change the memory encryption attribute from C=0 -> C=1
1003          * or vice versa for this memory range. Let's make sure the caches are
1004          * flushed to ensure that guest data gets written into memory with the
1005          * correct C-bit.
1006          */
1007         sev_clflush_pages(region->pages, region->npages);
1008
1009         region->uaddr = range->addr;
1010         region->size = range->size;
1011
1012         mutex_lock(&kvm->lock);
1013         list_add_tail(&region->list, &sev->regions_list);
1014         mutex_unlock(&kvm->lock);
1015
1016         return ret;
1017
1018 e_free:
1019         kfree(region);
1020         return ret;
1021 }
1022
1023 static struct enc_region *
1024 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1025 {
1026         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1027         struct list_head *head = &sev->regions_list;
1028         struct enc_region *i;
1029
1030         list_for_each_entry(i, head, list) {
1031                 if (i->uaddr == range->addr &&
1032                     i->size == range->size)
1033                         return i;
1034         }
1035
1036         return NULL;
1037 }
1038
1039 static void __unregister_enc_region_locked(struct kvm *kvm,
1040                                            struct enc_region *region)
1041 {
1042         sev_unpin_memory(kvm, region->pages, region->npages);
1043         list_del(&region->list);
1044         kfree(region);
1045 }
1046
1047 int svm_unregister_enc_region(struct kvm *kvm,
1048                               struct kvm_enc_region *range)
1049 {
1050         struct enc_region *region;
1051         int ret;
1052
1053         mutex_lock(&kvm->lock);
1054
1055         if (!sev_guest(kvm)) {
1056                 ret = -ENOTTY;
1057                 goto failed;
1058         }
1059
1060         region = find_enc_region(kvm, range);
1061         if (!region) {
1062                 ret = -EINVAL;
1063                 goto failed;
1064         }
1065
1066         /*
1067          * Ensure that all guest tagged cache entries are flushed before
1068          * releasing the pages back to the system for use. CLFLUSH will
1069          * not do this, so issue a WBINVD.
1070          */
1071         wbinvd_on_all_cpus();
1072
1073         __unregister_enc_region_locked(kvm, region);
1074
1075         mutex_unlock(&kvm->lock);
1076         return 0;
1077
1078 failed:
1079         mutex_unlock(&kvm->lock);
1080         return ret;
1081 }
1082
1083 void sev_vm_destroy(struct kvm *kvm)
1084 {
1085         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1086         struct list_head *head = &sev->regions_list;
1087         struct list_head *pos, *q;
1088
1089         if (!sev_guest(kvm))
1090                 return;
1091
1092         mutex_lock(&kvm->lock);
1093
1094         /*
1095          * Ensure that all guest tagged cache entries are flushed before
1096          * releasing the pages back to the system for use. CLFLUSH will
1097          * not do this, so issue a WBINVD.
1098          */
1099         wbinvd_on_all_cpus();
1100
1101         /*
1102          * If userspace was terminated before unregistering the memory regions,
1103          * unpin all the registered memory.
1104          */
1105         if (!list_empty(head)) {
1106                 list_for_each_safe(pos, q, head) {
1107                         __unregister_enc_region_locked(kvm,
1108                                 list_entry(pos, struct enc_region, list));
1109                 }
1110         }
1111
1112         mutex_unlock(&kvm->lock);
1113
1114         sev_unbind_asid(kvm, sev->handle);
1115         sev_asid_free(sev->asid);
1116 }
1117
1118 int __init sev_hardware_setup(void)
1119 {
1120         struct sev_user_data_status *status;
1121         int rc;
1122
1123         /* Maximum number of encrypted guests supported simultaneously */
1124         max_sev_asid = cpuid_ecx(0x8000001F);
1125
1126         if (!svm_sev_enabled())
1127                 return 1;
1128
1129         /* Minimum ASID value that should be used for an SEV guest */
1130         min_sev_asid = cpuid_edx(0x8000001F);
1131
1132         /* Initialize SEV ASID bitmaps */
1133         sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1134         if (!sev_asid_bitmap)
1135                 return 1;
1136
1137         sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1138         if (!sev_reclaim_asid_bitmap)
1139                 return 1;
1140
1141         status = kmalloc(sizeof(*status), GFP_KERNEL);
1142         if (!status)
1143                 return 1;
1144
1145         /*
1146          * Check SEV platform status.
1147          *
1148          * PLATFORM_STATUS can be called in any state; if we fail to query the
1149          * platform status then either the PSP firmware does not support the SEV
1150          * feature or the SEV firmware is dead.
1151          */
1152         rc = sev_platform_status(status, NULL);
1153         if (rc)
1154                 goto err;
1155
1156         pr_info("SEV supported\n");
1157
1158 err:
1159         kfree(status);
1160         return rc;
1161 }
1162
1163 void sev_hardware_teardown(void)
1164 {
1165         if (!svm_sev_enabled())
1166                 return;
1167
1168         bitmap_free(sev_asid_bitmap);
1169         bitmap_free(sev_reclaim_asid_bitmap);
1170
1171         sev_flush_asids();
1172 }
1173
1174 void pre_sev_run(struct vcpu_svm *svm, int cpu)
1175 {
1176         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
1177         int asid = sev_get_asid(svm->vcpu.kvm);
1178
1179         /* Assign the ASID allocated to this SEV guest */
1180         svm->vmcb->control.asid = asid;
1181
1182         /*
1183          * Flush guest TLB:
1184          *
1185          * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
1186          * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
1187          */
1188         if (sd->sev_vmcbs[asid] == svm->vmcb &&
1189             svm->vcpu.arch.last_vmentry_cpu == cpu)
1190                 return;
1191
1192         sd->sev_vmcbs[asid] = svm->vmcb;
1193         svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
1194         mark_dirty(svm->vmcb, VMCB_ASID);
1195 }