// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM-SEV support
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include "x86.h"
#include "svm.h"

static int sev_flush_asids(void);
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

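/*
 * Bookkeeping for a userspace memory range registered through
 * KVM_MEMORY_ENCRYPT_REG_REGION: the pinned pages backing the range and
 * the user address/size they were pinned from.
 */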
struct enc_region {
        struct list_head list;
        unsigned long npages;
        struct page **pages;
        unsigned long uaddr;
        unsigned long size;
};

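/*
 * Flush all CPU caches and issue SEV DF_FLUSH so that ASIDs parked on the
 * reclaim bitmap can be handed out again.
 */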
static int sev_flush_asids(void)
{
        int ret, error = 0;

        /*
         * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
         * so it must be guarded.
         */
        down_write(&sev_deactivate_lock);

        wbinvd_on_all_cpus();
        ret = sev_guest_df_flush(&error);

        up_write(&sev_deactivate_lock);

        if (ret)
                pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

        return ret;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(void)
{
        int pos;

        /* Check if there are any ASIDs to reclaim before performing a flush */
        pos = find_next_bit(sev_reclaim_asid_bitmap,
                            max_sev_asid, min_sev_asid - 1);
        if (pos >= max_sev_asid)
                return false;

        if (sev_flush_asids())
                return false;

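        /*
         * Each reclaimed ASID is set in both bitmaps, so XOR-ing the reclaim
         * bitmap into the in-use bitmap clears exactly those bits.
         */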
        bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
                   max_sev_asid);
        bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);

        return true;
}

static int sev_asid_new(void)
{
        bool retry = true;
        int pos;

        mutex_lock(&sev_bitmap_lock);

        /*
         * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
         */
again:
        pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
        if (pos >= max_sev_asid) {
                if (retry && __sev_recycle_asids()) {
                        retry = false;
                        goto again;
                }
                mutex_unlock(&sev_bitmap_lock);
                return -EBUSY;
        }

        __set_bit(pos, sev_asid_bitmap);

        mutex_unlock(&sev_bitmap_lock);

        return pos + 1;
}

static int sev_get_asid(struct kvm *kvm)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return sev->asid;
}

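/*
 * Park the ASID on the reclaim bitmap (it only becomes reusable after a
 * flush in __sev_recycle_asids()) and drop the per-CPU cached VMCB
 * pointers associated with it.
 */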
static void sev_asid_free(int asid)
{
        struct svm_cpu_data *sd;
        int cpu, pos;

        mutex_lock(&sev_bitmap_lock);

        pos = asid - 1;
        __set_bit(pos, sev_reclaim_asid_bitmap);

        for_each_possible_cpu(cpu) {
                sd = per_cpu(svm_data, cpu);
                sd->sev_vmcbs[pos] = NULL;
        }

        mutex_unlock(&sev_bitmap_lock);
}

static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
        struct sev_data_decommission *decommission;
        struct sev_data_deactivate *data;

        if (!handle)
                return;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return;

        /* deactivate handle */
        data->handle = handle;

        /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
        down_read(&sev_deactivate_lock);
        sev_guest_deactivate(data, NULL);
        up_read(&sev_deactivate_lock);

        kfree(data);

        decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
        if (!decommission)
                return;

        /* decommission handle */
        decommission->handle = handle;
        sev_guest_decommission(decommission, NULL);

        kfree(decommission);
}

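/* KVM_SEV_INIT: allocate an ASID for the VM and initialize the SEV platform. */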
static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        int asid, ret;

        ret = -EBUSY;
        if (unlikely(sev->active))
                return ret;

        asid = sev_asid_new();
        if (asid < 0)
                return ret;

        ret = sev_platform_init(&argp->error);
        if (ret)
                goto e_free;

        sev->active = true;
        sev->asid = asid;
        INIT_LIST_HEAD(&sev->regions_list);

        return 0;

e_free:
        sev_asid_free(asid);
        return ret;
}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
        struct sev_data_activate *data;
        int asid = sev_get_asid(kvm);
        int ret;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        /* activate ASID on the given handle */
        data->handle = handle;
        data->asid   = asid;
        ret = sev_guest_activate(data, error);
        kfree(data);

        return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
        struct fd f;
        int ret;

        f = fdget(fd);
        if (!f.file)
                return -EBADF;

        ret = sev_issue_cmd_external_user(f.file, id, data, error);

        fdput(f);
        return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        return __sev_issue_cmd(sev->fd, id, data, error);
}

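/*
 * KVM_SEV_LAUNCH_START: create the guest's memory encryption context via
 * LAUNCH_START, bind the VM's ASID to the returned firmware handle and
 * copy the handle back to userspace.
 */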
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_start *start;
        struct kvm_sev_launch_start params;
        void *dh_blob, *session_blob;
        int *error = &argp->error;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
        if (!start)
                return -ENOMEM;

        dh_blob = NULL;
        if (params.dh_uaddr) {
                dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
                if (IS_ERR(dh_blob)) {
                        ret = PTR_ERR(dh_blob);
                        goto e_free;
                }

                start->dh_cert_address = __sme_set(__pa(dh_blob));
                start->dh_cert_len = params.dh_len;
        }

        session_blob = NULL;
        if (params.session_uaddr) {
                session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
                if (IS_ERR(session_blob)) {
                        ret = PTR_ERR(session_blob);
                        goto e_free_dh;
                }

                start->session_address = __sme_set(__pa(session_blob));
                start->session_len = params.session_len;
        }

        start->handle = params.handle;
        start->policy = params.policy;

        /* create memory encryption context */
        ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
        if (ret)
                goto e_free_session;

        /* Bind ASID to this guest */
        ret = sev_bind_asid(kvm, start->handle, error);
        if (ret)
                goto e_free_session;

        /* return handle to userspace */
        params.handle = start->handle;
        if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
                sev_unbind_asid(kvm, start->handle);
                ret = -EFAULT;
                goto e_free_session;
        }

        sev->handle = start->handle;
        sev->fd = argp->sev_fd;

e_free_session:
        kfree(session_blob);
e_free_dh:
        kfree(dh_blob);
e_free:
        kfree(start);
        return ret;
}

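/*
 * Pin the user address range with get_user_pages_fast(), charging the pages
 * against the task's RLIMIT_MEMLOCK. Returns the pinned page array (kmalloc
 * or vmalloc backed, hence freed with kvfree()) or NULL on failure.
 */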
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
                                    unsigned long ulen, unsigned long *n,
                                    int write)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        unsigned long npages, npinned, size;
        unsigned long locked, lock_limit;
        struct page **pages;
        unsigned long first, last;

        if (ulen == 0 || uaddr + ulen < uaddr)
                return NULL;

        /* Calculate number of pages. */
        first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
        last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
        npages = (last - first + 1);

        locked = sev->pages_locked + npages;
        lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
                pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
                return NULL;
        }

        /* Avoid using vmalloc for smaller buffers. */
        size = npages * sizeof(struct page *);
        if (size > PAGE_SIZE)
                pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO,
                                  PAGE_KERNEL);
        else
                pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

        if (!pages)
                return NULL;

        /* Pin the user virtual address. */
        npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
        if (npinned != npages) {
                pr_err("SEV: Failure locking %lu pages.\n", npages);
                goto err;
        }

        *n = npages;
        sev->pages_locked = locked;

        return pages;

err:
        if (npinned > 0)
                release_pages(pages, npinned);

        kvfree(pages);
        return NULL;
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
                             unsigned long npages)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

        release_pages(pages, npages);
        kvfree(pages);
        sev->pages_locked -= npages;
}

static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
        uint8_t *page_virtual;
        unsigned long i;

        if (npages == 0 || pages == NULL)
                return;

        for (i = 0; i < npages; i++) {
                page_virtual = kmap_atomic(pages[i]);
                clflush_cache_range(page_virtual, PAGE_SIZE);
                kunmap_atomic(page_virtual);
        }
}

static unsigned long get_num_contig_pages(unsigned long idx,
                                struct page **inpages, unsigned long npages)
{
        unsigned long paddr, next_paddr;
        unsigned long i = idx + 1, pages = 1;

        /* find the number of contiguous pages starting from idx */
        paddr = __sme_page_pa(inpages[idx]);
        while (i < npages) {
                next_paddr = __sme_page_pa(inpages[i++]);
                if ((paddr + PAGE_SIZE) == next_paddr) {
                        pages++;
                        paddr = next_paddr;
                        continue;
                }
                break;
        }

        return pages;
}

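/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: encrypt the guest memory in place, feeding it
 * to LAUNCH_UPDATE_DATA one physically contiguous run of pages at a time.
 */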
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_launch_update_data params;
        struct sev_data_launch_update_data *data;
        struct page **inpages;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        vaddr = params.uaddr;
        size = params.len;
        vaddr_end = vaddr + size;

        /* Lock the user memory. */
        inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
        if (!inpages) {
                ret = -ENOMEM;
                goto e_free;
        }

        /*
         * The LAUNCH_UPDATE command will perform in-place encryption of the
         * memory content (i.e. it will write the same memory region with C=1).
         * It's possible that the cache may contain the data with C=0, i.e.,
         * unencrypted, so invalidate it first.
         */
        sev_clflush_pages(inpages, npages);

        for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
                int offset, len;

                /*
                 * If the user buffer is not page-aligned, calculate the offset
                 * within the page.
                 */
                offset = vaddr & (PAGE_SIZE - 1);

                /* Calculate the number of pages that can be encrypted in one go. */
                pages = get_num_contig_pages(i, inpages, npages);

                len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

                data->handle = sev->handle;
                data->len = len;
                data->address = __sme_page_pa(inpages[i]) + offset;
                ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
                if (ret)
                        goto e_unpin;

                size -= len;
                next_vaddr = vaddr + len;
        }

e_unpin:
        /* content of memory is updated, mark pages dirty */
        for (i = 0; i < npages; i++) {
                set_page_dirty_lock(inpages[i]);
                mark_page_accessed(inpages[i]);
        }
        /* unlock the user pages */
        sev_unpin_memory(kvm, inpages, npages);
e_free:
        kfree(data);
        return ret;
}

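/*
 * KVM_SEV_LAUNCH_MEASURE: fetch the launch measurement blob. Calling with
 * params.len == 0 only queries the length the firmware expects.
 */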
static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        void __user *measure = (void __user *)(uintptr_t)argp->data;
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_measure *data;
        struct kvm_sev_launch_measure params;
        void __user *p = NULL;
        void *blob = NULL;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, measure, sizeof(params)))
                return -EFAULT;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        /* User wants to query the blob length */
        if (!params.len)
                goto cmd;

        p = (void __user *)(uintptr_t)params.uaddr;
        if (p) {
                if (params.len > SEV_FW_BLOB_MAX_SIZE) {
                        ret = -EINVAL;
                        goto e_free;
                }

                ret = -ENOMEM;
                blob = kmalloc(params.len, GFP_KERNEL);
                if (!blob)
                        goto e_free;

                data->address = __psp_pa(blob);
                data->len = params.len;
        }

cmd:
        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);

        /*
         * If we were only querying the blob length, the firmware has filled
         * in the expected length, so return it regardless of the command's
         * return code.
         */
        if (!params.len)
                goto done;

        if (ret)
                goto e_free_blob;

        if (blob) {
                if (copy_to_user(p, blob, params.len))
                        ret = -EFAULT;
        }

done:
        params.len = data->len;
        if (copy_to_user(measure, &params, sizeof(params)))
                ret = -EFAULT;
e_free_blob:
        kfree(blob);
e_free:
        kfree(data);
        return ret;
}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_finish *data;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);

        kfree(data);
        return ret;
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct kvm_sev_guest_status params;
        struct sev_data_guest_status *data;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
        if (ret)
                goto e_free;

        params.policy = data->policy;
        params.state = data->state;
        params.handle = data->handle;

        if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
                ret = -EFAULT;
e_free:
        kfree(data);
        return ret;
}

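/*
 * Issue a DBG_ENCRYPT or DBG_DECRYPT command for a single chunk; src and
 * dst are system physical addresses as supplied by the callers.
 */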
static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
                               unsigned long dst, int size,
                               int *error, bool enc)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_dbg *data;
        int ret;

        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                return -ENOMEM;

        data->handle = sev->handle;
        data->dst_addr = dst;
        data->src_addr = src;
        data->len = size;

        ret = sev_issue_cmd(kvm,
                            enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
                            data, error);
        kfree(data);
        return ret;
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
                             unsigned long dst_paddr, int sz, int *err)
{
        int offset;

        /*
         * It's safe to read more than we are asked for; the caller should
         * ensure that the destination has enough space.
         *
         * Note: the offset must be computed before src_paddr is rounded
         * down, otherwise it is always zero and the tail of an unaligned
         * buffer is never decrypted.
         */
        offset = src_paddr & 15;
        src_paddr = round_down(src_paddr, 16);
        sz = round_up(sz + offset, 16);

        return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
                                  unsigned long __user dst_uaddr,
                                  unsigned long dst_paddr,
                                  int size, int *err)
{
        struct page *tpage = NULL;
        int ret, offset;

        /* if the inputs are not 16-byte aligned then use an intermediate buffer */
        if (!IS_ALIGNED(dst_paddr, 16) ||
            !IS_ALIGNED(paddr,     16) ||
            !IS_ALIGNED(size,      16)) {
                tpage = alloc_page(GFP_KERNEL);
                if (!tpage)
                        return -ENOMEM;

                dst_paddr = __sme_page_pa(tpage);
        }

        ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
        if (ret)
                goto e_free;

        if (tpage) {
                offset = paddr & 15;
                if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
                                 page_address(tpage) + offset, size))
                        ret = -EFAULT;
        }

e_free:
        if (tpage)
                __free_page(tpage);

        return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
                                  unsigned long __user vaddr,
                                  unsigned long dst_paddr,
                                  unsigned long __user dst_vaddr,
                                  int size, int *error)
{
        struct page *src_tpage = NULL;
        struct page *dst_tpage = NULL;
        int ret, len = size;

        /* If the source buffer is not aligned then use an intermediate buffer */
        if (!IS_ALIGNED(vaddr, 16)) {
                src_tpage = alloc_page(GFP_KERNEL);
                if (!src_tpage)
                        return -ENOMEM;

                if (copy_from_user(page_address(src_tpage),
                                (void __user *)(uintptr_t)vaddr, size)) {
                        __free_page(src_tpage);
                        return -EFAULT;
                }

                paddr = __sme_page_pa(src_tpage);
        }

        /*
         * If the destination buffer or the length is not aligned then do a
         * read-modify-write:
         *   - decrypt the destination into an intermediate buffer
         *   - copy the source buffer into the intermediate buffer
         *   - use the intermediate buffer as the source buffer
         */
        if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
                int dst_offset;

                dst_tpage = alloc_page(GFP_KERNEL);
                if (!dst_tpage) {
                        ret = -ENOMEM;
                        goto e_free;
                }

                ret = __sev_dbg_decrypt(kvm, dst_paddr,
                                        __sme_page_pa(dst_tpage), size, error);
                if (ret)
                        goto e_free;

                /*
                 * If the source is a kernel buffer then use memcpy(),
                 * otherwise copy_from_user().
                 */
                dst_offset = dst_paddr & 15;

                if (src_tpage)
                        memcpy(page_address(dst_tpage) + dst_offset,
                               page_address(src_tpage), size);
                else {
                        if (copy_from_user(page_address(dst_tpage) + dst_offset,
                                           (void __user *)(uintptr_t)vaddr, size)) {
                                ret = -EFAULT;
                                goto e_free;
                        }
                }

                paddr = __sme_page_pa(dst_tpage);
                dst_paddr = round_down(dst_paddr, 16);
                len = round_up(size, 16);
        }

        ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
        if (src_tpage)
                __free_page(src_tpage);
        if (dst_tpage)
                __free_page(dst_tpage);
        return ret;
}

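/*
 * KVM_SEV_DBG_{DE,EN}CRYPT: walk the source range page by page, pinning the
 * source and destination pages and handing each chunk to the firmware.
 */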
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
        unsigned long vaddr, vaddr_end, next_vaddr;
        unsigned long dst_vaddr;
        struct page **src_p, **dst_p;
        struct kvm_sev_dbg debug;
        unsigned long n;
        unsigned int size;
        int ret;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
                return -EFAULT;

        if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
                return -EINVAL;
        if (!debug.dst_uaddr)
                return -EINVAL;

        vaddr = debug.src_uaddr;
        size = debug.len;
        vaddr_end = vaddr + size;
        dst_vaddr = debug.dst_uaddr;

        for (; vaddr < vaddr_end; vaddr = next_vaddr) {
                int len, s_off, d_off;

                /* lock userspace source and destination page */
                src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
                if (!src_p)
                        return -EFAULT;

                dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
                if (!dst_p) {
                        sev_unpin_memory(kvm, src_p, n);
                        return -EFAULT;
                }

                /*
                 * The DBG_{DE,EN}CRYPT commands will perform {de,en}cryption of the
                 * memory content (i.e. it will write the same memory region with C=1).
                 * It's possible that the cache may contain the data with C=0, i.e.,
                 * unencrypted, so invalidate it first.
                 */
                sev_clflush_pages(src_p, 1);
                sev_clflush_pages(dst_p, 1);

                /*
                 * Since the user buffer may not be page aligned, calculate the
                 * offset within the page.
                 */
                s_off = vaddr & ~PAGE_MASK;
                d_off = dst_vaddr & ~PAGE_MASK;
                len = min_t(size_t, (PAGE_SIZE - s_off), size);

                if (dec)
                        ret = __sev_dbg_decrypt_user(kvm,
                                                     __sme_page_pa(src_p[0]) + s_off,
                                                     dst_vaddr,
                                                     __sme_page_pa(dst_p[0]) + d_off,
                                                     len, &argp->error);
                else
                        ret = __sev_dbg_encrypt_user(kvm,
                                                     __sme_page_pa(src_p[0]) + s_off,
                                                     vaddr,
                                                     __sme_page_pa(dst_p[0]) + d_off,
                                                     dst_vaddr,
                                                     len, &argp->error);

                sev_unpin_memory(kvm, src_p, n);
                sev_unpin_memory(kvm, dst_p, n);

                if (ret)
                        goto err;

                next_vaddr = vaddr + len;
                dst_vaddr = dst_vaddr + len;
                size -= len;
        }
err:
        return ret;
}

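/*
 * KVM_SEV_LAUNCH_SECRET: inject a secret into the guest. The guest pages
 * must be physically contiguous, since the secret is written to a single
 * contiguous region.
 */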
static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct sev_data_launch_secret *data;
        struct kvm_sev_launch_secret params;
        struct page **pages;
        void *blob, *hdr;
        unsigned long n;
        int ret, offset;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
                return -EFAULT;

        pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
        if (!pages)
                return -ENOMEM;

        /*
         * The secret must be copied into a contiguous memory region, so
         * verify that the userspace memory pages are contiguous before
         * issuing the command.
         */
        if (get_num_contig_pages(0, pages, n) != n) {
                ret = -EINVAL;
                goto e_unpin_memory;
        }

        ret = -ENOMEM;
        data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
        if (!data)
                goto e_unpin_memory;

        offset = params.guest_uaddr & (PAGE_SIZE - 1);
        data->guest_address = __sme_page_pa(pages[0]) + offset;
        data->guest_len = params.guest_len;

        blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
        if (IS_ERR(blob)) {
                ret = PTR_ERR(blob);
                goto e_free;
        }

        data->trans_address = __psp_pa(blob);
        data->trans_len = params.trans_len;

        hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
        if (IS_ERR(hdr)) {
                ret = PTR_ERR(hdr);
                goto e_free_blob;
        }
        data->hdr_address = __psp_pa(hdr);
        data->hdr_len = params.hdr_len;

        data->handle = sev->handle;
        ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);

        kfree(hdr);

e_free_blob:
        kfree(blob);
e_free:
        kfree(data);
e_unpin_memory:
        sev_unpin_memory(kvm, pages, n);
        return ret;
}

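/*
 * Top-level dispatcher for the KVM_MEMORY_ENCRYPT_OP ioctl on the VM fd.
 * Illustrative userspace usage (file descriptor names are hypothetical):
 *
 *   struct kvm_sev_cmd cmd = {
 *           .id     = KVM_SEV_INIT,
 *           .sev_fd = sev_fd,           // fd for the SEV device
 *   };
 *   ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
 *
 * On return, cmd.error holds the SEV firmware error code, if any.
 */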
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
        struct kvm_sev_cmd sev_cmd;
        int r;

        if (!svm_sev_enabled())
                return -ENOTTY;

        if (!argp)
                return 0;

        if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
                return -EFAULT;

        mutex_lock(&kvm->lock);

        switch (sev_cmd.id) {
        case KVM_SEV_INIT:
                r = sev_guest_init(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_START:
                r = sev_launch_start(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_UPDATE_DATA:
                r = sev_launch_update_data(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_MEASURE:
                r = sev_launch_measure(kvm, &sev_cmd);
                break;
        case KVM_SEV_LAUNCH_FINISH:
                r = sev_launch_finish(kvm, &sev_cmd);
                break;
        case KVM_SEV_GUEST_STATUS:
                r = sev_guest_status(kvm, &sev_cmd);
                break;
        case KVM_SEV_DBG_DECRYPT:
                r = sev_dbg_crypt(kvm, &sev_cmd, true);
                break;
        case KVM_SEV_DBG_ENCRYPT:
                r = sev_dbg_crypt(kvm, &sev_cmd, false);
                break;
        case KVM_SEV_LAUNCH_SECRET:
                r = sev_launch_secret(kvm, &sev_cmd);
                break;
        default:
                r = -EINVAL;
                goto out;
        }

        if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
                r = -EFAULT;

out:
        mutex_unlock(&kvm->lock);
        return r;
}

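/*
 * KVM_MEMORY_ENCRYPT_REG_REGION: pin and track a userspace memory range so
 * its pages stay in place while the guest may access them encrypted.
 */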
int svm_register_enc_region(struct kvm *kvm,
                            struct kvm_enc_region *range)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct enc_region *region;
        int ret = 0;

        if (!sev_guest(kvm))
                return -ENOTTY;

        if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
                return -EINVAL;

        region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
        if (!region)
                return -ENOMEM;

        region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
        if (!region->pages) {
                ret = -ENOMEM;
                goto e_free;
        }

        /*
         * The guest may change the memory encryption attribute from C=0 -> C=1
         * or vice versa for this memory range. Let's make sure caches are
         * flushed to ensure that guest data gets written into memory with the
         * correct C-bit.
         */
        sev_clflush_pages(region->pages, region->npages);

        region->uaddr = range->addr;
        region->size = range->size;

        mutex_lock(&kvm->lock);
        list_add_tail(&region->list, &sev->regions_list);
        mutex_unlock(&kvm->lock);

        return ret;

e_free:
        kfree(region);
        return ret;
}

static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct list_head *head = &sev->regions_list;
        struct enc_region *i;

        list_for_each_entry(i, head, list) {
                if (i->uaddr == range->addr &&
                    i->size == range->size)
                        return i;
        }

        return NULL;
}

static void __unregister_enc_region_locked(struct kvm *kvm,
                                           struct enc_region *region)
{
        sev_unpin_memory(kvm, region->pages, region->npages);
        list_del(&region->list);
        kfree(region);
}

int svm_unregister_enc_region(struct kvm *kvm,
                              struct kvm_enc_region *range)
{
        struct enc_region *region;
        int ret;

        mutex_lock(&kvm->lock);

        if (!sev_guest(kvm)) {
                ret = -ENOTTY;
                goto failed;
        }

        region = find_enc_region(kvm, range);
        if (!region) {
                ret = -EINVAL;
                goto failed;
        }

        /*
         * Ensure that all guest tagged cache entries are flushed before
         * releasing the pages back to the system for use. CLFLUSH will
         * not do this, so issue a WBINVD.
         */
        wbinvd_on_all_cpus();

        __unregister_enc_region_locked(kvm, region);

        mutex_unlock(&kvm->lock);
        return 0;

failed:
        mutex_unlock(&kvm->lock);
        return ret;
}

void sev_vm_destroy(struct kvm *kvm)
{
        struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
        struct list_head *head = &sev->regions_list;
        struct list_head *pos, *q;

        if (!sev_guest(kvm))
                return;

        mutex_lock(&kvm->lock);

        /*
         * Ensure that all guest tagged cache entries are flushed before
         * releasing the pages back to the system for use. CLFLUSH will
         * not do this, so issue a WBINVD.
         */
        wbinvd_on_all_cpus();

        /*
         * If userspace was terminated before unregistering the memory
         * regions then unpin all the registered memory.
         */
        if (!list_empty(head)) {
                list_for_each_safe(pos, q, head) {
                        __unregister_enc_region_locked(kvm,
                                list_entry(pos, struct enc_region, list));
                }
        }

        mutex_unlock(&kvm->lock);

        sev_unbind_asid(kvm, sev->handle);
        sev_asid_free(sev->asid);
}

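/*
 * Read the SEV ASID range from CPUID 0x8000001F (ECX: maximum ASID, EDX:
 * minimum SEV ASID), allocate the ASID bitmaps and probe the firmware with
 * PLATFORM_STATUS. Returns non-zero if SEV is unavailable.
 */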
int __init sev_hardware_setup(void)
{
        struct sev_user_data_status *status;
        int rc;

        /* Maximum number of encrypted guests supported simultaneously */
        max_sev_asid = cpuid_ecx(0x8000001F);

        if (!svm_sev_enabled())
                return 1;

        /* Minimum ASID value that should be used for SEV guests */
        min_sev_asid = cpuid_edx(0x8000001F);

        /* Initialize SEV ASID bitmaps */
        sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
        if (!sev_asid_bitmap)
                return 1;

        sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
        if (!sev_reclaim_asid_bitmap)
                return 1;

        status = kmalloc(sizeof(*status), GFP_KERNEL);
        if (!status)
                return 1;

        /*
         * Check the SEV platform status.
         *
         * PLATFORM_STATUS can be called in any state. If we failed to query
         * the platform status then either the PSP firmware does not support
         * the SEV feature or the SEV firmware is dead.
         */
        rc = sev_platform_status(status, NULL);
        if (rc)
                goto err;

        pr_info("SEV supported\n");

err:
        kfree(status);
        return rc;
}

void sev_hardware_teardown(void)
{
        if (!svm_sev_enabled())
                return;

        bitmap_free(sev_asid_bitmap);
        bitmap_free(sev_reclaim_asid_bitmap);

        sev_flush_asids();
}

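/*
 * Called before VMRUN: install the guest's ASID in the VMCB and request an
 * ASID TLB flush whenever this ASID may have stale translations on this CPU.
 */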
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
        int asid = sev_get_asid(svm->vcpu.kvm);

        /* Assign the ASID allocated to this SEV guest */
        svm->vmcb->control.asid = asid;

        /*
         * Flush guest TLB:
         *
         * 1) when a different VMCB for the same ASID is to be run on the
         *    same host CPU, or
         * 2) this VMCB was executed on a different host CPU in previous
         *    VMRUNs.
         */
        if (sd->sev_vmcbs[asid] == svm->vmcb &&
            svm->last_cpu == cpu)
                return;

        svm->last_cpu = cpu;
        sd->sev_vmcbs[asid] = svm->vmcb;
        svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
        mark_dirty(svm->vmcb, VMCB_ASID);
}