linux-2.6-microblaze.git: arch/x86/kvm/svm/sev.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Kernel-based Virtual Machine driver for Linux
4  *
5  * AMD SVM-SEV support
6  *
7  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
8  */
9
10 #include <linux/kvm_types.h>
11 #include <linux/kvm_host.h>
12 #include <linux/kernel.h>
13 #include <linux/highmem.h>
14 #include <linux/psp-sev.h>
15 #include <linux/pagemap.h>
16 #include <linux/swap.h>
17
18 #include "x86.h"
19 #include "svm.h"
20
21 static int sev_flush_asids(void);
22 static DECLARE_RWSEM(sev_deactivate_lock);
23 static DEFINE_MUTEX(sev_bitmap_lock);
24 unsigned int max_sev_asid;
25 static unsigned int min_sev_asid;
26 static unsigned long *sev_asid_bitmap;
27 static unsigned long *sev_reclaim_asid_bitmap;
28 #define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)
29
30 struct enc_region {
31         struct list_head list;
32         unsigned long npages;
33         struct page **pages;
34         unsigned long uaddr;
35         unsigned long size;
36 };
37
38 static int sev_flush_asids(void)
39 {
40         int ret, error = 0;
41
42         /*
43          * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
44          * so it must be guarded.
45          */
46         down_write(&sev_deactivate_lock);
47
48         wbinvd_on_all_cpus();
49         ret = sev_guest_df_flush(&error);
50
51         up_write(&sev_deactivate_lock);
52
53         if (ret)
54                 pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);
55
56         return ret;
57 }
58
59 /* Must be called with the sev_bitmap_lock held */
60 static bool __sev_recycle_asids(void)
61 {
62         int pos;
63
64         /* Check if there are any ASIDs to reclaim before performing a flush */
65         pos = find_next_bit(sev_reclaim_asid_bitmap,
66                             max_sev_asid, min_sev_asid - 1);
67         if (pos >= max_sev_asid)
68                 return false;
69
70         if (sev_flush_asids())
71                 return false;
72
73         bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
74                    max_sev_asid);
75         bitmap_zero(sev_reclaim_asid_bitmap, max_sev_asid);
76
77         return true;
78 }
79
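/*
 * Allocate an ASID from the SEV range [min_sev_asid, max_sev_asid].  If the
 * range is exhausted, try once to recycle ASIDs freed by destroyed guests
 * (which requires a WBINVD + DF_FLUSH).  Returns a 1-based ASID on success,
 * -EBUSY if none is available.
 */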
80 static int sev_asid_new(void)
81 {
82         bool retry = true;
83         int pos;
84
85         mutex_lock(&sev_bitmap_lock);
86
87         /*
88          * SEV-enabled guests must use ASIDs from min_sev_asid to max_sev_asid.
89          */
90 again:
91         pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
92         if (pos >= max_sev_asid) {
93                 if (retry && __sev_recycle_asids()) {
94                         retry = false;
95                         goto again;
96                 }
97                 mutex_unlock(&sev_bitmap_lock);
98                 return -EBUSY;
99         }
100
101         __set_bit(pos, sev_asid_bitmap);
102
103         mutex_unlock(&sev_bitmap_lock);
104
105         return pos + 1;
106 }
107
108 static int sev_get_asid(struct kvm *kvm)
109 {
110         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
111
112         return sev->asid;
113 }
114
115 static void sev_asid_free(int asid)
116 {
117         struct svm_cpu_data *sd;
118         int cpu, pos;
119
120         mutex_lock(&sev_bitmap_lock);
121
122         pos = asid - 1;
123         __set_bit(pos, sev_reclaim_asid_bitmap);
124
125         for_each_possible_cpu(cpu) {
126                 sd = per_cpu(svm_data, cpu);
127                 sd->sev_vmcbs[pos] = NULL;
128         }
129
130         mutex_unlock(&sev_bitmap_lock);
131 }
132
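/*
 * Tear down the firmware context for @handle: DEACTIVATE detaches the handle
 * from its ASID (serialized against DF_FLUSH via sev_deactivate_lock), then
 * DECOMMISSION releases the handle in the PSP firmware.
 */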
133 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
134 {
135         struct sev_data_decommission *decommission;
136         struct sev_data_deactivate *data;
137
138         if (!handle)
139                 return;
140
141         data = kzalloc(sizeof(*data), GFP_KERNEL);
142         if (!data)
143                 return;
144
145         /* deactivate handle */
146         data->handle = handle;
147
148         /* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
149         down_read(&sev_deactivate_lock);
150         sev_guest_deactivate(data, NULL);
151         up_read(&sev_deactivate_lock);
152
153         kfree(data);
154
155         decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
156         if (!decommission)
157                 return;
158
159         /* decommission handle */
160         decommission->handle = handle;
161         sev_guest_decommission(decommission, NULL);
162
163         kfree(decommission);
164 }
165
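/*
 * KVM_SEV_INIT: allocate an ASID for this VM, initialize the SEV firmware via
 * sev_platform_init() and mark the VM as SEV-active.
 */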
166 static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
167 {
168         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
169         int asid, ret;
170
171         ret = -EBUSY;
172         if (unlikely(sev->active))
173                 return ret;
174
175         asid = sev_asid_new();
176         if (asid < 0)
177                 return ret;
178
179         ret = sev_platform_init(&argp->error);
180         if (ret)
181                 goto e_free;
182
183         sev->active = true;
184         sev->asid = asid;
185         INIT_LIST_HEAD(&sev->regions_list);
186
187         return 0;
188
189 e_free:
190         sev_asid_free(asid);
191         return ret;
192 }
193
194 static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
195 {
196         struct sev_data_activate *data;
197         int asid = sev_get_asid(kvm);
198         int ret;
199
200         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
201         if (!data)
202                 return -ENOMEM;
203
204         /* activate ASID on the given handle */
205         data->handle = handle;
206         data->asid   = asid;
207         ret = sev_guest_activate(data, error);
208         kfree(data);
209
210         return ret;
211 }
212
213 static int __sev_issue_cmd(int fd, int id, void *data, int *error)
214 {
215         struct fd f;
216         int ret;
217
218         f = fdget(fd);
219         if (!f.file)
220                 return -EBADF;
221
222         ret = sev_issue_cmd_external_user(f.file, id, data, error);
223
224         fdput(f);
225         return ret;
226 }
227
228 static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
229 {
230         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
231
232         return __sev_issue_cmd(sev->fd, id, data, error);
233 }
234
235 static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
236 {
237         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
238         struct sev_data_launch_start *start;
239         struct kvm_sev_launch_start params;
240         void *dh_blob, *session_blob;
241         int *error = &argp->error;
242         int ret;
243
244         if (!sev_guest(kvm))
245                 return -ENOTTY;
246
247         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
248                 return -EFAULT;
249
250         start = kzalloc(sizeof(*start), GFP_KERNEL_ACCOUNT);
251         if (!start)
252                 return -ENOMEM;
253
254         dh_blob = NULL;
255         if (params.dh_uaddr) {
256                 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
257                 if (IS_ERR(dh_blob)) {
258                         ret = PTR_ERR(dh_blob);
259                         goto e_free;
260                 }
261
262                 start->dh_cert_address = __sme_set(__pa(dh_blob));
263                 start->dh_cert_len = params.dh_len;
264         }
265
266         session_blob = NULL;
267         if (params.session_uaddr) {
268                 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
269                 if (IS_ERR(session_blob)) {
270                         ret = PTR_ERR(session_blob);
271                         goto e_free_dh;
272                 }
273
274                 start->session_address = __sme_set(__pa(session_blob));
275                 start->session_len = params.session_len;
276         }
277
278         start->handle = params.handle;
279         start->policy = params.policy;
280
281         /* create memory encryption context */
282         ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
283         if (ret)
284                 goto e_free_session;
285
286         /* Bind ASID to this guest */
287         ret = sev_bind_asid(kvm, start->handle, error);
288         if (ret)
289                 goto e_free_session;
290
291         /* return handle to userspace */
292         params.handle = start->handle;
293         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
294                 sev_unbind_asid(kvm, start->handle);
295                 ret = -EFAULT;
296                 goto e_free_session;
297         }
298
299         sev->handle = start->handle;
300         sev->fd = argp->sev_fd;
301
302 e_free_session:
303         kfree(session_blob);
304 e_free_dh:
305         kfree(dh_blob);
306 e_free:
307         kfree(start);
308         return ret;
309 }
310
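/*
 * Pin @ulen bytes of userspace memory starting at @uaddr.  The pages are
 * charged against RLIMIT_MEMLOCK, pinned with pin_user_pages_fast() and
 * returned as a page array (kmalloc'd or vmalloc'd depending on size); the
 * page count is stored in *n and accounted in sev->pages_locked.
 */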
311 static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
312                                     unsigned long ulen, unsigned long *n,
313                                     int write)
314 {
315         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
316         unsigned long npages, size;
317         int npinned;
318         unsigned long locked, lock_limit;
319         struct page **pages;
320         unsigned long first, last;
321         int ret;
322
323         if (ulen == 0 || uaddr + ulen < uaddr)
324                 return ERR_PTR(-EINVAL);
325
326         /* Calculate number of pages. */
327         first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
328         last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
329         npages = (last - first + 1);
330
331         locked = sev->pages_locked + npages;
332         lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
333         if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
334                 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
335                 return ERR_PTR(-ENOMEM);
336         }
337
338         if (WARN_ON_ONCE(npages > INT_MAX))
339                 return ERR_PTR(-EINVAL);
340
341         /* Avoid using vmalloc for smaller buffers. */
342         size = npages * sizeof(struct page *);
343         if (size > PAGE_SIZE)
344                 pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
345         else
346                 pages = kmalloc(size, GFP_KERNEL_ACCOUNT);
347
348         if (!pages)
349                 return ERR_PTR(-ENOMEM);
350
351         /* Pin the user virtual address. */
352         npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
353         if (npinned != npages) {
354                 pr_err("SEV: Failure locking %lu pages.\n", npages);
355                 ret = -ENOMEM;
356                 goto err;
357         }
358
359         *n = npages;
360         sev->pages_locked = locked;
361
362         return pages;
363
364 err:
365         if (npinned > 0)
366                 unpin_user_pages(pages, npinned);
367
368         kvfree(pages);
369         return ERR_PTR(ret);
370 }
371
372 static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
373                              unsigned long npages)
374 {
375         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
376
377         unpin_user_pages(pages, npages);
378         kvfree(pages);
379         sev->pages_locked -= npages;
380 }
381
382 static void sev_clflush_pages(struct page *pages[], unsigned long npages)
383 {
384         uint8_t *page_virtual;
385         unsigned long i;
386
387         if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
388             pages == NULL)
389                 return;
390
391         for (i = 0; i < npages; i++) {
392                 page_virtual = kmap_atomic(pages[i]);
393                 clflush_cache_range(page_virtual, PAGE_SIZE);
394                 kunmap_atomic(page_virtual);
395         }
396 }
397
398 static unsigned long get_num_contig_pages(unsigned long idx,
399                                 struct page **inpages, unsigned long npages)
400 {
401         unsigned long paddr, next_paddr;
402         unsigned long i = idx + 1, pages = 1;
403
404         /* find the number of contiguous pages starting from idx */
405         paddr = __sme_page_pa(inpages[idx]);
406         while (i < npages) {
407                 next_paddr = __sme_page_pa(inpages[i++]);
408                 if ((paddr + PAGE_SIZE) == next_paddr) {
409                         pages++;
410                         paddr = next_paddr;
411                         continue;
412                 }
413                 break;
414         }
415
416         return pages;
417 }
418
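/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: encrypt guest memory in place.  Pin the
 * userspace range, flush cache lines that may still hold C=0 data, then issue
 * LAUNCH_UPDATE_DATA once per physically contiguous run of pinned pages.
 */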
419 static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
420 {
421         unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
422         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
423         struct kvm_sev_launch_update_data params;
424         struct sev_data_launch_update_data *data;
425         struct page **inpages;
426         int ret;
427
428         if (!sev_guest(kvm))
429                 return -ENOTTY;
430
431         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
432                 return -EFAULT;
433
434         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
435         if (!data)
436                 return -ENOMEM;
437
438         vaddr = params.uaddr;
439         size = params.len;
440         vaddr_end = vaddr + size;
441
442         /* Lock the user memory. */
443         inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
444         if (IS_ERR(inpages)) {
445                 ret = PTR_ERR(inpages);
446                 goto e_free;
447         }
448
449         /*
450          * The LAUNCH_UPDATE command will perform in-place encryption of the
451          * memory content (i.e. it will write the same memory region with C=1).
452          * It's possible that the cache may contain the data with C=0, i.e.,
453          * unencrypted, so invalidate it first.
454          */
455         sev_clflush_pages(inpages, npages);
456
457         for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
458                 int offset, len;
459
460                 /*
461                  * If the user buffer is not page-aligned, calculate the offset
462                  * within the page.
463                  */
464                 offset = vaddr & (PAGE_SIZE - 1);
465
466                 /* Calculate the number of pages that can be encrypted in one go. */
467                 pages = get_num_contig_pages(i, inpages, npages);
468
469                 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
470
471                 data->handle = sev->handle;
472                 data->len = len;
473                 data->address = __sme_page_pa(inpages[i]) + offset;
474                 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
475                 if (ret)
476                         goto e_unpin;
477
478                 size -= len;
479                 next_vaddr = vaddr + len;
480         }
481
482 e_unpin:
483         /* The content of memory was updated, mark the pages dirty and accessed */
484         for (i = 0; i < npages; i++) {
485                 set_page_dirty_lock(inpages[i]);
486                 mark_page_accessed(inpages[i]);
487         }
488         /* unlock the user pages */
489         sev_unpin_memory(kvm, inpages, npages);
490 e_free:
491         kfree(data);
492         return ret;
493 }
494
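/*
 * KVM_SEV_LAUNCH_MEASURE: retrieve the launch measurement.  With params.len
 * == 0 only the required blob length is queried; otherwise the measurement is
 * written to a kernel bounce buffer and copied out to params.uaddr.
 */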
495 static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
496 {
497         void __user *measure = (void __user *)(uintptr_t)argp->data;
498         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
499         struct sev_data_launch_measure *data;
500         struct kvm_sev_launch_measure params;
501         void __user *p = NULL;
502         void *blob = NULL;
503         int ret;
504
505         if (!sev_guest(kvm))
506                 return -ENOTTY;
507
508         if (copy_from_user(&params, measure, sizeof(params)))
509                 return -EFAULT;
510
511         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
512         if (!data)
513                 return -ENOMEM;
514
515         /* User wants to query the blob length */
516         if (!params.len)
517                 goto cmd;
518
519         p = (void __user *)(uintptr_t)params.uaddr;
520         if (p) {
521                 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
522                         ret = -EINVAL;
523                         goto e_free;
524                 }
525
526                 ret = -ENOMEM;
527                 blob = kmalloc(params.len, GFP_KERNEL);
528                 if (!blob)
529                         goto e_free;
530
531                 data->address = __psp_pa(blob);
532                 data->len = params.len;
533         }
534
535 cmd:
536         data->handle = sev->handle;
537         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
538
539         /*
540          * If only the measurement length was queried, FW returned it in data->len.
541          */
542         if (!params.len)
543                 goto done;
544
545         if (ret)
546                 goto e_free_blob;
547
548         if (blob) {
549                 if (copy_to_user(p, blob, params.len))
550                         ret = -EFAULT;
551         }
552
553 done:
554         params.len = data->len;
555         if (copy_to_user(measure, &params, sizeof(params)))
556                 ret = -EFAULT;
557 e_free_blob:
558         kfree(blob);
559 e_free:
560         kfree(data);
561         return ret;
562 }
563
564 static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
565 {
566         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
567         struct sev_data_launch_finish *data;
568         int ret;
569
570         if (!sev_guest(kvm))
571                 return -ENOTTY;
572
573         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
574         if (!data)
575                 return -ENOMEM;
576
577         data->handle = sev->handle;
578         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
579
580         kfree(data);
581         return ret;
582 }
583
584 static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
585 {
586         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
587         struct kvm_sev_guest_status params;
588         struct sev_data_guest_status *data;
589         int ret;
590
591         if (!sev_guest(kvm))
592                 return -ENOTTY;
593
594         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
595         if (!data)
596                 return -ENOMEM;
597
598         data->handle = sev->handle;
599         ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
600         if (ret)
601                 goto e_free;
602
603         params.policy = data->policy;
604         params.state = data->state;
605         params.handle = data->handle;
606
607         if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
608                 ret = -EFAULT;
609 e_free:
610         kfree(data);
611         return ret;
612 }
613
614 static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
615                                unsigned long dst, int size,
616                                int *error, bool enc)
617 {
618         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
619         struct sev_data_dbg *data;
620         int ret;
621
622         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
623         if (!data)
624                 return -ENOMEM;
625
626         data->handle = sev->handle;
627         data->dst_addr = dst;
628         data->src_addr = src;
629         data->len = size;
630
631         ret = sev_issue_cmd(kvm,
632                             enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
633                             data, error);
634         kfree(data);
635         return ret;
636 }
637
638 static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
639                              unsigned long dst_paddr, int sz, int *err)
640 {
641         int offset;
642
643         /*
644          * It's safe to read more than we are asked; the caller should ensure
645          * that the destination has enough space.
646          */
647         src_paddr = round_down(src_paddr, 16);
648         offset = src_paddr & 15;
649         sz = round_up(sz + offset, 16);
650
651         return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
652 }
653
654 static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
655                                   unsigned long __user dst_uaddr,
656                                   unsigned long dst_paddr,
657                                   int size, int *err)
658 {
659         struct page *tpage = NULL;
660         int ret, offset;
661
662         /* If the inputs are not 16-byte aligned then use an intermediate buffer */
663         if (!IS_ALIGNED(dst_paddr, 16) ||
664             !IS_ALIGNED(paddr,     16) ||
665             !IS_ALIGNED(size,      16)) {
666                 tpage = (void *)alloc_page(GFP_KERNEL);
667                 if (!tpage)
668                         return -ENOMEM;
669
670                 dst_paddr = __sme_page_pa(tpage);
671         }
672
673         ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
674         if (ret)
675                 goto e_free;
676
677         if (tpage) {
678                 offset = paddr & 15;
679                 if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
680                                  page_address(tpage) + offset, size))
681                         ret = -EFAULT;
682         }
683
684 e_free:
685         if (tpage)
686                 __free_page(tpage);
687
688         return ret;
689 }
690
691 static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
692                                   unsigned long __user vaddr,
693                                   unsigned long dst_paddr,
694                                   unsigned long __user dst_vaddr,
695                                   int size, int *error)
696 {
697         struct page *src_tpage = NULL;
698         struct page *dst_tpage = NULL;
699         int ret, len = size;
700
701         /* If the source buffer is not 16-byte aligned then use an intermediate buffer */
702         if (!IS_ALIGNED(vaddr, 16)) {
703                 src_tpage = alloc_page(GFP_KERNEL);
704                 if (!src_tpage)
705                         return -ENOMEM;
706
707                 if (copy_from_user(page_address(src_tpage),
708                                 (void __user *)(uintptr_t)vaddr, size)) {
709                         __free_page(src_tpage);
710                         return -EFAULT;
711                 }
712
713                 paddr = __sme_page_pa(src_tpage);
714         }
715
716         /*
717          *  If destination buffer or length is not aligned then do read-modify-write:
718          *   - decrypt destination in an intermediate buffer
719          *   - copy the source buffer in an intermediate buffer
720          *   - use the intermediate buffer as source buffer
721          */
722         if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
723                 int dst_offset;
724
725                 dst_tpage = alloc_page(GFP_KERNEL);
726                 if (!dst_tpage) {
727                         ret = -ENOMEM;
728                         goto e_free;
729                 }
730
731                 ret = __sev_dbg_decrypt(kvm, dst_paddr,
732                                         __sme_page_pa(dst_tpage), size, error);
733                 if (ret)
734                         goto e_free;
735
736                 /*
737                  *  If the source is a kernel buffer then use memcpy(),
738                  *  otherwise copy_from_user().
739                  */
740                 dst_offset = dst_paddr & 15;
741
742                 if (src_tpage)
743                         memcpy(page_address(dst_tpage) + dst_offset,
744                                page_address(src_tpage), size);
745                 else {
746                         if (copy_from_user(page_address(dst_tpage) + dst_offset,
747                                            (void __user *)(uintptr_t)vaddr, size)) {
748                                 ret = -EFAULT;
749                                 goto e_free;
750                         }
751                 }
752
753                 paddr = __sme_page_pa(dst_tpage);
754                 dst_paddr = round_down(dst_paddr, 16);
755                 len = round_up(size, 16);
756         }
757
758         ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
759
760 e_free:
761         if (src_tpage)
762                 __free_page(src_tpage);
763         if (dst_tpage)
764                 __free_page(dst_tpage);
765         return ret;
766 }
767
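/*
 * Common handler for KVM_SEV_DBG_DECRYPT and KVM_SEV_DBG_ENCRYPT.  Walk the
 * source range page by page: pin the source and destination pages, flush
 * them, then issue DBG_DECRYPT or DBG_ENCRYPT for the span that fits within
 * the current page.
 */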
768 static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
769 {
770         unsigned long vaddr, vaddr_end, next_vaddr;
771         unsigned long dst_vaddr;
772         struct page **src_p, **dst_p;
773         struct kvm_sev_dbg debug;
774         unsigned long n;
775         unsigned int size;
776         int ret;
777
778         if (!sev_guest(kvm))
779                 return -ENOTTY;
780
781         if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
782                 return -EFAULT;
783
784         if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
785                 return -EINVAL;
786         if (!debug.dst_uaddr)
787                 return -EINVAL;
788
789         vaddr = debug.src_uaddr;
790         size = debug.len;
791         vaddr_end = vaddr + size;
792         dst_vaddr = debug.dst_uaddr;
793
794         for (; vaddr < vaddr_end; vaddr = next_vaddr) {
795                 int len, s_off, d_off;
796
797                 /* lock userspace source and destination page */
798                 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
799                 if (IS_ERR(src_p))
800                         return PTR_ERR(src_p);
801
802                 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
803                 if (IS_ERR(dst_p)) {
804                         sev_unpin_memory(kvm, src_p, n);
805                         return PTR_ERR(dst_p);
806                 }
807
808                 /*
809                  * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
810                  * memory content (i.e. it will write the same memory region with C=1).
811                  * It's possible that the cache may contain the data with C=0, i.e.,
812                  * unencrypted, so invalidate it first.
813                  */
814                 sev_clflush_pages(src_p, 1);
815                 sev_clflush_pages(dst_p, 1);
816
817                 /*
818                  * Since the user buffer may not be page-aligned, calculate the
819                  * offset within the page.
820                  */
821                 s_off = vaddr & ~PAGE_MASK;
822                 d_off = dst_vaddr & ~PAGE_MASK;
823                 len = min_t(size_t, (PAGE_SIZE - s_off), size);
824
825                 if (dec)
826                         ret = __sev_dbg_decrypt_user(kvm,
827                                                      __sme_page_pa(src_p[0]) + s_off,
828                                                      dst_vaddr,
829                                                      __sme_page_pa(dst_p[0]) + d_off,
830                                                      len, &argp->error);
831                 else
832                         ret = __sev_dbg_encrypt_user(kvm,
833                                                      __sme_page_pa(src_p[0]) + s_off,
834                                                      vaddr,
835                                                      __sme_page_pa(dst_p[0]) + d_off,
836                                                      dst_vaddr,
837                                                      len, &argp->error);
838
839                 sev_unpin_memory(kvm, src_p, n);
840                 sev_unpin_memory(kvm, dst_p, n);
841
842                 if (ret)
843                         goto err;
844
845                 next_vaddr = vaddr + len;
846                 dst_vaddr = dst_vaddr + len;
847                 size -= len;
848         }
849 err:
850         return ret;
851 }
852
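/*
 * KVM_SEV_LAUNCH_SECRET: inject a secret into the guest.  The target guest
 * pages must be physically contiguous; the packet header and transport blob
 * are bounced through kernel buffers before LAUNCH_UPDATE_SECRET is issued.
 */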
853 static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
854 {
855         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
856         struct sev_data_launch_secret *data;
857         struct kvm_sev_launch_secret params;
858         struct page **pages;
859         void *blob, *hdr;
860         unsigned long n;
861         int ret, offset;
862
863         if (!sev_guest(kvm))
864                 return -ENOTTY;
865
866         if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
867                 return -EFAULT;
868
869         pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
870         if (IS_ERR(pages))
871                 return PTR_ERR(pages);
872
873         /*
874          * The secret must be copied into a contiguous memory region, so verify
875          * that the userspace memory pages are contiguous before issuing the command.
876          */
877         if (get_num_contig_pages(0, pages, n) != n) {
878                 ret = -EINVAL;
879                 goto e_unpin_memory;
880         }
881
882         ret = -ENOMEM;
883         data = kzalloc(sizeof(*data), GFP_KERNEL_ACCOUNT);
884         if (!data)
885                 goto e_unpin_memory;
886
887         offset = params.guest_uaddr & (PAGE_SIZE - 1);
888         data->guest_address = __sme_page_pa(pages[0]) + offset;
889         data->guest_len = params.guest_len;
890
891         blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
892         if (IS_ERR(blob)) {
893                 ret = PTR_ERR(blob);
894                 goto e_free;
895         }
896
897         data->trans_address = __psp_pa(blob);
898         data->trans_len = params.trans_len;
899
900         hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
901         if (IS_ERR(hdr)) {
902                 ret = PTR_ERR(hdr);
903                 goto e_free_blob;
904         }
905         data->hdr_address = __psp_pa(hdr);
906         data->hdr_len = params.hdr_len;
907
908         data->handle = sev->handle;
909         ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
910
911         kfree(hdr);
912
913 e_free_blob:
914         kfree(blob);
915 e_free:
916         kfree(data);
917 e_unpin_memory:
918         sev_unpin_memory(kvm, pages, n);
919         return ret;
920 }
921
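/*
 * Entry point for the KVM_MEMORY_ENCRYPT_OP ioctl: copy the command from
 * userspace, dispatch to the matching KVM_SEV_* handler under kvm->lock, and
 * copy the command back so userspace sees any firmware error code.
 */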
922 int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
923 {
924         struct kvm_sev_cmd sev_cmd;
925         int r;
926
927         if (!svm_sev_enabled())
928                 return -ENOTTY;
929
930         if (!argp)
931                 return 0;
932
933         if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
934                 return -EFAULT;
935
936         mutex_lock(&kvm->lock);
937
938         switch (sev_cmd.id) {
939         case KVM_SEV_INIT:
940                 r = sev_guest_init(kvm, &sev_cmd);
941                 break;
942         case KVM_SEV_LAUNCH_START:
943                 r = sev_launch_start(kvm, &sev_cmd);
944                 break;
945         case KVM_SEV_LAUNCH_UPDATE_DATA:
946                 r = sev_launch_update_data(kvm, &sev_cmd);
947                 break;
948         case KVM_SEV_LAUNCH_MEASURE:
949                 r = sev_launch_measure(kvm, &sev_cmd);
950                 break;
951         case KVM_SEV_LAUNCH_FINISH:
952                 r = sev_launch_finish(kvm, &sev_cmd);
953                 break;
954         case KVM_SEV_GUEST_STATUS:
955                 r = sev_guest_status(kvm, &sev_cmd);
956                 break;
957         case KVM_SEV_DBG_DECRYPT:
958                 r = sev_dbg_crypt(kvm, &sev_cmd, true);
959                 break;
960         case KVM_SEV_DBG_ENCRYPT:
961                 r = sev_dbg_crypt(kvm, &sev_cmd, false);
962                 break;
963         case KVM_SEV_LAUNCH_SECRET:
964                 r = sev_launch_secret(kvm, &sev_cmd);
965                 break;
966         default:
967                 r = -EINVAL;
968                 goto out;
969         }
970
971         if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
972                 r = -EFAULT;
973
974 out:
975         mutex_unlock(&kvm->lock);
976         return r;
977 }
978
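/*
 * KVM_MEMORY_ENCRYPT_REG_REGION: pin the userspace range, flush its cache
 * lines and track it on sev->regions_list so it can be unpinned on
 * unregister or at VM destruction.
 */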
979 int svm_register_enc_region(struct kvm *kvm,
980                             struct kvm_enc_region *range)
981 {
982         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
983         struct enc_region *region;
984         int ret = 0;
985
986         if (!sev_guest(kvm))
987                 return -ENOTTY;
988
989         if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
990                 return -EINVAL;
991
992         region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
993         if (!region)
994                 return -ENOMEM;
995
996         region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
997         if (IS_ERR(region->pages)) {
998                 ret = PTR_ERR(region->pages);
999                 goto e_free;
1000         }
1001
1002         /*
1003          * The guest may change the memory encryption attribute from C=0 -> C=1
1004          * or vice versa for this memory range. Let's make sure the caches are
1005          * flushed to ensure that guest data gets written into memory with the
1006          * correct C-bit.
1007          */
1008         sev_clflush_pages(region->pages, region->npages);
1009
1010         region->uaddr = range->addr;
1011         region->size = range->size;
1012
1013         mutex_lock(&kvm->lock);
1014         list_add_tail(&region->list, &sev->regions_list);
1015         mutex_unlock(&kvm->lock);
1016
1017         return ret;
1018
1019 e_free:
1020         kfree(region);
1021         return ret;
1022 }
1023
1024 static struct enc_region *
1025 find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
1026 {
1027         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1028         struct list_head *head = &sev->regions_list;
1029         struct enc_region *i;
1030
1031         list_for_each_entry(i, head, list) {
1032                 if (i->uaddr == range->addr &&
1033                     i->size == range->size)
1034                         return i;
1035         }
1036
1037         return NULL;
1038 }
1039
1040 static void __unregister_enc_region_locked(struct kvm *kvm,
1041                                            struct enc_region *region)
1042 {
1043         sev_unpin_memory(kvm, region->pages, region->npages);
1044         list_del(&region->list);
1045         kfree(region);
1046 }
1047
1048 int svm_unregister_enc_region(struct kvm *kvm,
1049                               struct kvm_enc_region *range)
1050 {
1051         struct enc_region *region;
1052         int ret;
1053
1054         mutex_lock(&kvm->lock);
1055
1056         if (!sev_guest(kvm)) {
1057                 ret = -ENOTTY;
1058                 goto failed;
1059         }
1060
1061         region = find_enc_region(kvm, range);
1062         if (!region) {
1063                 ret = -EINVAL;
1064                 goto failed;
1065         }
1066
1067         /*
1068          * Ensure that all guest tagged cache entries are flushed before
1069          * releasing the pages back to the system for use. CLFLUSH will
1070          * not do this, so issue a WBINVD.
1071          */
1072         wbinvd_on_all_cpus();
1073
1074         __unregister_enc_region_locked(kvm, region);
1075
1076         mutex_unlock(&kvm->lock);
1077         return 0;
1078
1079 failed:
1080         mutex_unlock(&kvm->lock);
1081         return ret;
1082 }
1083
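/*
 * VM teardown: flush guest-tagged cache lines with WBINVD, unpin any
 * still-registered memory regions, then deactivate/decommission the firmware
 * handle and release the ASID for recycling.
 */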
1084 void sev_vm_destroy(struct kvm *kvm)
1085 {
1086         struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
1087         struct list_head *head = &sev->regions_list;
1088         struct list_head *pos, *q;
1089
1090         if (!sev_guest(kvm))
1091                 return;
1092
1093         mutex_lock(&kvm->lock);
1094
1095         /*
1096          * Ensure that all guest tagged cache entries are flushed before
1097          * releasing the pages back to the system for use. CLFLUSH will
1098          * not do this, so issue a WBINVD.
1099          */
1100         wbinvd_on_all_cpus();
1101
1102         /*
1103          * If userspace was terminated before unregistering the memory regions,
1104          * then unpin all the registered memory.
1105          */
1106         if (!list_empty(head)) {
1107                 list_for_each_safe(pos, q, head) {
1108                         __unregister_enc_region_locked(kvm,
1109                                 list_entry(pos, struct enc_region, list));
1110                         cond_resched();
1111                 }
1112         }
1113
1114         mutex_unlock(&kvm->lock);
1115
1116         sev_unbind_asid(kvm, sev->handle);
1117         sev_asid_free(sev->asid);
1118 }
1119
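/*
 * Probe SEV support: CPUID 0x8000001F reports the ASID range (ECX = maximum
 * number of encrypted guests, EDX = minimum SEV ASID), the ASID bitmaps are
 * allocated and the PSP firmware is queried with PLATFORM_STATUS.  A non-zero
 * return means SEV cannot be used.
 */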
1120 int __init sev_hardware_setup(void)
1121 {
1122         struct sev_user_data_status *status;
1123         int rc;
1124
1125         /* Maximum number of encrypted guests supported simultaneously */
1126         max_sev_asid = cpuid_ecx(0x8000001F);
1127
1128         if (!svm_sev_enabled())
1129                 return 1;
1130
1131         /* Minimum ASID value that should be used for an SEV guest */
1132         min_sev_asid = cpuid_edx(0x8000001F);
1133
1134         /* Initialize SEV ASID bitmaps */
1135         sev_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1136         if (!sev_asid_bitmap)
1137                 return 1;
1138
1139         sev_reclaim_asid_bitmap = bitmap_zalloc(max_sev_asid, GFP_KERNEL);
1140         if (!sev_reclaim_asid_bitmap) {
1141                 /* avoid leaking sev_asid_bitmap on failure */
1142                 bitmap_free(sev_asid_bitmap);
1143                 sev_asid_bitmap = NULL;
1144                 return 1;
1145         }
1142
1143         status = kmalloc(sizeof(*status), GFP_KERNEL);
1144         if (!status)
1145                 return 1;
1146
1147         /*
1148          * Check SEV platform status.
1149          *
1150          * PLATFORM_STATUS can be called in any state; if we fail to query the
1151          * platform status then either the PSP firmware does not support the SEV
1152          * feature or the SEV firmware is dead.
1153          */
1154         rc = sev_platform_status(status, NULL);
1155         if (rc)
1156                 goto err;
1157
1158         pr_info("SEV supported\n");
1159
1160 err:
1161         kfree(status);
1162         return rc;
1163 }
1164
1165 void sev_hardware_teardown(void)
1166 {
1167         if (!svm_sev_enabled())
1168                 return;
1169
1170         bitmap_free(sev_asid_bitmap);
1171         bitmap_free(sev_reclaim_asid_bitmap);
1172
1173         sev_flush_asids();
1174 }
1175
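/*
 * Called before VMRUN of an SEV vCPU: program the guest's ASID into the VMCB
 * and request an ASID TLB flush whenever the ASID may have stale entries on
 * this pCPU (a different VMCB used the ASID here, or this VMCB last ran on a
 * different pCPU).
 */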
1176 void pre_sev_run(struct vcpu_svm *svm, int cpu)
1177 {
1178         struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
1179         int asid = sev_get_asid(svm->vcpu.kvm);
1180
1181         /* Assign the ASID allocated to this SEV guest */
1182         svm->vmcb->control.asid = asid;
1183
1184         /*
1185          * Flush guest TLB:
1186          *
1187          * 1) when a different VMCB for the same ASID is to be run on the same host CPU, or
1188          * 2) this VMCB was executed on a different host CPU in previous VMRUNs.
1189          */
1190         if (sd->sev_vmcbs[asid] == svm->vmcb &&
1191             svm->vcpu.arch.last_vmentry_cpu == cpu)
1192                 return;
1193
1194         sd->sev_vmcbs[asid] = svm->vmcb;
1195         svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
1196         vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
1197 }