// SPDX-License-Identifier: GPL-2.0
/*
 * Common Ultravisor functions and initialization
 *
 * Copyright IBM Corp. 2019, 2020
 */
#define KMSG_COMPONENT "prot_virt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sizes.h>
#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/uv.h>
/* the bootdata_preserved fields come from ones in arch/s390/boot/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif

struct uv_info __bootdata_preserved(uv_info);

#if IS_ENABLED(CONFIG_KVM)
int __bootdata_preserved(prot_virt_host);
EXPORT_SYMBOL(prot_virt_host);
EXPORT_SYMBOL(uv_info);
static int __init uv_init(unsigned long stor_base, unsigned long stor_len)
{
	struct uv_cb_init uvcb = {
		.header.cmd = UVC_CMD_INIT_UV,
		.header.len = sizeof(uvcb),
		.stor_origin = stor_base,
		.stor_len = stor_len,
	};

	if (uv_call(0, (uint64_t)&uvcb)) {
		pr_err("Ultravisor init failed with rc: 0x%x rrc: 0x%x\n",
		       uvcb.header.rc, uvcb.header.rrc);
		return -1;
	}
	return 0;
}
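
/*
 * Note: uv_call() (see asm/uv.h) wraps the Ultravisor Call instruction
 * and returns nonzero when the condition code signals failure; the
 * ultravisor-reported return and reason codes are then found in
 * uvcb.header.rc and uvcb.header.rrc, as logged above.
 */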
void __init setup_uv(void)
{
	unsigned long uv_stor_base;

	/*
	 * Keep these conditions in line with the kasan init code,
	 * see has_uv_sec_stor_limit().
	 */
	if (!is_prot_virt_host())
		return;

	if (is_prot_virt_guest()) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not available in protected guests.");
		return;
	}

	if (!test_facility(158)) {
		prot_virt_host = 0;
		pr_warn("Protected virtualization not supported by the hardware.");
		return;
	}

	uv_stor_base = (unsigned long)memblock_alloc_try_nid(
		uv_info.uv_base_stor_len, SZ_1M, SZ_2G,
		MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE);
	if (!uv_stor_base) {
		pr_warn("Failed to reserve %lu bytes for ultravisor base storage\n",
			uv_info.uv_base_stor_len);
		goto fail;
	}

	if (uv_init(uv_stor_base, uv_info.uv_base_stor_len)) {
		memblock_free(uv_stor_base, uv_info.uv_base_stor_len);
		goto fail;
	}

	pr_info("Reserving %luMB as ultravisor base storage\n",
		uv_info.uv_base_stor_len >> 20);
	return;
fail:
	pr_info("Disabling support for protected virtualization");
	prot_virt_host = 0;
}
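
/*
 * Note: is_prot_virt_host() reflects the prot_virt= kernel command line
 * option, which is parsed in the early boot code and handed over through
 * the __bootdata_preserved(prot_virt_host) declaration above.
 */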
void adjust_to_uv_max(unsigned long *vmax)
{
	if (uv_info.max_sec_stor_addr)
		*vmax = min_t(unsigned long, *vmax, uv_info.max_sec_stor_addr);
}
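
/*
 * Usage sketch (hypothetical caller; the variable name is assumed): the
 * memory setup code passes in its current maximum address, so that no
 * memory above what the ultravisor can manage as secure storage is used:
 *
 *	unsigned long vmax = memory_end;
 *	adjust_to_uv_max(&vmax);
 */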
/*
 * Requests the Ultravisor to pin the page in the shared state. This will
 * cause an intercept when the guest attempts to unshare the pinned page.
 */
static int uv_pin_shared(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_PIN_PAGE_SHARED,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
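
/*
 * uv_pin_shared() is consumed by arch_make_page_accessible() below: for
 * a page the guest already shares with the host, pinning it in the
 * shared state avoids a full export while still guaranteeing that the
 * host can keep accessing it during I/O.
 */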
/*
 * Requests the Ultravisor to destroy a guest page and make it
 * accessible to the host. The destroy clears the page instead of
 * exporting it.
 *
 * @paddr: Absolute host address of page to be destroyed
 */
int uv_destroy_page(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_DESTR_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
/*
 * Requests the Ultravisor to encrypt a guest page and make it
 * accessible to the host for paging (export).
 *
 * @paddr: Absolute host address of page to be exported
 */
int uv_convert_from_secure(unsigned long paddr)
{
	struct uv_cb_cfs uvcb = {
		.header.cmd = UVC_CMD_CONV_FROM_SEC_STOR,
		.header.len = sizeof(uvcb),
		.paddr = paddr,
	};

	if (uv_call(0, (u64)&uvcb))
		return -EINVAL;
	return 0;
}
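
/*
 * Destroy and export share struct uv_cb_cfs; the two commands differ
 * only in whether the page content is wiped (destroy) or encrypted and
 * kept around for a later import (export).
 */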
/*
 * Calculate the expected ref_count for a page that would otherwise have no
 * further pins. This was cribbed from similar functions in other places in
 * the kernel, but with some slight modifications. We know that a secure
 * page can not be a huge page for example.
 */
static int expected_page_refs(struct page *page)
{
	int res;

	res = page_mapcount(page);
	if (PageSwapCache(page)) {
		res++;
	} else if (page_mapping(page)) {
		res++;
		if (page_has_private(page))
			res++;
	}
	return res;
}
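
/*
 * Worked example (a sketch under the usual refcounting rules): an
 * anonymous page mapped by exactly one PTE and present in the swap
 * cache counts one reference for the mapping plus one for the swap
 * cache, so expected_page_refs() returns 2. Any extra reference, for
 * instance one held by a pagevec, makes the page_ref_freeze() in
 * make_secure_pte() fail and surfaces as -EBUSY.
 */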
static int make_secure_pte(pte_t *ptep, unsigned long addr,
			   struct page *exp_page, struct uv_cb_header *uvcb)
{
	pte_t entry = READ_ONCE(*ptep);
	struct page *page;
	int expected, rc = 0;

	if (!pte_present(entry))
		return -ENXIO;
	if (pte_val(entry) & _PAGE_INVALID)
		return -ENXIO;

	page = pte_page(entry);
	if (page != exp_page)
		return -ENXIO;
	if (PageWriteback(page))
		return -EAGAIN;
	expected = expected_page_refs(page);
	if (!page_ref_freeze(page, expected))
		return -EBUSY;
	set_bit(PG_arch_1, &page->flags);
	rc = uv_call(0, (u64)uvcb);
	page_ref_unfreeze(page, expected);
	/* Return -ENXIO if the page was not mapped, -EINVAL otherwise */
	if (rc)
		rc = uvcb->rc == 0x10a ? -ENXIO : -EINVAL;
	return rc;
}
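
/*
 * Note on the freeze window above: page_ref_freeze() drops the page
 * refcount to zero only if it exactly matches the expected value, which
 * keeps concurrent speculative page references away for the duration of
 * the UV call; page_ref_unfreeze() then restores the saved count.
 */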
/*
 * Requests the Ultravisor to make a page accessible to a guest.
 * If it's brought in the first time, it will be cleared. If
 * it has been exported before, it will be decrypted and integrity
 * checked.
 */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb)
{
	struct vm_area_struct *vma;
	bool local_drain = false;
	spinlock_t *ptelock;
	unsigned long uaddr;
	struct page *page;
	pte_t *ptep;
	int rc;

again:
	rc = -EFAULT;
	mmap_read_lock(gmap->mm);

	uaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(uaddr))
		goto out;
	vma = find_vma(gmap->mm, uaddr);
	if (!vma)
		goto out;
	/*
	 * Secure pages cannot be huge and userspace should not combine both.
	 * In case userspace does it anyway this will result in an -EFAULT for
	 * the unpack. The guest is thus never reaching secure mode. If
	 * userspace plays dirty tricks with mapping huge pages later
	 * on this will result in a segmentation fault.
	 */
	if (is_vm_hugetlb_page(vma))
		goto out;

	rc = -ENXIO;
	page = follow_page(vma, uaddr, FOLL_WRITE);
	if (IS_ERR_OR_NULL(page))
		goto out;

	lock_page(page);
	ptep = get_locked_pte(gmap->mm, uaddr, &ptelock);
	rc = make_secure_pte(ptep, uaddr, page, uvcb);
	pte_unmap_unlock(ptep, ptelock);
	unlock_page(page);
out:
	mmap_read_unlock(gmap->mm);

	if (rc == -EAGAIN) {
		wait_on_page_writeback(page);
	} else if (rc == -EBUSY) {
		/*
		 * If we have tried a local drain and the page refcount
		 * still does not match our expected safe value, try with a
		 * system wide drain. This is needed if the pagevecs holding
		 * the page are on a different CPU.
		 */
		if (local_drain) {
			lru_add_drain_all();
			/* We give up here, and let the caller try again */
			return -EAGAIN;
		}
		/*
		 * We are here if the page refcount does not match the
		 * expected safe value. The main culprits are usually
		 * pagevecs. With lru_add_drain() we drain the pagevecs
		 * on the local CPU so that hopefully the refcount will
		 * reach the expected safe value.
		 */
		lru_add_drain();
		local_drain = true;
		/* And now we try again immediately after draining */
		goto again;
	} else if (rc == -ENXIO) {
		if (gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE))
			return -EFAULT;
		return -EAGAIN;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_make_secure);
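
/*
 * Caller sketch (hypothetical, modeled on the -EAGAIN convention
 * established above): users such as the KVM unpack path are expected to
 * simply retry until the page could be made secure:
 *
 *	do {
 *		rc = gmap_convert_to_secure(gmap, gaddr);
 *	} while (rc == -EAGAIN);
 */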
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr)
{
	struct uv_cb_cts uvcb = {
		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
		.header.len = sizeof(uvcb),
		.guest_handle = gmap->guest_handle,
		.gaddr = gaddr,
	};

	return gmap_make_secure(gmap, gaddr, &uvcb);
}
EXPORT_SYMBOL_GPL(gmap_convert_to_secure);
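
/*
 * Unlike the struct uv_cb_cfs used for export and destroy, the
 * conversion control block also carries gmap->guest_handle, so the
 * ultravisor can attribute the page to the right secure configuration.
 */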
/*
 * To be called with the page locked or with an extra reference! This will
 * prevent gmap_make_secure from touching the page concurrently. Having 2
 * parallel make_page_accessible is fine, as the UV calls will become a
 * no-op if the page is already exported.
 */
int arch_make_page_accessible(struct page *page)
{
	int rc = 0;

	/* Hugepage cannot be protected, so nothing to do */
	if (PageHuge(page))
		return 0;

	/*
	 * PG_arch_1 is used in 3 places:
	 * 1. for kernel page tables during early boot
	 * 2. for storage keys of huge pages and KVM
	 * 3. As an indication that this page might be secure. This can
	 *    overindicate, e.g. we set the bit before calling
	 *    convert_to_secure.
	 * As secure pages are never huge, all 3 variants can co-exist.
	 */
	if (!test_bit(PG_arch_1, &page->flags))
		return 0;

	rc = uv_pin_shared(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	rc = uv_convert_from_secure(page_to_phys(page));
	if (!rc) {
		clear_bit(PG_arch_1, &page->flags);
		return 0;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(arch_make_page_accessible);
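
/*
 * arch_make_page_accessible() is the hook through which common memory
 * management code (e.g. the writeback and gup paths) asks the
 * architecture to make a potentially secure page readable by the host
 * before doing I/O on it; a usage sketch under the locking rule stated
 * above:
 *
 *	lock_page(page);
 *	rc = arch_make_page_accessible(page);
 *	unlock_page(page);
 */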
#endif

#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
static ssize_t uv_query_facilities(struct kobject *kobj,
				   struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n%lx\n%lx\n%lx\n",
			 uv_info.inst_calls_list[0],
			 uv_info.inst_calls_list[1],
			 uv_info.inst_calls_list[2],
			 uv_info.inst_calls_list[3]);
}

static struct kobj_attribute uv_query_facilities_attr =
	__ATTR(facilities, 0444, uv_query_facilities, NULL);

static ssize_t uv_query_max_guest_cpus(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_guest_cpus);
}

static struct kobj_attribute uv_query_max_guest_cpus_attr =
	__ATTR(max_cpus, 0444, uv_query_max_guest_cpus, NULL);

static ssize_t uv_query_max_guest_vms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n",
			 uv_info.max_num_sec_conf);
}

static struct kobj_attribute uv_query_max_guest_vms_attr =
	__ATTR(max_guests, 0444, uv_query_max_guest_vms, NULL);

static ssize_t uv_query_max_guest_addr(struct kobject *kobj,
				       struct kobj_attribute *attr, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%lx\n",
			 uv_info.max_sec_stor_addr);
}

static struct kobj_attribute uv_query_max_guest_addr_attr =
	__ATTR(max_address, 0444, uv_query_max_guest_addr, NULL);

static struct attribute *uv_query_attrs[] = {
	&uv_query_facilities_attr.attr,
	&uv_query_max_guest_cpus_attr.attr,
	&uv_query_max_guest_vms_attr.attr,
	&uv_query_max_guest_addr_attr.attr,
	NULL,
};

static struct attribute_group uv_query_attr_group = {
	.attrs = uv_query_attrs,
};

static struct kset *uv_query_kset;
static struct kobject *uv_kobj;
static int __init uv_info_init(void)
{
	int rc = -ENOMEM;

	if (!test_facility(158))
		return 0;

	uv_kobj = kobject_create_and_add("uv", firmware_kobj);
	if (!uv_kobj)
		return -ENOMEM;

	uv_query_kset = kset_create_and_add("query", NULL, uv_kobj);
	if (!uv_query_kset)
		goto out_kobj;

	rc = sysfs_create_group(&uv_query_kset->kobj, &uv_query_attr_group);
	if (!rc)
		return 0;

	kset_unregister(uv_query_kset);
out_kobj:
	kobject_del(uv_kobj);
	kobject_put(uv_kobj);
	return rc;
}
device_initcall(uv_info_init);
#endif
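
/*
 * The resulting sysfs layout follows directly from the kobject/kset
 * names registered above and can be queried from userspace, e.g.:
 *
 *	$ cat /sys/firmware/uv/query/max_cpus
 *	$ cat /sys/firmware/uv/query/facilities
 */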