// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */
#define DISABLE_BRANCH_PROFILING

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/mem_encrypt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/virtio_config.h>

#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/bootparam.h>
#include <asm/set_memory.h>
#include <asm/cacheflush.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/cmdline.h>

#include "mm_internal.h"
/*
 * Since SME related variables are set early in the boot process they must
 * reside in the .data section so as not to be zeroed out when the .bss
 * section is later cleared.
 */
u64 sme_me_mask __section(".data") = 0;
u64 sev_status __section(".data") = 0;
u64 sev_check_data __section(".data") = 0;
EXPORT_SYMBOL(sme_me_mask);

DEFINE_STATIC_KEY_FALSE(sev_enable_key);
EXPORT_SYMBOL_GPL(sev_enable_key);
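/*
 * Illustrative note (example values, not part of the original comments):
 * sme_me_mask holds the hardware "C" (encryption) bit as a page-table mask.
 * The bit position is reported by CPUID 0x8000001F, e.g. bit 47 on many
 * EPYC parts, which would give:
 *
 *      sme_me_mask == 1ULL << 47;
 *      __sme_set(paddr)  ->  paddr | sme_me_mask    (mark encrypted)
 *      __sme_clr(paddr)  ->  paddr & ~sme_me_mask   (mark decrypted)
 *
 * sev_status caches the MSR_AMD64_SEV value so the SEV/SEV-ES state can be
 * checked without re-reading the MSR.
 */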
/* Buffer used for early in-place encryption by BSP, no locking needed */
static char sme_early_buffer[PAGE_SIZE] __initdata __aligned(PAGE_SIZE);
/*
 * This routine does not change the underlying encryption setting of the
 * page(s) that map this memory. It assumes that eventually the memory is
 * meant to be accessed as either encrypted or decrypted but the contents
 * are currently not in the desired state.
 *
 * This routine follows the steps outlined in the AMD64 Architecture
 * Programmer's Manual Volume 2, Section 7.10.8 Encrypt-in-Place.
 */
static void __init __sme_early_enc_dec(resource_size_t paddr,
				       unsigned long size, bool enc)
{
	void *src, *dst;
	size_t len;

	if (!sme_me_mask)
		return;

	wbinvd();

	/*
	 * There are a limited number of early mapping slots, so map (at most)
	 * one page at a time.
	 */
	while (size) {
		len = min_t(size_t, sizeof(sme_early_buffer), size);

		/*
		 * Create mappings for the current and desired format of
		 * the memory. Use a write-protected mapping for the source.
		 */
		src = enc ? early_memremap_decrypted_wp(paddr, len) :
			    early_memremap_encrypted_wp(paddr, len);
		dst = enc ? early_memremap_encrypted(paddr, len) :
			    early_memremap_decrypted(paddr, len);

		/*
		 * If a mapping can't be obtained to perform the operation,
		 * then eventual access of that area in the desired mode
		 * will cause a crash.
		 */
		BUG_ON(!src || !dst);

		/*
		 * Use a temporary buffer, of cache-line multiple size, to
		 * avoid data corruption as documented in the APM.
		 */
		memcpy(sme_early_buffer, src, len);
		memcpy(dst, sme_early_buffer, len);

		early_memunmap(dst, len);
		early_memunmap(src, len);

		paddr += len;
		size -= len;
	}
}
void __init sme_early_encrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, true);
}

void __init sme_early_decrypt(resource_size_t paddr, unsigned long size)
{
	__sme_early_enc_dec(paddr, size, false);
}
static void __init __sme_early_map_unmap_mem(void *vaddr, unsigned long size,
					     bool map)
{
	unsigned long paddr = (unsigned long)vaddr - __PAGE_OFFSET;
	pmdval_t pmd_flags, pmd;

	/* Use early_pmd_flags but remove the encryption mask */
	pmd_flags = __sme_clr(early_pmd_flags);

	do {
		pmd = map ? (paddr & PMD_MASK) + pmd_flags : 0;
		__early_make_pgtable((unsigned long)vaddr, pmd);

		vaddr += PMD_SIZE;
		paddr += PMD_SIZE;
		size = (size <= PMD_SIZE) ? 0 : size - PMD_SIZE;
	} while (size);

	flush_tlb_local();
}
void __init sme_unmap_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	/* Get the command line address before unmapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), false);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, false);
}
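/*
 * Illustrative example (hypothetical values): the 64-bit command line
 * address above is assembled from the 32-bit hdr.cmd_line_ptr with
 * ext_cmd_line_ptr supplying the upper 32 bits. With cmd_line_ptr =
 * 0x0009d000 and ext_cmd_line_ptr = 0x1, cmdline_paddr would be
 * 0x10009d000.
 */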
void __init sme_map_bootdata(char *real_mode_data)
{
	struct boot_params *boot_data;
	unsigned long cmdline_paddr;

	if (!sme_active())
		return;

	__sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);

	/* Get the command line address after mapping the real_mode_data */
	boot_data = (struct boot_params *)real_mode_data;
	cmdline_paddr = boot_data->hdr.cmd_line_ptr | ((u64)boot_data->ext_cmd_line_ptr << 32);

	if (!cmdline_paddr)
		return;

	__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}
void __init sme_early_init(void)
{
	unsigned int i;

	if (!sme_me_mask)
		return;

	early_pmd_flags = __sme_set(early_pmd_flags);

	__supported_pte_mask = __sme_set(__supported_pte_mask);

	/* Update the protection map with memory encryption mask */
	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
		protection_map[i] = pgprot_encrypted(protection_map[i]);

	if (sev_active())
		swiotlb_force = SWIOTLB_FORCE;
}
void __init sev_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	if (!sev_active())
		return;

	/*
	 * For SEV, all DMA has to occur via shared/unencrypted pages.
	 * SEV uses SWIOTLB to make this happen without changing device
	 * drivers. However, depending on the workload being run, the
	 * default 64MB of SWIOTLB may not be enough and SWIOTLB may
	 * run out of buffers for DMA, resulting in I/O errors and/or
	 * performance degradation especially with high I/O workloads.
	 *
	 * Adjust the default size of SWIOTLB for SEV guests using
	 * a percentage of guest memory for SWIOTLB buffers.
	 * Also, as the SWIOTLB bounce buffer memory is allocated
	 * from low memory, ensure that the adjusted size is within
	 * the limits of low available memory.
	 *
	 * The percentage of guest memory used here for SWIOTLB buffers
	 * approximates the static adjustment that would use 64MB for <1G
	 * guests and ~128M to 256M for 1G-to-4G guests, i.e., roughly 6%
	 * of guest memory.
	 */
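	/*
	 * Worked example (illustrative): a 4GB guest yields
	 * 4096MB * 6 / 100 ~= 245MB, which clamp_val() keeps within
	 * [IO_TLB_DEFAULT_SIZE (64MB), SZ_1G]; a 1GB guest computes
	 * ~61MB and is raised to the 64MB floor.
	 */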
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);
}
static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
{
	pgprot_t old_prot, new_prot;
	unsigned long pfn, pa, size;
	pte_t new_pte;

	switch (level) {
	case PG_LEVEL_4K:
		pfn = pte_pfn(*kpte);
		old_prot = pte_pgprot(*kpte);
		break;
	case PG_LEVEL_2M:
		pfn = pmd_pfn(*(pmd_t *)kpte);
		old_prot = pmd_pgprot(*(pmd_t *)kpte);
		break;
	case PG_LEVEL_1G:
		pfn = pud_pfn(*(pud_t *)kpte);
		old_prot = pud_pgprot(*(pud_t *)kpte);
		break;
	default:
		return;
	}

	new_prot = old_prot;
	if (enc)
		pgprot_val(new_prot) |= _PAGE_ENC;
	else
		pgprot_val(new_prot) &= ~_PAGE_ENC;

	/* If the protections are unchanged, there is nothing to do. */
	if (pgprot_val(old_prot) == pgprot_val(new_prot))
		return;

	pa = pfn << PAGE_SHIFT;
	size = page_level_size(level);

	/*
	 * We are going to perform in-place en-/decryption and change the
	 * physical page attribute from C=1 to C=0 or vice versa. Flush the
	 * caches to ensure that data gets accessed with the correct C-bit.
	 */
	clflush_cache_range(__va(pa), size);

	/* Encrypt/decrypt the contents in-place */
	if (enc)
		sme_early_encrypt(pa, size);
	else
		sme_early_decrypt(pa, size);

	/* Change the page encryption mask. */
	new_pte = pfn_pte(pfn, new_prot);
	set_pte_atomic(kpte, new_pte);
}
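/*
 * For reference: page_level_size() returns 4KB, 2MB or 1GB for
 * PG_LEVEL_4K/2M/1G respectively, so a single call to the helper above can
 * re-encrypt or re-decrypt up to a whole 1GB mapping in place.
 */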
static int __init early_set_memory_enc_dec(unsigned long vaddr,
					   unsigned long size, bool enc)
{
	unsigned long vaddr_end, vaddr_next;
	unsigned long psize, pmask;
	int split_page_size_mask;
	int level, ret;
	pte_t *kpte;

	vaddr_next = vaddr;
	vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr = vaddr_next) {
		kpte = lookup_address(vaddr, &level);
		if (!kpte || pte_none(*kpte)) {
			ret = 1;
			goto out;
		}

		if (level == PG_LEVEL_4K) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & PAGE_MASK) + PAGE_SIZE;
			continue;
		}

		psize = page_level_size(level);
		pmask = page_level_mask(level);

		/*
		 * Check whether we can change the large page in one go.
		 * We request a split when the address is not aligned or
		 * the number of pages to set/clear the encryption bit is
		 * smaller than the number of pages in the large page.
		 */
		if (vaddr == (vaddr & pmask) &&
		    ((vaddr_end - vaddr) >= psize)) {
			__set_clr_pte_enc(kpte, level, enc);
			vaddr_next = (vaddr & pmask) + psize;
			continue;
		}

		/*
		 * The virtual address is part of a larger page, so create the
		 * next level page table mapping (4K or 2M). If it is part of
		 * a 2M page then we request a split of the large page into 4K
		 * chunks. A 1GB large page is split into 2M pages.
		 */
		if (level == PG_LEVEL_2M)
			split_page_size_mask = 0;
		else
			split_page_size_mask = 1 << PG_LEVEL_2M;

		/*
		 * kernel_physical_mapping_change() does not flush the TLBs, so
		 * a TLB flush is required after we exit from the for loop.
		 */
		kernel_physical_mapping_change(__pa(vaddr & pmask),
					       __pa((vaddr_end & pmask) + psize),
					       split_page_size_mask);
	}

	ret = 0;

out:
	__flush_tlb_all();
	return ret;
}
int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, false);
}

int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size)
{
	return early_set_memory_enc_dec(vaddr, size, true);
}
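/*
 * Illustrative usage (hypothetical caller, not taken from this file): an SEV
 * guest that wants to share an otherwise-encrypted page with the hypervisor,
 * e.g. for per-CPU data, would do something like
 *
 *      early_set_memory_decrypted((unsigned long)ptr, PAGE_SIZE);
 *
 * before handing the physical address to the host, and the reverse with
 * early_set_memory_encrypted() if the page later returns to private use.
 */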
/*
 * SME and SEV are very similar but they are not the same, so there are
 * times that the kernel will need to distinguish between SME and SEV. The
 * sme_active() and sev_active() functions are used for this. When a
 * distinction isn't needed, the mem_encrypt_active() function can be used.
 *
 * The trampoline code is a good example for this requirement. Before
 * paging is activated, SME will access all memory as decrypted, but SEV
 * will access all memory as encrypted. So, when APs are being brought
 * up under SME the trampoline area cannot be encrypted, whereas under SEV
 * the trampoline area must be encrypted.
 */
bool sev_active(void)
{
	return sev_status & MSR_AMD64_SEV_ENABLED;
}

bool sme_active(void)
{
	return sme_me_mask && !sev_active();
}
EXPORT_SYMBOL_GPL(sev_active);

/* Needs to be called from non-instrumentable code */
bool noinstr sev_es_active(void)
{
	return sev_status & MSR_AMD64_SEV_ES_ENABLED;
}
/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
	/* For SEV, all DMA must be to unencrypted addresses. */
	if (sev_active())
		return true;

	/*
	 * For SME, all DMA must be to unencrypted addresses if the
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
	if (sme_active()) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);

		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}
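/*
 * Illustrative example (assumed C-bit position 47): __ffs64(sme_me_mask) is
 * then 47 and dma_enc_mask = DMA_BIT_MASK(47). A device limited to 32-bit
 * DMA has dma_dev_mask = DMA_BIT_MASK(32), which is <= dma_enc_mask, so its
 * DMA is forced through unencrypted (SWIOTLB) buffers. A device with a full
 * 64-bit mask can address the C-bit and keeps using encrypted memory.
 */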
void __init mem_encrypt_free_decrypted_mem(void)
{
	unsigned long vaddr, vaddr_end, npages;
	int r;

	vaddr = (unsigned long)__start_bss_decrypted_unused;
	vaddr_end = (unsigned long)__end_bss_decrypted;
	npages = (vaddr_end - vaddr) >> PAGE_SHIFT;

	/*
	 * The unused memory range was mapped decrypted, change the encryption
	 * attribute from decrypted to encrypted before freeing it.
	 */
	if (mem_encrypt_active()) {
		r = set_memory_encrypted(vaddr, npages);
		if (r) {
			pr_warn("failed to free unused decrypted pages\n");
			return;
		}
	}

	free_init_pages("unused decrypted", vaddr, vaddr_end);
}
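/*
 * Note (assumption about surrounding kernel code): __start_bss_decrypted_unused
 * and __end_bss_decrypted are linker-script symbols bounding the unused tail
 * of the page-aligned .bss..decrypted section; only that tail is re-encrypted
 * and freed above.
 */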
static void print_mem_encrypt_feature_info(void)
{
	pr_info("AMD Memory Encryption Features active:");

	/* Secure Memory Encryption */
	if (sme_active()) {
		/*
		 * SME is mutually exclusive with any of the SEV
		 * features below.
		 */
		pr_cont(" SME\n");
		return;
	}

	/* Secure Encrypted Virtualization */
	if (sev_active())
		pr_cont(" SEV");

	/* Encrypted Register State */
	if (sev_es_active())
		pr_cont(" SEV-ES");

	pr_cont("\n");
}
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!sme_me_mask)
		return;

	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
	swiotlb_update_mem_attributes();

	/*
	 * With SEV, we need to unroll the rep string I/O instructions,
	 * but SEV-ES supports them through the #VC handler.
	 */
	if (sev_active() && !sev_es_active())
		static_branch_enable(&sev_enable_key);

	print_mem_encrypt_feature_info();
}
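/*
 * Note (hedged): sev_enable_key is consumed elsewhere (e.g. by the port I/O
 * string helpers) to fall back to unrolled single accesses on plain SEV
 * guests, since SEV cannot handle "rep ins/outs" directly; SEV-ES guests
 * handle those through the #VC handler, as the comment above describes.
 */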
int arch_has_restricted_virtio_memory_access(void)
{
	return sev_active();
}
EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);