// SPDX-License-Identifier: GPL-2.0-only
/*
 * Extensible Firmware Interface
 *
 * Based on Extensible Firmware Interface Specification version 2.4
 *
 * Copyright (C) 2013, 2014 Linaro Ltd.
 */

#include <linux/efi.h>
#include <linux/init.h>
#include <linux/screen_info.h>
#include <linux/vmalloc.h>

#include <asm/efi.h>
#include <asm/stacktrace.h>

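/*
 * When the kernel page size is larger than EFI_PAGE_SIZE (4 KiB), a runtime
 * region described by the firmware may not start or end on an OS page
 * boundary, which limits how strictly it can be mapped.
 */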
static bool region_is_misaligned(const efi_memory_desc_t *md)
{
	if (PAGE_SIZE == EFI_PAGE_SIZE)
		return false;
	return !PAGE_ALIGNED(md->phys_addr) ||
	       !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT);
}

/*
 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
 * executable, everything else can be mapped with the XN bits
 * set. Also take the new (optional) RO/XP bits into account.
 */
static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
{
	u64 attr = md->attribute;
	u32 type = md->type;

	if (type == EFI_MEMORY_MAPPED_IO)
		return PROT_DEVICE_nGnRE;

	if (region_is_misaligned(md)) {
		static bool __initdata code_is_misaligned;

		/*
		 * Regions that are not aligned to the OS page size cannot be
		 * mapped with strict permissions, as those might interfere
		 * with the permissions that are needed by the adjacent
		 * region's mapping. However, if we haven't encountered any
		 * misaligned runtime code regions so far, we can safely use
		 * non-executable permissions for non-code regions.
		 */
		code_is_misaligned |= (type == EFI_RUNTIME_SERVICES_CODE);

		return code_is_misaligned ? pgprot_val(PAGE_KERNEL_EXEC)
					  : pgprot_val(PAGE_KERNEL);
	}

	/* R-- */
	if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
	    (EFI_MEMORY_XP | EFI_MEMORY_RO))
		return pgprot_val(PAGE_KERNEL_RO);

	/* R-X */
	if (attr & EFI_MEMORY_RO)
		return pgprot_val(PAGE_KERNEL_ROX);

	/* RW- */
	if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
	     EFI_MEMORY_XP) ||
	    type != EFI_RUNTIME_SERVICES_CODE)
		return pgprot_val(PAGE_KERNEL);

	/* RWX */
	return pgprot_val(PAGE_KERNEL_EXEC);
}

/* we will fill this structure from the stub, so don't put it in .bss */
struct screen_info screen_info __section(".data");
EXPORT_SYMBOL(screen_info);

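/*
 * Map a single EFI memory region into @mm, using protections derived from
 * the region's memory descriptor.
 */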
int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
{
	pteval_t prot_val = create_mapping_protection(md);
	bool page_mappings_only = (md->type == EFI_RUNTIME_SERVICES_CODE ||
				   md->type == EFI_RUNTIME_SERVICES_DATA);

	/*
	 * If this region is not aligned to the page size used by the OS, the
	 * mapping will be rounded outwards, and may end up sharing a page
	 * frame with an adjacent runtime memory region. Given that the page
	 * table descriptor covering the shared page will be rewritten when the
	 * adjacent region gets mapped, we must avoid block mappings here so we
	 * don't have to worry about splitting them when that happens.
	 */
	if (region_is_misaligned(md))
		page_mappings_only = true;

	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
			   md->num_pages << EFI_PAGE_SHIFT,
			   __pgprot(prot_val | PTE_NG), page_mappings_only);
	return 0;
}

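/* parameter block passed down to set_permissions() via apply_to_page_range() */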
struct set_perm_data {
	const efi_memory_desc_t	*md;
	bool			has_bti;
};

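/*
 * apply_to_page_range() callback: tighten the protections of a single PTE
 * according to the attributes of the covering memory descriptor.
 */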
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
	struct set_perm_data *spd = data;
	const efi_memory_desc_t *md = spd->md;
	pte_t pte = READ_ONCE(*ptep);

	if (md->attribute & EFI_MEMORY_RO)
		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	if (md->attribute & EFI_MEMORY_XP)
		pte = set_pte_bit(pte, __pgprot(PTE_PXN));
	else if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) &&
		 system_supports_bti() && spd->has_bti)
		pte = set_pte_bit(pte, __pgprot(PTE_GP));
	set_pte(ptep, pte);
	return 0;
}

int __init efi_set_mapping_permissions(struct mm_struct *mm,
				       efi_memory_desc_t *md,
				       bool has_bti)
{
	struct set_perm_data data = { md, has_bti };

	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
	       md->type != EFI_RUNTIME_SERVICES_DATA);

	if (region_is_misaligned(md))
		return 0;

	/*
	 * Calling apply_to_page_range() is only safe on regions that are
	 * guaranteed to be mapped down to pages. Since we are only called
	 * for regions that have been mapped using efi_create_mapping() above
	 * (and this is checked by the generic Memory Attributes table parsing
	 * routines), there is no need to check that again here.
	 */
	return apply_to_page_range(mm, md->virt_addr,
				   md->num_pages << EFI_PAGE_SHIFT,
				   set_permissions, &data);
}

/*
 * UpdateCapsule() depends on the system being shutdown via
 * ResetSystem().
 */
bool efi_poweroff_required(void)
{
	return efi_enabled(EFI_RUNTIME_SERVICES);
}

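/*
 * x18 is the platform register, and the shadow call stack pointer when
 * CONFIG_SHADOW_CALL_STACK is enabled; the EFI call wrapper reports here
 * when it detects that firmware has clobbered it.
 */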
asmlinkage efi_status_t efi_handle_corrupted_x18(efi_status_t s, const char *f)
{
	pr_err_ratelimited(FW_BUG "register x18 corrupted by EFI %s\n", f);
	return s;
}

static DEFINE_RAW_SPINLOCK(efi_rt_lock);

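/*
 * EFI runtime services are not reentrant: serialize all calls under
 * efi_rt_lock, after switching to the EFI page tables and preserving the
 * task's FP/SIMD state.
 */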
void arch_efi_call_virt_setup(void)
{
	efi_virtmap_load();
	__efi_fpsimd_begin();
	raw_spin_lock(&efi_rt_lock);
}

void arch_efi_call_virt_teardown(void)
{
	raw_spin_unlock(&efi_rt_lock);
	__efi_fpsimd_end();
	efi_virtmap_unload();
}

asmlinkage u64 *efi_rt_stack_top __ro_after_init;

asmlinkage efi_status_t __efi_rt_asm_recover(void);

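/*
 * Fix up a synchronous exception taken while an EFI runtime service was
 * running: disable runtime services, and divert execution to the recovery
 * stub so the kernel can carry on without further firmware calls.
 */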
bool efi_runtime_fixup_exception(struct pt_regs *regs, const char *msg)
{
	/* Check whether the exception occurred while running the firmware */
	if (!current_in_efi() || regs->pc >= TASK_SIZE_64)
		return false;

	pr_err(FW_BUG "Unable to handle %s in EFI runtime service\n", msg);
	add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
	clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);

	regs->regs[0]	= EFI_ABORTED;
	regs->regs[30]	= efi_rt_stack_top[-1];
	regs->pc	= (u64)__efi_rt_asm_recover;

	if (IS_ENABLED(CONFIG_SHADOW_CALL_STACK))
		regs->regs[18] = efi_rt_stack_top[-2];

	return true;
}

/* EFI requires 8 KiB of stack space for runtime services */
static_assert(THREAD_SIZE >= SZ_8K);

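/*
 * Allocate a dedicated stack for EFI runtime services at boot, so that a
 * firmware call that faults or overflows cannot corrupt the caller's stack.
 */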
static int __init arm64_efi_rt_init(void)
{
	void *p;

	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		return 0;

	p = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, GFP_KERNEL,
			   NUMA_NO_NODE, __builtin_return_address(0));
	if (!p) {
		pr_warn("Failed to allocate EFI runtime stack\n");
		clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
		return -ENOMEM;
	}

	efi_rt_stack_top = p + THREAD_SIZE;
	return 0;
}
core_initcall(arm64_efi_rt_init);