// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd <ard.biesheuvel@linaro.org>
 */
8 #include <asm/mach/map.h>
9 #include <asm/mmu_context.h>
11 static int __init set_permissions(pte_t *ptep, pgtable_t token,
12 unsigned long addr, void *data)
14 efi_memory_desc_t *md = data;
17 if (md->attribute & EFI_MEMORY_RO)
18 pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
19 if (md->attribute & EFI_MEMORY_XP)
20 pte = set_pte_bit(pte, __pgprot(L_PTE_XN));
21 set_pte_ext(ptep, pte, PTE_EXT_NG);
25 int __init efi_set_mapping_permissions(struct mm_struct *mm,
26 efi_memory_desc_t *md)
28 unsigned long base, size;
31 size = md->num_pages << EFI_PAGE_SHIFT;
34 * We can only use apply_to_page_range() if we can guarantee that the
35 * entire region was mapped using pages. This should be the case if the
36 * region does not cover any naturally aligned SECTION_SIZE sized
39 if (round_down(base + size, SECTION_SIZE) <
40 round_up(base, SECTION_SIZE) + SECTION_SIZE)
41 return apply_to_page_range(mm, base, size, set_permissions, md);
46 int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
48 struct map_desc desc = {
49 .virtual = md->virt_addr,
50 .pfn = __phys_to_pfn(md->phys_addr),
51 .length = md->num_pages * EFI_PAGE_SIZE,
55 * Order is important here: memory regions may have all of the
56 * bits below set (and usually do), so we check them in order of
59 if (md->attribute & EFI_MEMORY_WB)
60 desc.type = MT_MEMORY_RWX;
61 else if (md->attribute & EFI_MEMORY_WT)
62 desc.type = MT_MEMORY_RWX_NONCACHED;
63 else if (md->attribute & EFI_MEMORY_WC)
64 desc.type = MT_DEVICE_WC;
66 desc.type = MT_DEVICE;
68 create_mapping_late(mm, &desc, true);
71 * If stricter permissions were specified, apply them now.
73 if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
74 return efi_set_mapping_permissions(mm, md);