// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#include <asm/setup.h>

#include "physaddr.h"

struct ioremap_mem_flags {
        bool system_ram;
        bool desc_other;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        enum page_cache_mode pcm)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (pcm) {
        case _PAGE_CACHE_MODE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_MODE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_MODE_WT:
                err = _set_memory_wt(vaddr, nrpages);
                break;
        case _PAGE_CACHE_MODE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

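/*
 * Note: drivers do not call ioremap_change_attr() directly; the PAT code
 * (e.g. kernel_map_sync_memtype()) uses it to keep the direct map's cache
 * attributes consistent with a new I/O mapping of the same physical range.
 */
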
static bool __ioremap_check_ram(struct resource *res)
{
        unsigned long start_pfn, stop_pfn;
        unsigned long i;

        if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
                return false;

        start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
        stop_pfn = (res->end + 1) >> PAGE_SHIFT;
        if (stop_pfn > start_pfn) {
                for (i = 0; i < (stop_pfn - start_pfn); ++i)
                        if (pfn_valid(start_pfn + i) &&
                            !PageReserved(pfn_to_page(start_pfn + i)))
                                return true;
        }

        return false;
}

static int __ioremap_check_desc_other(struct resource *res)
{
        return (res->desc != IORES_DESC_NONE);
}

static int __ioremap_res_check(struct resource *res, void *arg)
{
        struct ioremap_mem_flags *flags = arg;

        if (!flags->system_ram)
                flags->system_ram = __ioremap_check_ram(res);

        if (!flags->desc_other)
                flags->desc_other = __ioremap_check_desc_other(res);

        return flags->system_ram && flags->desc_other;
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
                                struct ioremap_mem_flags *flags)
{
        u64 start, end;

        start = (u64)addr;
        end = start + size - 1;
        memset(flags, 0, sizeof(*flags));

        walk_mem_res(start, end, flags, __ioremap_res_check);
}

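/*
 * As used below: __ioremap_caller() runs this walk once up front, then
 * consults mem_flags.system_ram to reject mappings of RAM and
 * mem_flags.desc_other to decide whether an SEV guest must map the range
 * with the encryption attribute set.
 */
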
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages, toward 4KB,
 * when a mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, enum page_cache_mode pcm,
                void *caller, bool encrypted)
{
        unsigned long offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct ioremap_mem_flags mem_flags;
        struct vm_struct *area;
        enum page_cache_mode new_pcm;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        __ioremap_check_mem(phys_addr, size, &mem_flags);

        /* Don't allow anybody to remap normal RAM that we're using.. */
        if (mem_flags.system_ram) {
                WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
                          &phys_addr, &last_addr);
                return NULL;
        }

        /* Mappings have to be page-aligned */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PHYSICAL_PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                 pcm, &new_pcm);
        if (retval) {
                printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
                return NULL;
        }

        if (pcm != new_pcm) {
                if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
                        printk(KERN_ERR
                "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                pcm, new_pcm);
                        goto err_free_memtype;
                }
                pcm = new_pcm;
        }

        /*
         * If the page being mapped is in memory and SEV is active then
         * make sure the memory encryption attribute is enabled in the
         * resulting mapping.
         */
        prot = PAGE_KERNEL_IO;
        if ((sev_active() && mem_flags.desc_other) || encrypted)
                prot = pgprot_encrypted(prot);

        switch (pcm) {
        case _PAGE_CACHE_MODE_UC:
        default:
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_UC));
                break;
        case _PAGE_CACHE_MODE_UC_MINUS:
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
                break;
        case _PAGE_CACHE_MODE_WC:
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_WC));
                break;
        case _PAGE_CACHE_MODE_WT:
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_WT));
                break;
        case _PAGE_CACHE_MODE_WB:
                break;
        }

        /* Ok, go for it.. */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                goto err_free_memtype;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;

        if (kernel_map_sync_memtype(phys_addr, size, pcm))
                goto err_free_area;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
                goto err_free_area;

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        /*
         * Check if the request spans more than any BAR in the iomem resource
         * tree.
         */
        if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
                pr_warn("caller %pS mapping multiple BARs\n", caller);

        return ret_addr;
err_free_area:
        free_vm_area(area);
err_free_memtype:
        free_memtype(phys_addr, phys_addr + size);
        return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
         *
         * Until we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS. Drivers that are certain they need or can already
         * be converted over to strong UC can use ioremap_uc().
         */
        enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, pcm,
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_nocache);

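/*
 * Minimal usage sketch (illustrative only; "pdev" and BAR0_STATUS are
 * hypothetical and not part of this file):
 *
 *      void __iomem *regs;
 *      u32 status;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      status = readl(regs + BAR0_STATUS);
 *      iounmap(regs);
 */
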
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
        enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

        return __ioremap_caller(phys_addr, size, pcm,
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);

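/*
 * Typical use is a framebuffer-style aperture where large sequential MMIO
 * writes benefit from write combining. Sketch ("fb_phys" and "fb_len" are
 * hypothetical):
 *
 *      void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 *      if (fb)
 *              memset_io(fb, 0, fb_len);
 */
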
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
                                __builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                           unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size,
                                pgprot2cachemode(__pgprot(prot_val)),
                                __builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

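/*
 * ioremap_prot() is the catch-all variant: the caller passes raw PTE
 * protection bits, which are converted back to a cache mode via
 * pgprot2cachemode(). Generic code such as generic_access_phys() uses it;
 * most drivers should prefer the typed helpers above.
 */
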
/**
 * iounmap - Free an I/O remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * The PCI/ISA range special-casing was removed from __ioremap()
         * so this check, in theory, can be removed. However, there are
         * cases where iounmap() is called for addresses not obtained via
         * ioremap() (vga16fb for example). Add a warning so that these
         * cases can be caught and fixed.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
                WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
                return;
        }

        mmiotrace_iounmap(addr);

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        p = find_vm_area((void __force *)addr);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
        return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
        return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
        return boot_cpu_has(X86_FEATURE_PSE);
}

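/*
 * These hooks are queried by ioremap_huge_init() in lib/ioremap.c to decide
 * whether ioremap_page_range() may use 1GB (pud) or 2MB (pmd) mappings,
 * subject to the "nohugeiomap" boot option.
 */
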
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
        unsigned long start  = phys &  PAGE_MASK;
        unsigned long offset = phys & ~PAGE_MASK;
        void *vaddr;

        /* memremap() maps if RAM, otherwise falls back to ioremap() */
        vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

        /* Only add the offset on success and return NULL if memremap() failed */
        if (vaddr)
                vaddr += offset;

        return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
        memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

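/*
 * Intended pairing, modelled on the /dev/mem read path ("buf" and "count"
 * are hypothetical here):
 *
 *      void *ptr = xlate_dev_mem_ptr(p);
 *
 *      if (!ptr)
 *              return -EFAULT;
 *      err = copy_to_user(buf, ptr, count) ? -EFAULT : 0;
 *      unxlate_dev_mem_ptr(p, ptr);
 */
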
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
                                          unsigned long size)
{
        int is_pmem;

        /*
         * Check if the address is part of a persistent memory region.
         * This check covers areas added by E820, EFI and ACPI.
         */
        is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
                                    IORES_DESC_PERSISTENT_MEMORY);
        if (is_pmem != REGION_DISJOINT)
                return true;

        /*
         * Check if the non-volatile attribute is set for an EFI
         * reserved area.
         */
        if (efi_enabled(EFI_BOOT)) {
                switch (efi_mem_type(phys_addr)) {
                case EFI_RESERVED_TYPE:
                        if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
                                return true;
                        break;
                default:
                        break;
                }
        }

        /* Check if the address is outside kernel usable area */
        switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
        case E820_TYPE_RESERVED:
        case E820_TYPE_ACPI:
        case E820_TYPE_NVS:
        case E820_TYPE_UNUSABLE:
                /* For SEV, these areas are encrypted */
                if (sev_active())
                        break;
                /* Fallthrough */
        case E820_TYPE_PRAM:
                return true;
        default:
                break;
        }

        return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
                                 unsigned long size)
{
        u64 paddr;

        /* Check if the address is part of EFI boot/runtime data */
        if (!efi_enabled(EFI_BOOT))
                return false;

        paddr = boot_params.efi_info.efi_memmap_hi;
        paddr <<= 32;
        paddr |= boot_params.efi_info.efi_memmap;
        if (phys_addr == paddr)
                return true;

        paddr = boot_params.efi_info.efi_systab_hi;
        paddr <<= 32;
        paddr |= boot_params.efi_info.efi_systab;
        if (phys_addr == paddr)
                return true;

        if (efi_is_table_address(phys_addr))
                return true;

        switch (efi_mem_type(phys_addr)) {
        case EFI_BOOT_SERVICES_DATA:
        case EFI_RUNTIME_SERVICES_DATA:
                return true;
        default:
                break;
        }

        return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
                                   unsigned long size)
{
        struct setup_data *data;
        u64 paddr, paddr_next;

        paddr = boot_params.hdr.setup_data;
        while (paddr) {
                unsigned int len;

                if (phys_addr == paddr)
                        return true;

                data = memremap(paddr, sizeof(*data),
                                MEMREMAP_WB | MEMREMAP_DEC);

                paddr_next = data->next;
                len = data->len;

                memunmap(data);

                if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
                        return true;

                paddr = paddr_next;
        }

        return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
                                                unsigned long size)
{
        struct setup_data *data;
        u64 paddr, paddr_next;

        paddr = boot_params.hdr.setup_data;
        while (paddr) {
                unsigned int len;

                if (phys_addr == paddr)
                        return true;

                data = early_memremap_decrypted(paddr, sizeof(*data));

                paddr_next = data->next;
                len = data->len;

                early_memunmap(data, sizeof(*data));

                if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
                        return true;

                paddr = paddr_next;
        }

        return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
                                 unsigned long flags)
{
        if (!mem_encrypt_active())
                return true;

        if (flags & MEMREMAP_ENC)
                return true;

        if (flags & MEMREMAP_DEC)
                return false;

        if (sme_active()) {
                if (memremap_is_setup_data(phys_addr, size) ||
                    memremap_is_efi_data(phys_addr, size))
                        return false;
        }

        return !memremap_should_map_decrypted(phys_addr, size);
}

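/*
 * The generic memremap() code consults this hook when a request targets
 * RAM: returning false makes memremap() skip the (encrypted) direct map
 * and fall back to creating a fresh mapping, typically decrypted, instead.
 */
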
/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
                                             unsigned long size,
                                             pgprot_t prot)
{
        bool encrypted_prot;

        if (!mem_encrypt_active())
                return prot;

        encrypted_prot = true;

        if (sme_active()) {
                if (early_memremap_is_setup_data(phys_addr, size) ||
                    memremap_is_efi_data(phys_addr, size))
                        encrypted_prot = false;
        }

        if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
                encrypted_prot = false;

        return encrypted_prot ? pgprot_encrypted(prot)
                              : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
        return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

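/*
 * In other words: "would a RAM remap of this range be encrypted?". With no
 * MEMREMAP_* flags (the 0 argument) the answer comes purely from the
 * setup_data/EFI/e820 checks above; raw physical-memory accessors (e.g.
 * the /dev/mem mmap path) can use it to pick matching protections.
 */
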
#ifdef CONFIG_AMD_MEM_ENCRYPT
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
                                      unsigned long size)
{
        return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected; cannot be called
 * before pat_init().
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
                                         unsigned long size)
{
        /* Be sure the write-protect PAT entry is set for write-protect */
        if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
                return NULL;

        return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
                                      unsigned long size)
{
        return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected; cannot be called
 * before pat_init().
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
                                         unsigned long size)
{
        /* Be sure the write-protect PAT entry is set for write-protect */
        if (__pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] != _PAGE_CACHE_MODE_WP)
                return NULL;

        return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}

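/*
 * Early-boot usage sketch (illustrative; "bootdata_pa" is a hypothetical
 * physical address): a caller that must read boot data left unencrypted
 * by firmware might do:
 *
 *      struct boot_params *bp;
 *
 *      bp = early_memremap_decrypted(bootdata_pa, sizeof(*bp));
 *      ...
 *      early_memunmap(bp, sizeof(*bp));
 */
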
#endif  /* CONFIG_AMD_MEM_ENCRYPT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3_pa());
        pgd_t *pgd = &base[pgd_index(addr)];
        p4d_t *p4d = p4d_offset(pgd, addr);
        pud_t *pud = pud_offset(p4d, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
        return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

#ifdef CONFIG_X86_64
        BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
        WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

        early_ioremap_setup();

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));
                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init __early_set_fixmap(enum fixed_addresses idx,
                               phys_addr_t phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        /* Sanitize 'prot' against any unsupported bits: */
        pgprot_val(flags) &= __supported_pte_mask;

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one_kernel(addr);
}
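
/*
 * The generic early_ioremap() machinery (mm/early_ioremap.c) drives
 * __early_set_fixmap() via early_set_fixmap()/early_clear_fixmap() to
 * install and tear down the boot-time fixmap slots between FIX_BTMAP_BEGIN
 * and FIX_BTMAP_END.
 */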