// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"
/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
/* Does the range (or a subset of) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}
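/*
 * Worked example (editorial sketch, not in the original source): for a
 * resource spanning 0x1000-0x2fff with 4K pages,
 *
 *	start_pfn = (0x1000 + 0xfff) >> 12 = 1
 *	stop_pfn  = (0x2fff + 1)    >> 12 = 3
 *
 * so PFNs 1 and 2 are checked. Rounding the start up and the (exclusive)
 * end down means partial pages at the edges are not treated as RAM.
 */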
/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * there the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}
/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
		desc->flags |= IORES_MAP_ENCRYPTED;
}
static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
			       (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}
/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY and looks for system RAM and/or a
 * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mapping when
 * the physical address is aligned by a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to use a smaller page toward 4KB
 * when a mapping range is covered by non-WB type of MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 * In TDX guests, memory is marked private by default. If encryption
	 * is not requested (using encrypted), explicitly set decrypt
	 * attribute in all IOREMAPPED memory.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);
	else
		prot = pgprot_decrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}
/**
 * ioremap - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);
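/*
 * Usage sketch (illustrative, not part of this file): a typical PCI driver
 * maps a BAR at probe time and tears the mapping down again on remove.
 * The BAR index and register offsets below are hypothetical.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *
 *	writel(1, regs + 0x04);		// hypothetical enable register
 *	status = readl(regs + 0x08);	// hypothetical status register
 *
 *	iounmap(regs);
 */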
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);
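/*
 * Usage sketch (illustrative, not part of this file): write combining is
 * typically used for framebuffer-like apertures dominated by streaming
 * writes. The aperture base and size below are hypothetical.
 *
 *	void __iomem *fb = ioremap_wc(aperture_base, aperture_size);
 *
 *	if (fb) {
 *		memset_io(fb, 0, aperture_size);	// streaming fill
 *		iounmap(fb);
 *	}
 */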
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);
void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);
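/*
 * Usage sketch (illustrative, not part of this file): ioremap_prot() lets
 * generic code reuse raw protection bits for a temporary mapping, e.g. the
 * way generic_access_phys() remaps a page with an existing pte's own bits:
 *
 *	void __iomem *p = ioremap_prot(phys, PAGE_SIZE,
 *				       pgprot_val(PAGE_KERNEL_IO));
 */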
/**
 * iounmap - Free a IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
}
EXPORT_SYMBOL(iounmap);
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}
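/*
 * Usage sketch (illustrative, not part of this file): /dev/mem-style access
 * maps a single page around the target, reads through the returned pointer
 * and unmaps via the same (possibly page-unaligned) pointer:
 *
 *	void *p = xlate_dev_mem_ptr(phys);
 *
 *	if (p) {
 *		u32 val = *(u32 *)p;
 *		unxlate_dev_mem_ptr(phys, p);
 *	}
 */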
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			break;
		fallthrough;

	case E820_TYPE_PRAM:
		return true;

	default:
		break;
	}

	return false;
}
/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}
/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);
		if (!data) {
			pr_warn("failed to memremap setup_data entry\n");
			return false;
		}

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			memunmap(data);
			data = memremap(paddr, sizeof(*data) + len,
					MEMREMAP_WB | MEMREMAP_DEC);
			if (!data) {
				pr_warn("failed to memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}
/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len, size;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));
		if (!data) {
			pr_warn("failed to early memremap setup_data entry\n");
			return false;
		}

		size = sizeof(*data);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			early_memunmap(data, sizeof(*data));
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			size += len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap_decrypted(paddr, size);
			if (!data) {
				pr_warn("failed to early memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		early_memunmap(data, size);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}
/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}
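/*
 * Usage sketch (illustrative, not part of this file): a caller that knows
 * its target was written decrypted (e.g. by firmware) can bypass the
 * heuristics above by passing MEMREMAP_DEC explicitly:
 *
 *	void *p = memremap(phys, size, MEMREMAP_WB | MEMREMAP_DEC);
 *
 * Without MEMREMAP_ENC or MEMREMAP_DEC, the memremap() core consults
 * arch_memremap_can_ram_remap() to decide how RAM may be reused.
 */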
/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return prot;

	encrypted_prot = true;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}
bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}
/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}
/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}
/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
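/*
 * Usage sketch (illustrative, not part of this file): early boot code uses
 * these helpers to reach data that firmware wrote unencrypted, e.g. the
 * setup_data chain walked in early_memremap_is_setup_data() above:
 *
 *	struct setup_data *data;
 *
 *	data = early_memremap_decrypted(paddr, sizeof(*data));
 *	if (data) {
 *		// ... inspect data->type and data->len ...
 *		early_memunmap(data, sizeof(*data));
 *	}
 */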
#endif	/* CONFIG_AMD_MEM_ENCRYPT */
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}
static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);

	flush_tlb_one_kernel(addr);
}
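/*
 * Usage sketch (illustrative, not part of this file): __early_set_fixmap()
 * is the primitive behind early_ioremap()/early_memunmap(), which hand out
 * FIX_BTMAP_* slots before the normal ioremap() machinery is available:
 *
 *	void *p = early_memremap(phys, len);
 *
 *	if (p) {
 *		// ... parse the firmware blob at phys ...
 *		early_memunmap(p, len);
 *	}
 */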