config RISCV
def_bool y
select ARCH_CLOCKSOURCE_INIT
- select ARCH_SUPPORTS_ATOMIC_RMW
- select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
- select ARCH_STACKWALK
+ select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_STRICT_MODULE_RWX if MMU && !XIP_KERNEL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+ select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+ select ARCH_STACKWALK
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_SUPPORTS_DEBUG_PAGEALLOC if MMU
select ARCH_SUPPORTS_HUGETLBFS if MMU
select ARCH_USE_MEMTEST
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_EARLY_IOREMAP
select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
+ select GENERIC_IDLE_POLL_SETUP
select GENERIC_IOREMAP
select GENERIC_IRQ_MULTI_HANDLER
select GENERIC_IRQ_SHOW
+ select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_LIB_DEVMEM_IS_ALLOWED
select GENERIC_PCI_IOMAP
select GENERIC_PTDUMP if MMU
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
- select GENERIC_STRNCPY_FROM_USER if MMU
- select GENERIC_STRNLEN_USER if MMU
select GENERIC_TIME_VSYSCALL if MMU && 64BIT
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
select HAVE_ARCH_VMAP_STACK if MMU && 64BIT
select HAVE_ASM_MODVERSIONS
select HAVE_CONTEXT_TRACKING
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select IRQ_DOMAIN
+ select IRQ_FORCED_THREADING
select MODULES_USE_ELF_RELA if MODULES
select MODULE_SECTIONS if MODULES
select OF
config STACKPROTECTOR_PER_TASK
def_bool y
+ depends on !GCC_PLUGIN_RANDSTRUCT
depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
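The new dependency exists because STACKPROTECTOR_PER_TASK bakes the offset of stack_canary within task_struct into the compiler flags (-mstack-protector-guard-offset=<N>, read relative to tp), while GCC_PLUGIN_RANDSTRUCT reshuffles the task_struct layout at build time, invalidating that fixed offset. A minimal sketch of the offset in question, using a hypothetical two-field stand-in for task_struct:

    #include <stdio.h>
    #include <stddef.h>

    /* Hypothetical stand-in for task_struct; the real layout is what
     * GCC_PLUGIN_RANDSTRUCT randomizes at build time. */
    struct task_stub {
            unsigned long flags;
            unsigned long stack_canary;
    };

    int main(void)
    {
            /* This is the kind of constant that ends up in
             * -mstack-protector-guard-offset=<N>. If randstruct moves
             * the field after the flag is computed, every canary check
             * reads the wrong slot, hence the new dependency above. */
            printf("stack_canary offset: %zu\n",
                   offsetof(struct task_stub, stack_canary));
            return 0;
    }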
+config PHYS_RAM_BASE_FIXED
+ bool "Explicitly specified physical RAM address"
+ default n
+
config PHYS_RAM_BASE
hex "Platform Physical RAM address"
+ depends on PHYS_RAM_BASE_FIXED
default "0x80000000"
help
This is the physical address of RAM in the system. It has to be
explicitly specified to run early relocations of read-write data
from flash to RAM.
# This prevents XIP from being enabled by all{yes,mod}config, which
# fail to build since XIP doesn't support large kernels.
depends on !COMPILE_TEST
+ select PHYS_RAM_BASE_FIXED
help
Execute-In-Place allows the kernel to run from non-volatile storage
directly addressable by the CPU, such as NOR flash. This saves RAM
space since the text and rodata sections are not loaded from flash
to RAM.
#include <linux/set_memory.h>
#include <linux/dma-map-ops.h>
#include <linux/crash_dump.h>
+ #include <linux/hugetlb.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
#endif
+phys_addr_t phys_ram_base __ro_after_init;
+EXPORT_SYMBOL(phys_ram_base);
+
#ifdef CONFIG_XIP_KERNEL
extern char _xiprom[], _exiprom[];
#endif
}
/*
- * The default maximal physical memory size is -PAGE_OFFSET,
- * limit the memory size via mem.
+ * The default maximal physical memory size is -PAGE_OFFSET for a 32-bit
+ * kernel, whereas for a 64-bit kernel the end of the virtual address space
+ * is occupied by the modules/BPF/kernel mappings, which reduces the
+ * available size of the linear mapping.
+ * Limit the memory size via mem.
*/
+#ifdef CONFIG_64BIT
+static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
+#else
static phys_addr_t memory_limit = -PAGE_OFFSET;
+#endif
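As a sanity check of the new default, here is the arithmetic under the default virtual layouts (a sketch; the PAGE_OFFSET constants below are the usual sv39 and rv32 defaults and differ on other configurations):

    #include <stdio.h>

    int main(void)
    {
            /* Default layouts: sv39 kernels use PAGE_OFFSET
             * 0xffffffe000000000, rv32 kernels use 0xc0000000. */
            unsigned long long off64 = 0xffffffe000000000ULL;
            unsigned long long off32 = 0xc0000000ULL;
            unsigned long long sz_4g = 4ULL << 30;

            unsigned long long lim64 = (0ULL - off64) - sz_4g; /* -PAGE_OFFSET - SZ_4G */
            unsigned long long lim32 = 0x100000000ULL - off32; /* -PAGE_OFFSET on rv32 */

            printf("rv64 default memory_limit: %llu GiB\n", lim64 >> 30); /* 124 */
            printf("rv32 default memory_limit: %llu GiB\n", lim32 >> 30); /* 1 */
            return 0;
    }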
static int __init early_mem(char *p)
{
static void __init setup_bootmem(void)
{
phys_addr_t vmlinux_end = __pa_symbol(&_end);
phys_addr_t vmlinux_start = __pa_symbol(&_start);
- phys_addr_t max_mapped_addr = __pa(~(ulong)0);
- phys_addr_t dram_end;
+ phys_addr_t __maybe_unused max_mapped_addr;
+ phys_addr_t phys_ram_end;
#ifdef CONFIG_XIP_KERNEL
vmlinux_start = __pa_symbol(&_sdata);
#endif
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
- dram_end = memblock_end_of_DRAM();
+
+ phys_ram_end = memblock_end_of_DRAM();
+#ifndef CONFIG_XIP_KERNEL
+ phys_ram_base = memblock_start_of_DRAM();
+#endif
+#ifndef CONFIG_64BIT
/*
* memblock allocator is not aware of the fact that last 4K bytes of
* the addressable memory can not be mapped because of IS_ERR_VALUE
* macro. Make sure that last 4k bytes are not usable by memblock
- * if end of dram is equal to maximum addressable memory.
+ * if end of dram is equal to maximum addressable memory. For a 64-bit
+ * kernel, this problem can't happen here as the end of the virtual
+ * address space is occupied by the kernel mapping; in that case, the
+ * check must be done as soon as the kernel mapping base address is
+ * determined.
*/
- if (max_mapped_addr == (dram_end - 1))
+ max_mapped_addr = __pa(~(ulong)0);
+ if (max_mapped_addr == (phys_ram_end - 1))
memblock_set_current_limit(max_mapped_addr - 4096);
+#endif
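The constraint the comment describes comes from the error-pointer encoding in include/linux/err.h: any value within MAX_ERRNO (4095) of the top of the address space is treated as an encoded errno rather than a valid pointer, so the final 4K page must never hold a mappable address. A small stand-alone illustration:

    #include <stdio.h>

    #define MAX_ERRNO       4095
    /* Mirrors include/linux/err.h: the top MAX_ERRNO values of the
     * address space encode error numbers, not valid pointers. */
    #define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

    int main(void)
    {
            /* Any address inside the final 4K page can alias an errno,
             * so memblock must never hand that page out. */
            unsigned long in_last_page = (unsigned long)-4095;

            printf("IS_ERR_VALUE: %d\n", (int)IS_ERR_VALUE(in_last_page)); /* 1 */
            return 0;
    }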
- min_low_pfn = PFN_UP(memblock_start_of_DRAM());
- max_low_pfn = max_pfn = PFN_DOWN(dram_end);
+ min_low_pfn = PFN_UP(phys_ram_base);
+ max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
set_max_mapnr(max_low_pfn - ARCH_PFN_OFFSET);
early_init_fdt_scan_reserved_mem();
dma_contiguous_reserve(dma32_phys_limit);
+ if (IS_ENABLED(CONFIG_64BIT))
+ hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
memblock_allow_resize();
}
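The hugetlb_cma_reserve() argument above is a page order: PUD_SHIFT - PAGE_SHIFT asks for CMA capable of backing PUD-sized gigantic pages, i.e. 1 GiB under sv39. The reservation is only acted on when hugetlb_cma= is given on the kernel command line. Checking the arithmetic with the sv39 shift values:

    #include <stdio.h>

    int main(void)
    {
            const int page_shift = 12, pud_shift = 30; /* sv39: 4K pages, 1G PUDs */
            int order = pud_shift - page_shift;        /* 18, as passed above */

            /* 2^18 base pages of 4K each = 1 GiB per gigantic page */
            printf("order=%d -> %lu MiB\n", order,
                   (1UL << order) * (1UL << page_shift) >> 20);
            return 0;
    }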
#define pt_ops _pt_ops
#endif
- unsigned long pfn_base __ro_after_init;
- EXPORT_SYMBOL(pfn_base);
+ unsigned long riscv_pfn_base __ro_after_init;
+ EXPORT_SYMBOL(riscv_pfn_base);
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+ static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
static pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
- static pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
#define trampoline_pmd ((pmd_t *)XIP_FIXUP(trampoline_pmd))
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
#define fixmap_pgd_next fixmap_pte
+ #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
#endif
void __init create_pgd_mapping(pgd_t *pgdp,
#endif
#ifdef CONFIG_XIP_KERNEL
- static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+ static void __init create_kernel_page_table(pgd_t *pgdir,
__always_unused bool early)
{
uintptr_t va, end_va;
/* Map the flash resident part */
end_va = kernel_map.virt_addr + kernel_map.xiprom_sz;
- for (va = kernel_map.virt_addr; va < end_va; va += map_size)
+ for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.xiprom + (va - kernel_map.virt_addr),
- map_size, PAGE_KERNEL_EXEC);
+ PMD_SIZE, PAGE_KERNEL_EXEC);
/* Map the data in RAM */
end_va = kernel_map.virt_addr + XIP_OFFSET + kernel_map.size;
- for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += map_size)
+ for (va = kernel_map.virt_addr + XIP_OFFSET; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.phys_addr + (va - (kernel_map.virt_addr + XIP_OFFSET)),
- map_size, PAGE_KERNEL);
+ PMD_SIZE, PAGE_KERNEL);
}
#else
- static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
- bool early)
+ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
{
uintptr_t va, end_va;
end_va = kernel_map.virt_addr + kernel_map.size;
- for (va = kernel_map.virt_addr; va < end_va; va += map_size)
+ for (va = kernel_map.virt_addr; va < end_va; va += PMD_SIZE)
create_pgd_mapping(pgdir, va,
kernel_map.phys_addr + (va - kernel_map.virt_addr),
- map_size,
+ PMD_SIZE,
early ?
PAGE_KERNEL_EXEC : pgprot_from_va(va));
}
#endif
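Dropping the map_size parameter hard-wires PMD-sized leaves for the kernel image, which pairs with the strengthened PMD_SIZE alignment BUG_ON added further down. A rough illustration of the loop's coverage, with a hypothetical link address and image size (neither value comes from the patch):

    #include <stdio.h>

    #define PMD_SIZE        (2UL << 20)     /* 2MB leaf mappings */

    int main(void)
    {
            /* Hypothetical placement, for illustration only. */
            unsigned long virt = 0xffffffff80000000UL;  /* link address */
            unsigned long size = 10UL << 20;            /* 10 MiB image */
            unsigned long va, entries = 0;

            /* One create_pgd_mapping() call per 2MB chunk of the image. */
            for (va = virt; va < virt + size; va += PMD_SIZE)
                    entries++;

            printf("%lu PMD entries\n", entries);       /* 5 */
            return 0;
    }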
- asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ /*
+ * Set up a 4MB mapping that encompasses the device tree: for a 64-bit
+ * kernel this means two PMD entries, whereas for a 32-bit kernel a
+ * single PGDIR entry suffices.
+ */
+ static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
{
- uintptr_t __maybe_unused pa;
- uintptr_t map_size;
- #ifndef __PAGETABLE_PMD_FOLDED
- pmd_t fix_bmap_spmd, fix_bmap_epmd;
+ #ifndef CONFIG_BUILTIN_DTB
+ uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
+
+ create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
+ IS_ENABLED(CONFIG_64BIT) ? (uintptr_t)early_dtb_pmd : pa,
+ PGDIR_SIZE,
+ IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);
+
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
+ pa, PMD_SIZE, PAGE_KERNEL);
+ create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
+ pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
+ }
+
+ dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
+ #else
+ /*
+ * For a 64-bit kernel, __va can't be used here since it would return a
+ * linear mapping address, but dtb_early_va is used before
+ * setup_vm_final installs the linear mapping. For a 32-bit kernel the
+ * kernel itself lives in the linear mapping, so it makes no difference.
+ */
+ dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
#endif
+ dtb_early_pa = dtb_pa;
+ }
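The helper masks dtb_pa down to a PMD boundary for the mapping and adds the sub-PMD offset back when forming dtb_early_va, and the two consecutive PMD entries guarantee coverage even when the FDT straddles a 2MB boundary. A worked example with a made-up DTB address (the DTB_EARLY_BASE_VA value below is a placeholder, not the kernel's):

    #include <stdio.h>

    #define PMD_SIZE                (2UL << 20)
    /* Placeholder standing in for the kernel's DTB_EARLY_BASE_VA. */
    #define DTB_EARLY_BASE_VA       0xffffffd800000000UL

    int main(void)
    {
            unsigned long dtb_pa = 0x82204000UL; /* made-up firmware placement */
            unsigned long pa = dtb_pa & ~(PMD_SIZE - 1);
            unsigned long va = DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));

            /* Two consecutive PMDs starting at pa cover the FDT even if
             * it crosses the first 2MB boundary. */
            printf("map pa=0x%lx dtb_early_va=0x%lx\n", pa, va);
            return 0;
    }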
+
+ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+ {
+ pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
+
kernel_map.virt_addr = KERNEL_LINK_ADDR;
#ifdef CONFIG_XIP_KERNEL
kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
+ phys_ram_base = CONFIG_PHYS_RAM_BASE;
kernel_map.phys_addr = (uintptr_t)CONFIG_PHYS_RAM_BASE;
kernel_map.size = (uintptr_t)(&_end) - (uintptr_t)(&_sdata);
#else
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif
-
kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
- #ifdef CONFIG_64BIT
kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
- #endif
-
- pfn_base = PFN_DOWN(kernel_map.phys_addr);
- /*
- * Enforce boot alignment requirements of RV32 and
- * RV64 by only allowing PMD or PGD mappings.
- */
- map_size = PMD_SIZE;
+ riscv_pfn_base = PFN_DOWN(kernel_map.phys_addr);
/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
- BUG_ON((kernel_map.phys_addr % map_size) != 0);
+ BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
+#ifdef CONFIG_64BIT
+ /*
+ * The last 4K bytes of the addressable memory can not be mapped because
+ * of IS_ERR_VALUE macro.
+ */
+ BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
+#endif
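This check is the 64-bit counterpart of the last-4K clamp in setup_bootmem(): since the kernel mapping now ends the virtual address space, the image itself must stop 4K short of ADDRESS_SPACE_END so that no kernel address aliases the errno range. A small sketch with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical link address and image size, for illustration. */
            unsigned long address_space_end = ~0UL; /* ADDRESS_SPACE_END */
            unsigned long virt = 0xffffffff80000000UL;
            unsigned long size = 24UL << 20;

            /* Mirrors the BUG_ON: the image must end at least 4K below
             * the top of the address space. */
            printf("%s\n", virt + size > address_space_end - 4096 ?
                   "overlaps last 4K" : "ok");
            return 0;
    }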
+
pt_ops.alloc_pte = alloc_pte_early;
pt_ops.get_pte_virt = get_pte_virt_early;
#ifndef __PAGETABLE_PMD_FOLDED
* us to reach paging_init(). We map all memory banks later
* in setup_vm_final() below.
*/
- create_kernel_page_table(early_pg_dir, map_size, true);
+ create_kernel_page_table(early_pg_dir, true);
- #ifndef __PAGETABLE_PMD_FOLDED
- /* Setup early PMD for DTB */
- create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
- (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
- #ifndef CONFIG_BUILTIN_DTB
- /* Create two consecutive PMD mappings for FDT early scan */
- pa = dtb_pa & ~(PMD_SIZE - 1);
- create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
- pa, PMD_SIZE, PAGE_KERNEL);
- create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
- pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
- dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
- #else /* CONFIG_BUILTIN_DTB */
- #ifdef CONFIG_64BIT
- /*
- * __va can't be used since it would return a linear mapping address
- * whereas dtb_early_va will be used before setup_vm_final installs
- * the linear mapping.
- */
- dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
- #else
- dtb_early_va = __va(dtb_pa);
- #endif /* CONFIG_64BIT */
- #endif /* CONFIG_BUILTIN_DTB */
- #else
- #ifndef CONFIG_BUILTIN_DTB
- /* Create two consecutive PGD mappings for FDT early scan */
- pa = dtb_pa & ~(PGDIR_SIZE - 1);
- create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
- pa, PGDIR_SIZE, PAGE_KERNEL);
- create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
- pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
- dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
- #else /* CONFIG_BUILTIN_DTB */
- #ifdef CONFIG_64BIT
- dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
- #else
- dtb_early_va = __va(dtb_pa);
- #endif /* CONFIG_64BIT */
- #endif /* CONFIG_BUILTIN_DTB */
- #endif
- dtb_early_pa = dtb_pa;
+ /* Setup early mapping for FDT early scan */
+ create_fdt_early_page_table(early_pg_dir, dtb_pa);
/*
* Boot-time fixmap can only handle PMD_SIZE mappings. Thus, boot-ioremap
if (start <= __pa(PAGE_OFFSET) &&
__pa(PAGE_OFFSET) < end)
start = __pa(PAGE_OFFSET);
+ if (end >= __pa(PAGE_OFFSET) + memory_limit)
+ end = __pa(PAGE_OFFSET) + memory_limit;
map_size = best_map_size(start, end - start);
for (pa = start; pa < end; pa += map_size) {
#ifdef CONFIG_64BIT
/* Map the kernel */
- create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
+ create_kernel_page_table(swapper_pg_dir, false);
#endif
/* Clear fixmap PTE and PMD mappings */
crash_size = PAGE_ALIGN(crash_size);
- if (crash_base == 0) {
- /*
- * Current riscv boot protocol requires 2MB alignment for
- * RV64 and 4MB alignment for RV32 (hugepage size)
- */
- crash_base = memblock_find_in_range(search_start, search_end,
- crash_size, PMD_SIZE);
-
- if (crash_base == 0) {
- pr_warn("crashkernel: couldn't allocate %lldKB\n",
- crash_size >> 10);
- return;
- }
- } else {
- /* User specifies base address explicitly. */
- if (!memblock_is_region_memory(crash_base, crash_size)) {
- pr_warn("crashkernel: requested region is not memory\n");
- return;
- }
-
- if (memblock_is_region_reserved(crash_base, crash_size)) {
- pr_warn("crashkernel: requested region is reserved\n");
- return;
- }
-
+ if (crash_base) {
+ search_start = crash_base;
+ search_end = crash_base + crash_size;
+ }
- if (!IS_ALIGNED(crash_base, PMD_SIZE)) {
- pr_warn("crashkernel: requested region is misaligned\n");
- return;
- }
+ /*
+ * Current riscv boot protocol requires 2MB alignment for
+ * RV64 and 4MB alignment for RV32 (hugepage size)
+ */
+ crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+ search_start, search_end);
+ if (crash_base == 0) {
+ pr_warn("crashkernel: couldn't allocate %lldKB\n",
+ crash_size >> 10);
+ return;
}
- memblock_reserve(crash_base, crash_size);
pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
crash_base, crash_base + crash_size, crash_size >> 20);
}
#endif /* CONFIG_KEXEC_CORE */
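memblock_phys_alloc_range() folds the old find-then-reserve sequence into a single call: it searches [start, end) for a free region of the requested size and alignment, reserves it, and returns 0 on failure, which is why the explicit is-memory/is-reserved/is-aligned checks could go. When the user pins crash_base, narrowing the search window to exactly [crash_base, crash_base + crash_size) makes the allocator perform those same validations. A runnable sketch of the shape of that API (the helpers are simplified stand-ins, not the memblock implementation):

    #include <stdio.h>

    /* Hypothetical stand-ins for the old two-step memblock flow. */
    static unsigned long find_free_region(unsigned long start, unsigned long end,
                                          unsigned long size, unsigned long align)
    {
            unsigned long base = (start + align - 1) & ~(align - 1);

            return (base + size <= end) ? base : 0;
    }

    static void reserve_region(unsigned long base, unsigned long size)
    {
            printf("reserved [0x%lx, 0x%lx)\n", base, base + size);
    }

    /* What memblock_phys_alloc_range() folds into one call: find an
     * aligned free region in [start, end) and reserve it, or return 0. */
    static unsigned long phys_alloc_range_sketch(unsigned long size,
                                                 unsigned long align,
                                                 unsigned long start,
                                                 unsigned long end)
    {
            unsigned long base = find_free_region(start, end, size, align);

            if (base)
                    reserve_region(base, size);
            return base;
    }

    int main(void)
    {
            /* Fixed crash_base: shrink the window to exactly that region. */
            unsigned long crash_base = 0x84000000UL, crash_size = 0x4000000UL;

            phys_alloc_range_sketch(crash_size, 2UL << 20,
                                    crash_base, crash_base + crash_size);
            return 0;
    }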
-#ifdef CONFIG_CRASH_DUMP
-/*
- * We keep track of the ELF core header of the crashed
- * kernel with a reserved-memory region with compatible
- * string "linux,elfcorehdr". Here we register a callback
- * to populate elfcorehdr_addr/size when this region is
- * present. Note that this region will be marked as
- * reserved once we call early_init_fdt_scan_reserved_mem()
- * later on.
- */
-static int __init elfcore_hdr_setup(struct reserved_mem *rmem)
-{
- elfcorehdr_addr = rmem->base;
- elfcorehdr_size = rmem->size;
- return 0;
-}
-
-RESERVEDMEM_OF_DECLARE(elfcorehdr, "linux,elfcorehdr", elfcore_hdr_setup);
-#endif
-
void __init paging_init(void)
{
setup_bootmem();