// SPDX-License-Identifier: GPL-2.0
/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 *
 */

/*
 * isspace() in linux/ctype.h is expected by next_arg() to filter
 * out "space/lf/tab". However, boot/ctype.h conflicts with linux/ctype.h,
 * since isdigit() is implemented in both of them, so boot/ctype.h is
 * disabled here.
 */

/*
 * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
 * However, both lib/ctype.c and lib/cmdline.c would pull in
 * EXPORT_SYMBOL, which is meaningless here and breaks the build in
 * some cases, so exports are disabled.
 */
#define __DISABLE_EXPORTS

#include "misc.h"
#include "error.h"
#include "../string.h"

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <linux/efi.h>
#include <generated/utsrelease.h>
#include <asm/efi.h>

/* Macros used by the included decompressor code below. */
#define STATIC
#include <linux/decompress/mm.h>

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled;
unsigned int pgdir_shift __ro_after_init = 39;
unsigned int ptrs_per_p4d __ro_after_init = 1;
#endif

extern unsigned long get_cmd_line_ptr(void);

/* Used by PAGE_KERN* macros: */
pteval_t __default_kernel_pte_mask __read_mostly = ~0;

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
        LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

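/*
 * Illustrative only: build_str typically expands to something like
 * "5.7.0 (user@host) (gcc version ...) #1 SMP ..." - constant for a
 * given build, but different across builds and configurations.
 */
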
static unsigned long rotate_xor(unsigned long hash, const void *area,
                                size_t size)
{
        size_t i;
        unsigned long *ptr = (unsigned long *)area;

        for (i = 0; i < size / sizeof(hash); i++) {
                /* Rotate by odd number of bits and XOR. */
                hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
                hash ^= ptr[i];
        }

        return hash;
}

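/*
 * Note (illustrative): on 64-bit the rotation above is equivalent to
 * hash = rol64(hash, 57) ^ ptr[i] for each machine word. This mixes
 * every word of the input into the seed cheaply; it is unpredictable
 * enough for a starting value, but it is not a cryptographic hash.
 */
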
/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_boot_seed(void)
{
        unsigned long hash = 0;

        hash = rotate_xor(hash, build_str, sizeof(build_str));
        hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

        return hash;
}

#define KASLR_COMPRESSED_BOOT
#include "../../lib/kaslr.c"

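/*
 * The include above provides kaslr_get_random_long(), used below for
 * both the "Physical" and "Virtual" random draws.
 */
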
/* Only supporting at most 4 unusable memmap regions with kaslr */
#define MAX_MEMMAP_REGIONS 4

static bool memmap_too_large;

/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
static unsigned long long mem_limit = ULLONG_MAX;

/* Number of immovable memory regions */
static int num_immovable_mem;

enum mem_avoid_index {
        MEM_AVOID_ZO_RANGE = 0,
        MEM_AVOID_INITRD,
        MEM_AVOID_CMDLINE,
        MEM_AVOID_BOOTPARAMS,
        MEM_AVOID_MEMMAP_BEGIN,
        MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
        MEM_AVOID_MAX,
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];

static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
        /* Item one is entirely before item two. */
        if (one->start + one->size <= two->start)
                return false;

        /* Item one is entirely after item two. */
        if (one->start >= two->start + two->size)
                return false;

        return true;
}

char *skip_spaces(const char *str)
{
        while (isspace(*str))
                ++str;
        return (char *)str;
}
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"

enum parse_mode {
        PARSE_MEMMAP,
        PARSE_EFI,
};

static int
parse_memmap(char *p, unsigned long long *start, unsigned long long *size,
             enum parse_mode mode)
{
        char *oldp;

        if (!p)
                return -EINVAL;

        /* We don't care about this option here */
        if (!strncmp(p, "exactmap", 8))
                return -EINVAL;

        oldp = p;
        *size = memparse(p, &p);
        if (p == oldp)
                return -EINVAL;

        switch (*p) {
        case '#':
        case '$':
        case '!':
                *start = memparse(p + 1, &p);
                return 0;
        case '@':
                if (mode == PARSE_MEMMAP) {
                        /*
                         * memmap=nn@ss specifies a usable region, which
                         * should be skipped here.
                         */
                        *size = 0;
                } else {
                        unsigned long long flags;

                        /*
                         * efi_fake_mem=nn@ss:attr - the attr specifies
                         * flags that might imply a soft-reservation.
                         */
                        *start = memparse(p + 1, &p);
                        if (p && *p == ':') {
                                p++;
                                if (kstrtoull(p, 0, &flags) < 0)
                                        *size = 0;
                                else if (flags & EFI_MEMORY_SP)
                                        return 0;
                        }
                        *size = 0;
                }
                /* Fall through */
        default:
                /*
                 * If no offset is given (only a size), memmap=nn[KMG]
                 * behaves the same as mem=nn[KMG]: it limits the maximum
                 * address the system can use, and the region above the
                 * limit must be avoided.
                 */
                *start = 0;
                return 0;
        }
}

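/*
 * Illustrative examples of the syntax handled above (made-up values):
 *   memmap=512K$0x18690000 - reserve: *size = 512K, *start = 0x18690000
 *   memmap=64M@0x10000000  - usable region: skipped (*size forced to 0)
 *   memmap=2G              - size only: *start = 0, acts like mem=2G
 */
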
static void mem_avoid_memmap(enum parse_mode mode, char *str)
{
        static int i;

        if (i >= MAX_MEMMAP_REGIONS)
                return;

        while (str && (i < MAX_MEMMAP_REGIONS)) {
                int rc;
                unsigned long long start, size;
                char *k = strchr(str, ',');

                if (k)
                        *k++ = 0;

                rc = parse_memmap(str, &start, &size, mode);
                if (rc < 0)
                        break;
                str = k;

                if (start == 0) {
                        /* Store the specified memory limit if size > 0 */
                        if (size > 0)
                                mem_limit = size;

                        continue;
                }

                mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
                mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
                i++;
        }

        /* More than 4 memmaps, fail kaslr */
        if ((i >= MAX_MEMMAP_REGIONS) && str)
                memmap_too_large = true;
}

/* Store the number of 1GB huge pages that the user specified: */
static unsigned long max_gb_huge_pages;

static void parse_gb_huge_pages(char *param, char *val)
{
        static bool gbpage_sz;
        char *p;

        if (!strcmp(param, "hugepagesz")) {
                p = val;
                if (memparse(p, &p) != PUD_SIZE) {
                        gbpage_sz = false;
                        return;
                }

                if (gbpage_sz)
                        warn("Repeatedly set hugeTLB page size of 1G!\n");
                gbpage_sz = true;
                return;
        }

        if (!strcmp(param, "hugepages") && gbpage_sz) {
                p = val;
                max_gb_huge_pages = simple_strtoull(p, &p, 0);
                return;
        }
}

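/*
 * Illustrative example: with "hugepagesz=1G hugepages=4" on the kernel
 * command line, max_gb_huge_pages ends up as 4, and the slot code below
 * tries to keep four 1GB-aligned gigabyte ranges free of the kernel image.
 */
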
static void handle_mem_options(void)
{
        char *args = (char *)get_cmd_line_ptr();
        size_t len = strlen((char *)args);
        char *tmp_cmdline;
        char *param, *val;
        u64 mem_size;

        if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
            !strstr(args, "hugepages"))
                return;

        tmp_cmdline = malloc(len + 1);
        if (!tmp_cmdline)
                error("Failed to allocate space for tmp_cmdline");

        memcpy(tmp_cmdline, args, len);
        tmp_cmdline[len] = 0;
        args = tmp_cmdline;

        /* Chew leading spaces */
        args = skip_spaces(args);

        while (*args) {
                args = next_arg(args, &param, &val);
                /* Stop at -- */
                if (!val && strcmp(param, "--") == 0)
                        break;

                if (!strcmp(param, "memmap")) {
                        mem_avoid_memmap(PARSE_MEMMAP, val);
                } else if (strstr(param, "hugepages")) {
                        parse_gb_huge_pages(param, val);
                } else if (!strcmp(param, "mem")) {
                        char *p = val;

                        if (!strcmp(p, "nopentium"))
                                continue;
                        mem_size = memparse(p, &p);
                        if (mem_size == 0)
                                break;

                        mem_limit = mem_size;
                } else if (!strcmp(param, "efi_fake_mem")) {
                        mem_avoid_memmap(PARSE_EFI, val);
                }
        }

        free(tmp_cmdline);
}

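/*
 * For example (made-up values), booting with "mem=4G memmap=16M$0x30000000"
 * caps mem_limit at 4GiB and records the 16MiB region at 0x30000000 in
 * mem_avoid[], so the randomized kernel is placed below the cap and
 * outside the reserved region.
 */
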
/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible since incorrect mem_avoid
 * memory ranges lead to really hard to debug boot failures.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS respectively below.
 *
 * What is not so obvious is the range of memory that is used during
 * decompression (MEM_AVOID_ZO_RANGE below). This range must cover the
 * compressed kernel (ZO) and its run space, which is used to extract
 * the uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc. of ZO are positioned more
 * precisely.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 *  - input + input_size >= output + output_size
 *  - kernel_total_size <= init_size
 *  - kernel_total_size <= output_size (see Note below)
 *  - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as a maximum of the two. The diagram shows a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0   output            input            input+input_size    output+init_size
 * |     |                 |                             |             |
 * |     |                 |                             |             |
 * |-----|--------|--------|--------------|-----------|--|-------------|
 *                |                       |           |
 *                |                       |           |
 *   output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code. (i.e. the range
 * covered backwards of size ZO_INIT_SIZE, starting from output+init_size.)
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided since it
 * performs the decompression.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in: [input, output+init_size) which
 * becomes the MEM_AVOID_ZO_RANGE below.
 */

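/*
 * A rough numeric illustration (made-up values): with output = 16M,
 * init_size = 64M, input = 60M and input_size = 8M, the ranges to avoid
 * are [60M, 68M) (the copied ZO) and [68M, 80M) (ZO's text, heap and
 * stack), which merge into MEM_AVOID_ZO_RANGE = [60M, 80M).
 */
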
static void mem_avoid_init(unsigned long input, unsigned long input_size,
                           unsigned long output)
{
        unsigned long init_size = boot_params->hdr.init_size;
        u64 initrd_start, initrd_size;
        unsigned long cmd_line, cmd_line_size;

        /*
         * Avoid the region that is unsafe to overlap during
         * decompression.
         */
        mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
        mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
        add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
                         mem_avoid[MEM_AVOID_ZO_RANGE].size);

        /* Avoid initrd. */
        initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
        initrd_start |= boot_params->hdr.ramdisk_image;
        initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
        initrd_size |= boot_params->hdr.ramdisk_size;
        mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
        mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
        /* No need to set mapping for initrd, it will be handled in VO. */

        /* Avoid kernel command line. */
        cmd_line = get_cmd_line_ptr();
        /* Calculate size of cmd_line. */
        if (cmd_line) {
                cmd_line_size = strlen((char *)cmd_line) + 1;
                mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
                mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
                add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
                                 mem_avoid[MEM_AVOID_CMDLINE].size);
        }

        /* Avoid boot parameters. */
        mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
        mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
        add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
                         mem_avoid[MEM_AVOID_BOOTPARAMS].size);

        /* We don't need to set a mapping for setup_data. */

        /* Mark the memmap regions we need to avoid */
        handle_mem_options();

        /* Enumerate the immovable memory regions */
        num_immovable_mem = count_immovable_mem_regions();

#ifdef CONFIG_X86_VERBOSE_BOOTUP
        /* Make sure video RAM can be used. */
        add_identity_map(0, PMD_SIZE);
#endif
}

/*
 * Does this memory vector overlap a known avoided area? If so, record the
 * overlap region with the lowest address.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
                              struct mem_vector *overlap)
{
        int i;
        struct setup_data *ptr;
        unsigned long earliest = img->start + img->size;
        bool is_overlapping = false;

        for (i = 0; i < MEM_AVOID_MAX; i++) {
                if (mem_overlaps(img, &mem_avoid[i]) &&
                    mem_avoid[i].start < earliest) {
                        *overlap = mem_avoid[i];
                        earliest = overlap->start;
                        is_overlapping = true;
                }
        }

        /* Avoid all entries in the setup_data linked list. */
        ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
        while (ptr) {
                struct mem_vector avoid;

                avoid.start = (unsigned long)ptr;
                avoid.size = sizeof(*ptr) + ptr->len;

                if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
                        *overlap = avoid;
                        earliest = overlap->start;
                        is_overlapping = true;
                }

                if (ptr->type == SETUP_INDIRECT &&
                    ((struct setup_indirect *)ptr->data)->type != SETUP_INDIRECT) {
                        avoid.start = ((struct setup_indirect *)ptr->data)->addr;
                        avoid.size = ((struct setup_indirect *)ptr->data)->len;

                        if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
                                *overlap = avoid;
                                earliest = overlap->start;
                                is_overlapping = true;
                        }
                }

                ptr = (struct setup_data *)(unsigned long)ptr->next;
        }

        return is_overlapping;
}

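/*
 * E.g. if 'img' overlaps both the cmdline blob and a setup_data entry,
 * the overlap with the lowest start address is reported, letting
 * __process_mem_region() keep the largest usable head of the region.
 */
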
struct slot_area {
        unsigned long addr;
        int num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];

static unsigned long slot_max;

static unsigned long slot_area_index;

static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
        struct slot_area slot_area;

        if (slot_area_index == MAX_SLOT_AREA)
                return;

        slot_area.addr = region->start;
        slot_area.num = (region->size - image_size) /
                        CONFIG_PHYSICAL_ALIGN + 1;

        if (slot_area.num > 0) {
                slot_areas[slot_area_index++] = slot_area;
                slot_max += slot_area.num;
        }
}

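/*
 * Worked example (illustrative numbers): a 64MiB region with a 16MiB
 * image and CONFIG_PHYSICAL_ALIGN of 2MiB yields
 * (64M - 16M) / 2M + 1 = 25 candidate slots.
 */
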
/*
 * Skip as many 1GB huge pages as possible in the passed region
 * according to the number the user specified:
 */
static void
process_gb_huge_pages(struct mem_vector *region, unsigned long image_size)
{
        unsigned long addr, size = 0;
        struct mem_vector tmp;
        int i = 0;

        if (!max_gb_huge_pages) {
                store_slot_info(region, image_size);
                return;
        }

        addr = ALIGN(region->start, PUD_SIZE);
        /* Did we raise the address above the passed in memory entry? */
        if (addr < region->start + region->size)
                size = region->size - (addr - region->start);

        /* Check how many 1GB huge pages can be filtered out: */
        while (size > PUD_SIZE && max_gb_huge_pages) {
                size -= PUD_SIZE;
                max_gb_huge_pages--;
                i++;
        }

        /* No good 1GB huge pages found: */
        if (!i) {
                store_slot_info(region, image_size);
                return;
        }

        /*
         * Skip those 'i'*1GB good huge pages, and continue checking and
         * processing the remaining head or tail part of the passed region
         * if available.
         */

        if (addr >= region->start + image_size) {
                tmp.start = region->start;
                tmp.size = addr - region->start;
                store_slot_info(&tmp, image_size);
        }

        size = region->size - (addr - region->start) - i * PUD_SIZE;
        if (size >= image_size) {
                tmp.start = addr + i * PUD_SIZE;
                tmp.size = size;
                store_slot_info(&tmp, image_size);
        }
}

static unsigned long slots_fetch_random(void)
{
        unsigned long slot;
        int i;

        /* Handle case of no slots stored. */
        if (slot_max == 0)
                return 0;

        slot = kaslr_get_random_long("Physical") % slot_max;

        for (i = 0; i < slot_area_index; i++) {
                if (slot >= slot_areas[i].num) {
                        slot -= slot_areas[i].num;
                        continue;
                }
                return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
        }

        if (i == slot_area_index)
                debug_putstr("slots_fetch_random() failed!?\n");
        return 0;
}

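/*
 * E.g. (illustrative): with two slot areas of 10 and 30 slots, a random
 * draw of 25 skips the first area (25 - 10 = 15) and picks slot 15 of
 * the second, so each of the 40 slots is equally likely.
 */
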
static void __process_mem_region(struct mem_vector *entry,
                                 unsigned long minimum,
                                 unsigned long image_size)
{
        struct mem_vector region, overlap;
        unsigned long start_orig, end;
        struct mem_vector cur_entry;

        /* On 32-bit, ignore entries entirely above our maximum. */
        if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
                return;

        /* Ignore entries entirely below our minimum. */
        if (entry->start + entry->size < minimum)
                return;

        /* Ignore entries above memory limit */
        end = min(entry->size + entry->start, mem_limit);
        if (entry->start >= end)
                return;
        cur_entry.start = entry->start;
        cur_entry.size = end - entry->start;

        region.start = cur_entry.start;
        region.size = cur_entry.size;

        /* Give up if slot area array is full. */
        while (slot_area_index < MAX_SLOT_AREA) {
                start_orig = region.start;

                /* Potentially raise address to minimum location. */
                if (region.start < minimum)
                        region.start = minimum;

                /* Potentially raise address to meet alignment needs. */
                region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

                /* Did we raise the address above the passed in memory entry? */
                if (region.start > cur_entry.start + cur_entry.size)
                        return;

                /* Reduce size by any delta from the original address. */
                region.size -= region.start - start_orig;

                /* On 32-bit, reduce region size to fit within max size. */
                if (IS_ENABLED(CONFIG_X86_32) &&
                    region.start + region.size > KERNEL_IMAGE_SIZE)
                        region.size = KERNEL_IMAGE_SIZE - region.start;

                /* Return if region can't contain decompressed kernel */
                if (region.size < image_size)
                        return;

                /* If nothing overlaps, store the region and return. */
                if (!mem_avoid_overlap(&region, &overlap)) {
                        process_gb_huge_pages(&region, image_size);
                        return;
                }

                /* Store beginning of region if it holds at least image_size. */
                if (overlap.start > region.start + image_size) {
                        struct mem_vector beginning;

                        beginning.start = region.start;
                        beginning.size = overlap.start - region.start;
                        process_gb_huge_pages(&beginning, image_size);
                }

                /* Return if overlap extends to or past end of region. */
                if (overlap.start + overlap.size >= region.start + region.size)
                        return;

                /* Clip off the overlapping region and start over. */
                region.size -= overlap.start - region.start + overlap.size;
                region.start = overlap.start + overlap.size;
        }
}

static bool process_mem_region(struct mem_vector *region,
                               unsigned long long minimum,
                               unsigned long long image_size)
{
        int i;

        /*
         * If no immovable memory found, or MEMORY_HOTREMOVE disabled,
         * use @region directly.
         */
        if (!num_immovable_mem) {
                __process_mem_region(region, minimum, image_size);

                if (slot_area_index == MAX_SLOT_AREA) {
                        debug_putstr("Aborted e820/efi memmap scan (slot_areas full)!\n");
                        return 1;
                }
                return 0;
        }

#if defined(CONFIG_MEMORY_HOTREMOVE) && defined(CONFIG_ACPI)
        /*
         * If immovable memory found, filter the intersection between
         * immovable memory and @region.
         */
        for (i = 0; i < num_immovable_mem; i++) {
                unsigned long long start, end, entry_end, region_end;
                struct mem_vector entry;

                if (!mem_overlaps(region, &immovable_mem[i]))
                        continue;

                start = immovable_mem[i].start;
                end = start + immovable_mem[i].size;
                region_end = region->start + region->size;

                entry.start = clamp(region->start, start, end);
                entry_end = clamp(region_end, start, end);
                entry.size = entry_end - entry.start;

                __process_mem_region(&entry, minimum, image_size);

                if (slot_area_index == MAX_SLOT_AREA) {
                        debug_putstr("Aborted e820/efi memmap scan when walking immovable regions (slot_areas full)!\n");
                        return 1;
                }
        }
#endif
        return 0;
}

#ifdef CONFIG_EFI
/*
 * Returns true if we processed the EFI memmap, which we prefer over the
 * e820 table if it is available.
 */
static bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
        struct efi_info *e = &boot_params->efi_info;
        bool efi_mirror_found = false;
        struct mem_vector region;
        efi_memory_desc_t *md;
        unsigned long pmap;
        char *signature;
        u32 nr_desc;
        int i;

        signature = (char *)&e->efi_loader_signature;
        if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
            strncmp(signature, EFI64_LOADER_SIGNATURE, 4))
                return false;

#ifdef CONFIG_X86_32
        /* Can't handle data above 4GB at this time */
        if (e->efi_memmap_hi) {
                warn("EFI memmap is above 4GB, can't be handled now on x86_32. EFI should be disabled.\n");
                return false;
        }
        pmap = e->efi_memmap;
#else
        pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
#endif

        nr_desc = e->efi_memmap_size / e->efi_memdesc_size;
        for (i = 0; i < nr_desc; i++) {
                md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);
                if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
                        efi_mirror_found = true;
                        break;
                }
        }

        for (i = 0; i < nr_desc; i++) {
                md = efi_early_memdesc_ptr(pmap, e->efi_memdesc_size, i);

                /*
                 * Here we are more conservative in picking free memory than
                 * the EFI spec allows:
                 *
                 * According to the spec, EFI_BOOT_SERVICES_{CODE|DATA} are also
                 * free memory and thus available to place the kernel image into,
                 * but in practice there's firmware where using that memory leads
                 * to crashes.
                 *
                 * Only EFI_CONVENTIONAL_MEMORY is guaranteed to be free.
                 */
                if (md->type != EFI_CONVENTIONAL_MEMORY)
                        continue;

                if (efi_soft_reserve_enabled() &&
                    (md->attribute & EFI_MEMORY_SP))
                        continue;

                if (efi_mirror_found &&
                    !(md->attribute & EFI_MEMORY_MORE_RELIABLE))
                        continue;

                region.start = md->phys_addr;
                region.size = md->num_pages << EFI_PAGE_SHIFT;
                if (process_mem_region(&region, minimum, image_size))
                        break;
        }
        return true;
}
#else
static inline bool
process_efi_entries(unsigned long minimum, unsigned long image_size)
{
        return false;
}
#endif

static void process_e820_entries(unsigned long minimum,
                                 unsigned long image_size)
{
        int i;
        struct mem_vector region;
        struct boot_e820_entry *entry;

        /* Verify potential e820 positions, appending to slots list. */
        for (i = 0; i < boot_params->e820_entries; i++) {
                entry = &boot_params->e820_table[i];
                /* Skip non-RAM entries. */
                if (entry->type != E820_TYPE_RAM)
                        continue;
                region.start = entry->addr;
                region.size = entry->size;
                if (process_mem_region(&region, minimum, image_size))
                        break;
        }
}

static unsigned long find_random_phys_addr(unsigned long minimum,
                                           unsigned long image_size)
{
        /* Check if we had too many memmaps. */
        if (memmap_too_large) {
                debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
                return 0;
        }

        /* Make sure minimum is aligned. */
        minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

        if (process_efi_entries(minimum, image_size))
                return slots_fetch_random();

        process_e820_entries(minimum, image_size);
        return slots_fetch_random();
}

static unsigned long find_random_virt_addr(unsigned long minimum,
                                           unsigned long image_size)
{
        unsigned long slots, random_addr;

        /* Make sure minimum is aligned. */
        minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
        /* Align image_size for easy slot calculations. */
        image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);

        /*
         * How many CONFIG_PHYSICAL_ALIGN-sized slots can hold image_size
         * within the range from minimum to KERNEL_IMAGE_SIZE?
         */
        slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
                 CONFIG_PHYSICAL_ALIGN + 1;

        random_addr = kaslr_get_random_long("Virtual") % slots;

        return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}

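/*
 * Illustrative numbers only: with KERNEL_IMAGE_SIZE = 1GiB, minimum =
 * 16MiB and a 32MiB (aligned) image at CONFIG_PHYSICAL_ALIGN = 2MiB,
 * there are (1024M - 16M - 32M) / 2M + 1 = 489 virtual slots to pick from.
 */
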
/*
 * Since this function examines addresses numerically rather than as
 * pointers, it takes the input and output positions as 'unsigned long'.
 */
void choose_random_location(unsigned long input,
                            unsigned long input_size,
                            unsigned long *output,
                            unsigned long output_size,
                            unsigned long *virt_addr)
{
        unsigned long random_addr, min_addr;

        if (cmdline_find_option_bool("nokaslr")) {
                warn("KASLR disabled: 'nokaslr' on cmdline.");
                return;
        }

#ifdef CONFIG_X86_5LEVEL
        if (__read_cr4() & X86_CR4_LA57) {
                __pgtable_l5_enabled = 1;
                pgdir_shift = 48;
                ptrs_per_p4d = 512;
        }
#endif

        boot_params->hdr.loadflags |= KASLR_FLAG;

        /* Prepare to add new identity pagetables on demand. */
        initialize_identity_maps();

        /* Record the various known unsafe memory ranges. */
        mem_avoid_init(input, input_size, *output);

        /*
         * Low end of the randomization range should be the
         * smaller of 512M or the initial kernel image
         * location:
         */
        min_addr = min(*output, 512UL << 20);

        /* Walk available memory entries to find a random address. */
        random_addr = find_random_phys_addr(min_addr, output_size);
        if (!random_addr) {
                warn("Physical KASLR disabled: no suitable memory region!");
        } else {
                /* Update the new physical address location. */
                if (*output != random_addr) {
                        add_identity_map(random_addr, output_size);
                        *output = random_addr;
                }

                /*
                 * This loads the identity mapping page table.
                 * This should only be done if a new physical address
                 * is found for the kernel, otherwise we should keep
                 * the old page table to make it be like the "nokaslr"
                 * case.
                 */
                finalize_identity_maps();
        }

        /* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
        if (IS_ENABLED(CONFIG_X86_64))
                random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
        *virt_addr = random_addr;
}