 * Helper macros to support writing architecture-specific linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample; architectures may have special requirements]
 * INIT_TEXT_SECTION(PAGE_SIZE)
 * INIT_DATA_SECTION(...)
 * PERCPU_SECTION(CACHELINE_SIZE)
 * EXCEPTION_TABLE(...)
 * BSS_SECTION(0, 0, 0)
 * DISCARDS		// must be the last
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 *   // __init_begin and __init_end should be page aligned, so that we can
 *   // free the whole .init memory
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data
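 *
 * As an illustration (a sketch, not part of this header), generic kernel
 * code consumes these markers as ordinary symbols; freeing the init range
 * looks roughly like the following, with the exact call site varying by
 * architecture:
 *
 *	extern char __init_begin[], __init_end[];
 *	free_reserved_area(__init_begin, __init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");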
 * Only some architectures want to have the .notes segment visible in
 * a separate PT_NOTE ELF Program Header. When this happens, it needs
 * to be visible in both the kernel text's PT_LOAD and the PT_NOTE
 * Program Headers. In this case, though, the PT_LOAD needs to be made
 * the default again so that all the following sections don't also end
 * up in the PT_NOTE Program Header.
#define NOTES_HEADERS		:text :note
#define NOTES_HEADERS_RESTORE	__restore_ph : { *(.__restore_ph) } :text
#define NOTES_HEADERS_RESTORE
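/*
 * Usage sketch (illustrative, not part of this header): an architecture that
 * wants the separate PT_NOTE segment defines EMITS_PT_NOTE before including
 * this file and declares both program headers itself, e.g. roughly:
 *
 *	#define EMITS_PT_NOTE
 *	#include <asm-generic/vmlinux.lds.h>
 *
 *	PHDRS {
 *		text PT_LOAD FLAGS(5);	/* R_E */
 *		note PT_NOTE FLAGS(0);
 *	}
 *
 * NOTES then emits .notes into both :text and :note, and
 * NOTES_HEADERS_RESTORE switches subsequent output sections back to :text.
 */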
 * Some architectures have non-executable read-only exception tables.
 * They can be added to the RO_DATA segment by specifying their desired
 * alignment.
#ifdef RO_EXCEPTION_TABLE_ALIGN
#define RO_EXCEPTION_TABLE	EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
#define RO_EXCEPTION_TABLE
/* Align . to an 8-byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()	. = ALIGN(8)

 * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
 * generates .data.identifier sections, which need to be pulled in with
 * .data. We don't want to pull in .data..other sections, which Linux
 * has defined. Same for text and bss.
 *
 * With LTO_CLANG, the linker also splits sections by default, so we need
 * these macros to combine the sections during the final link.
 *
 * RODATA_MAIN is not used because existing code already defines .rodata.x
 * sections to be brought in with rodata.
#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
#define SBSS_MAIN .sbss
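/*
 * A quick illustration (not part of this header): with dead code elimination
 * or Clang LTO, a global such as
 *
 *	static int foo_count;	// may be emitted into its own .data.foo_count
 *
 * gets its own .data.foo_count input section, which the .data.[0-9a-zA-Z_]*
 * pattern in DATA_MAIN folds back into .data, while special kernel sections
 * such as .data..percpu (note the double dot) deliberately do not match the
 * pattern and keep their own placement.
 */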
 * GCC 4.5 and later have a 32-byte section alignment for structures,
 * except GCC 4.9, which feels the need to align on 64 bytes.
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
 * The order of the sched class addresses is important, as it is
 * used to determine the priority of each sched class in
 * relation to the others.
__begin_sched_classes = .; \
*(__idle_sched_class) \
*(__fair_sched_class) \
*(__rt_sched_class) \
*(__dl_sched_class) \
*(__stop_sched_class) \
__end_sched_classes = .;
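/*
 * As a sketch (illustrative, not part of this header), scheduler code can
 * then treat the classes as a contiguous, priority-ordered array, roughly:
 *
 *	extern struct sched_class __begin_sched_classes[], __end_sched_classes[];
 *	#define sched_class_highest	(__end_sched_classes - 1)
 *	#define sched_class_lowest	(__begin_sched_classes - 1)
 *
 * so walking from highest to lowest priority is simple pointer arithmetic
 * over the section laid out above.
 */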
/* The actual configuration determines if the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
 * The ftrace call sites are logged to a section whose name depends on the
 * compiler option used. A given kernel image will only use one, AKA
 * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
 * dependencies for FTRACE_CALLSITE_SECTION's definition.
 * Need to also make ftrace_stub_graph point to ftrace_stub
 * so that the same stub location may have different protocols
 * and not trip up C verifiers.
 *
 * ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func
 * as some archs will have a different prototype for that function
 * but ftrace_ops_list_func() will have a single prototype.
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
KEEP(*(__patchable_function_entries)) \
__stop_mcount_loc = .; \
ftrace_stub_graph = ftrace_stub; \
ftrace_ops_list_func = arch_ftrace_ops_list_func;
# ifdef CONFIG_FUNCTION_TRACER
# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub; \
ftrace_ops_list_func = arch_ftrace_ops_list_func;
# define MCOUNT_REC()
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
KEEP(*(_ftrace_annotated_branch)) \
__stop_annotated_branch_profile = .;
#define LIKELY_PROFILE()
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() __start_branch_profile = .; \
KEEP(*(_ftrace_branch)) \
__stop_branch_profile = .;
#define BRANCH_PROFILE()
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
__start_kprobe_blacklist = .; \
KEEP(*(_kprobe_blacklist)) \
__stop_kprobe_blacklist = .;
#define KPROBE_BLACKLIST()
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
__start_error_injection_whitelist = .; \
KEEP(*(_error_injection_whitelist)) \
__stop_error_injection_whitelist = .;
#define ERROR_INJECT_WHITELIST()
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
__start_ftrace_events = .; \
KEEP(*(_ftrace_events)) \
__stop_ftrace_events = .; \
__start_ftrace_eval_maps = .; \
KEEP(*(_ftrace_eval_map)) \
__stop_ftrace_eval_maps = .;
#define FTRACE_EVENTS()
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
KEEP(*(__trace_printk_fmt)) /* trace_printk fmt pointers */ \
__stop___trace_bprintk_fmt = .;
#define TRACEPOINT_STR() __start___tracepoint_str = .; \
KEEP(*(__tracepoint_str)) /* tracepoint string pointers */ \
__stop___tracepoint_str = .;
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
__start_syscalls_metadata = .; \
KEEP(*(__syscalls_metadata)) \
__stop_syscalls_metadata = .;
#define TRACE_SYSCALLS()
#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN(); \
__start__bpf_raw_tp = .; \
KEEP(*(__bpf_raw_tp_map)) \
__stop__bpf_raw_tp = .;
#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() . = ALIGN(8); \
__earlycon_table = .; \
KEEP(*(__earlycon_table)) \
__earlycon_table_end = .;
#define EARLYCON_TABLE()
#ifdef CONFIG_SECURITY
#define LSM_TABLE() . = ALIGN(8); \
__start_lsm_info = .; \
KEEP(*(.lsm_info.init)) \
#define EARLY_LSM_TABLE() . = ALIGN(8); \
__start_early_lsm_info = .; \
KEEP(*(.early_lsm_info.init)) \
__end_early_lsm_info = .;
#define EARLY_LSM_TABLE()
#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
__##name##_of_table = .; \
KEEP(*(__##name##_of_table)) \
KEEP(*(__##name##_of_table_end))
#define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
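/*
 * Expansion sketch (illustrative only): with CONFIG_TIMER_OF=y,
 * TIMER_OF_TABLES() goes through OF_TABLE(CONFIG_TIMER_OF, timer),
 * IS_ENABLED() evaluates to 1, and _OF_TABLE_1(timer) emits roughly:
 *
 *	__timer_of_table = .;
 *	KEEP(*(__timer_of_table))
 *	KEEP(*(__timer_of_table_end))
 *
 * With the option disabled, _OF_TABLE_0(timer) expands to nothing and no
 * table is emitted.
 */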
#define ACPI_PROBE_TABLE(name) \
__##name##_acpi_probe_table = .; \
KEEP(*(__##name##_acpi_probe_table)) \
__##name##_acpi_probe_table_end = .;
#define ACPI_PROBE_TABLE(name)
#ifdef CONFIG_THERMAL
#define THERMAL_TABLE(name) \
__##name##_thermal_table = .; \
KEEP(*(__##name##_thermal_table)) \
__##name##_thermal_table_end = .;
#define THERMAL_TABLE(name)
#define KERNEL_DTB() \
KEEP(*(.dtb.init.rodata)) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data*) \
MEM_KEEP(exit.data*) \
/* implement dynamic printk debug */ \
__start___dyndbg = .; \
__stop___dyndbg = .; \
 * Data section helpers
#define NOSAVE_DATA \
. = ALIGN(PAGE_SIZE); \
__nosave_begin = .; \
. = ALIGN(PAGE_SIZE); \
#define PAGE_ALIGNED_DATA(page_align) \
. = ALIGN(page_align); \
*(.data..page_aligned) \
. = ALIGN(page_align);
#define READ_MOSTLY_DATA(align) \
*(.data..read_mostly) \
#define CACHELINE_ALIGNED_DATA(align) \
*(.data..cacheline_aligned)
#define INIT_TASK_DATA(align) \
__start_init_task = .; \
init_thread_union = .; \
KEEP(*(.data..init_task)) \
KEEP(*(.data..init_thread_info)) \
. = __start_init_task + THREAD_SIZE; \
#define JUMP_TABLE_DATA \
__start___jump_table = .; \
KEEP(*(__jump_table)) \
__stop___jump_table = .;
#define STATIC_CALL_DATA \
__start_static_call_sites = .; \
KEEP(*(.static_call_sites)) \
__stop_static_call_sites = .; \
__start_static_call_tramp_key = .; \
KEEP(*(.static_call_tramp_key)) \
__stop_static_call_tramp_key = .;
 * Allow architectures to handle ro_after_init data on their
 * own by defining an empty RO_AFTER_INIT_DATA.
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
__start_ro_after_init = .; \
*(.data..ro_after_init) \
__end_ro_after_init = .;
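/*
 * For illustration (not part of this header): a C object annotated with
 * __ro_after_init is what lands in the input section collected above, e.g.
 *
 *	static int sysctl_something __ro_after_init = 1;
 *
 * It stays writable during boot and is mapped read-only once init completes,
 * within the [__start_ro_after_init, __end_ro_after_init) range.
 */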
#define RO_DATA(align) \
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
*(.rodata) *(.rodata.*) \
RO_AFTER_INIT_DATA /* Read only after init */ \
__start___tracepoints_ptrs = .; \
KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
__stop___tracepoints_ptrs = .; \
*(__tracepoints_strings) /* Tracepoints: strings */ \
.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
__start_pci_fixups_early = .; \
KEEP(*(.pci_fixup_early)) \
__end_pci_fixups_early = .; \
__start_pci_fixups_header = .; \
KEEP(*(.pci_fixup_header)) \
__end_pci_fixups_header = .; \
__start_pci_fixups_final = .; \
KEEP(*(.pci_fixup_final)) \
__end_pci_fixups_final = .; \
__start_pci_fixups_enable = .; \
KEEP(*(.pci_fixup_enable)) \
__end_pci_fixups_enable = .; \
__start_pci_fixups_resume = .; \
KEEP(*(.pci_fixup_resume)) \
__end_pci_fixups_resume = .; \
__start_pci_fixups_resume_early = .; \
KEEP(*(.pci_fixup_resume_early)) \
__end_pci_fixups_resume_early = .; \
__start_pci_fixups_suspend = .; \
KEEP(*(.pci_fixup_suspend)) \
__end_pci_fixups_suspend = .; \
__start_pci_fixups_suspend_late = .; \
KEEP(*(.pci_fixup_suspend_late)) \
__end_pci_fixups_suspend_late = .; \
FW_LOADER_BUILT_IN_DATA \
/* Kernel symbol table: Normal symbols */ \
__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
__start___ksymtab = .; \
KEEP(*(SORT(___ksymtab+*))) \
__stop___ksymtab = .; \
/* Kernel symbol table: GPL-only symbols */ \
__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
__start___ksymtab_gpl = .; \
KEEP(*(SORT(___ksymtab_gpl+*))) \
__stop___ksymtab_gpl = .; \
/* Kernel symbol table: Normal symbols */ \
__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
__start___kcrctab = .; \
KEEP(*(SORT(___kcrctab+*))) \
__stop___kcrctab = .; \
/* Kernel symbol table: GPL-only symbols */ \
__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
__start___kcrctab_gpl = .; \
KEEP(*(SORT(___kcrctab_gpl+*))) \
__stop___kcrctab_gpl = .; \
/* Kernel symbol table: strings */ \
__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
*(__ksymtab_strings) \
/* __*init sections */ \
__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
MEM_KEEP(init.rodata) \
MEM_KEEP(exit.rodata) \
/* Built-in module parameters. */ \
__param : AT(ADDR(__param) - LOAD_OFFSET) { \
__start___param = .; \
__stop___param = .; \
/* Built-in module versions. */ \
__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
__start___modver = .; \
__stop___modver = .; \
. = ALIGN((align)); \
 * .text..L.cfi.jumptable.* contain Control-Flow Integrity (CFI)
 * jump table entries.
#ifdef CONFIG_CFI_CLANG
#define TEXT_CFI_JT \
. = ALIGN(PMD_SIZE); \
__cfi_jt_start = .; \
*(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \
. = ALIGN(PMD_SIZE); \
 * Non-instrumentable text section
#define NOINSTR_TEXT \
__noinstr_text_start = .; \
__noinstr_text_end = .;
 * .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map.
 *
 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
 * code elimination is enabled, so these sections should be converted
 * to use ".." first.
*(.text.hot .text.hot.*) \
*(TEXT_MAIN .text.fixup) \
*(.text.unlikely .text.unlikely.*) \
*(.text.unknown .text.unknown.*) \
*(.text.asan.* .text.tsan.*) \
MEM_KEEP(init.text*) \
MEM_KEEP(exit.text*) \
/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
__sched_text_start = .; \
__sched_text_end = .;
/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
__lock_text_start = .; \
#define CPUIDLE_TEXT \
__cpuidle_text_start = .; \
__cpuidle_text_end = .;
#define KPROBES_TEXT \
__kprobes_text_start = .; \
__kprobes_text_end = .;
__entry_text_start = .; \
__entry_text_end = .;
#define IRQENTRY_TEXT \
__irqentry_text_start = .; \
__irqentry_text_end = .;
#define SOFTIRQENTRY_TEXT \
__softirqentry_text_start = .; \
*(.softirqentry.text) \
__softirqentry_text_end = .;
#define STATIC_CALL_TEXT \
__static_call_text_start = .; \
*(.static_call.text) \
__static_call_text_end = .;
/* Section used for early init (in .S files) */
#define HEAD_TEXT KEEP(*(.head.text))
#define HEAD_TEXT_SECTION \
.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
#define EXCEPTION_TABLE(align) \
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
__start___ex_table = .; \
KEEP(*(__ex_table)) \
__stop___ex_table = .; \
#ifdef CONFIG_DEBUG_INFO_BTF
.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \
#define INIT_TASK_DATA_SECTION(align) \
.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
INIT_TASK_DATA(align) \
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
KEEP(*(SORT(.ctors.*))) \
KEEP(*(SORT(.init_array.*))) \
KEEP(*(.init_array)) \
#define KERNEL_CTORS()
/* init and exit section handling */
KEEP(*(SORT(___kentry+*))) \
*(.init.data init.data.*) \
MEM_DISCARD(init.data*) \
*(.init.rodata .init.rodata.*) \
ERROR_INJECT_WHITELIST() \
MEM_DISCARD(init.rodata) \
RESERVEDMEM_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
CPUIDLE_METHOD_OF_TABLES() \
IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(timer) \
THERMAL_TABLE(governor) \
*(.init.text .init.text.*) \
MEM_DISCARD(init.text*)
*(.exit.data .exit.data.*) \
*(.fini_array .fini_array.*) \
MEM_DISCARD(exit.data*) \
MEM_DISCARD(exit.rodata*)
MEM_DISCARD(exit.text)
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
#define SBSS(sbss_align) \
. = ALIGN(sbss_align); \
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
 * sections to the front of bss.
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#define BSS(bss_align) \
. = ALIGN(bss_align); \
.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
. = ALIGN(PAGE_SIZE); \
*(.bss..page_aligned) \
. = ALIGN(PAGE_SIZE); \
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
#define DWARF_DEBUG \
.debug 0 : { *(.debug) } \
.line 0 : { *(.line) } \
/* GNU DWARF 1 extensions */ \
.debug_srcinfo 0 : { *(.debug_srcinfo) } \
.debug_sfnames 0 : { *(.debug_sfnames) } \
/* DWARF 1.1 and DWARF 2 */ \
.debug_aranges 0 : { *(.debug_aranges) } \
.debug_pubnames 0 : { *(.debug_pubnames) } \
.debug_info 0 : { *(.debug_info \
.gnu.linkonce.wi.*) } \
.debug_abbrev 0 : { *(.debug_abbrev) } \
.debug_line 0 : { *(.debug_line) } \
.debug_frame 0 : { *(.debug_frame) } \
.debug_str 0 : { *(.debug_str) } \
.debug_loc 0 : { *(.debug_loc) } \
.debug_macinfo 0 : { *(.debug_macinfo) } \
.debug_pubtypes 0 : { *(.debug_pubtypes) } \
.debug_ranges 0 : { *(.debug_ranges) } \
/* SGI/MIPS DWARF 2 extensions */ \
.debug_weaknames 0 : { *(.debug_weaknames) } \
.debug_funcnames 0 : { *(.debug_funcnames) } \
.debug_typenames 0 : { *(.debug_typenames) } \
.debug_varnames 0 : { *(.debug_varnames) } \
/* GNU DWARF 2 extensions */ \
.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \
.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \
.debug_types 0 : { *(.debug_types) } \
.debug_addr 0 : { *(.debug_addr) } \
.debug_line_str 0 : { *(.debug_line_str) } \
.debug_loclists 0 : { *(.debug_loclists) } \
.debug_macro 0 : { *(.debug_macro) } \
.debug_names 0 : { *(.debug_names) } \
.debug_rnglists 0 : { *(.debug_rnglists) } \
.debug_str_offsets 0 : { *(.debug_str_offsets) }
/* Stabs debugging sections. */
#define STABS_DEBUG \
.stab 0 : { *(.stab) } \
.stabstr 0 : { *(.stabstr) } \
.stab.excl 0 : { *(.stab.excl) } \
.stab.exclstr 0 : { *(.stab.exclstr) } \
.stab.index 0 : { *(.stab.index) } \
.stab.indexstr 0 : { *(.stab.indexstr) }
/* Required sections not related to debugging. */
#define ELF_DETAILS \
.comment 0 : { *(.comment) } \
.symtab 0 : { *(.symtab) } \
.strtab 0 : { *(.strtab) } \
.shstrtab 0 : { *(.shstrtab) }
#ifdef CONFIG_GENERIC_BUG
__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
__start___bug_table = .; \
KEEP(*(__bug_table)) \
__stop___bug_table = .; \
#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE \
.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
__start_orc_unwind_ip = .; \
KEEP(*(.orc_unwind_ip)) \
__stop_orc_unwind_ip = .; \
.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
__start_orc_unwind = .; \
KEEP(*(.orc_unwind)) \
__stop_orc_unwind = .; \
text_size = _etext - _stext; \
.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
. += (((text_size + LOOKUP_BLOCK_SIZE - 1) / \
LOOKUP_BLOCK_SIZE) + 1) * 4; \
orc_lookup_end = .; \
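/*
 * Sizing sketch (illustrative): the lookup table reserves one 4-byte entry
 * per LOOKUP_BLOCK_SIZE bytes of text, rounded up, plus one terminating
 * entry. Assuming LOOKUP_BLOCK_SIZE is 256, a 10 MiB text range needs
 * roughly (10485760 / 256 + 1) * 4 = 163844 bytes of .orc_lookup.
 */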
#define ORC_UNWIND_TABLE
/* Built-in firmware blobs */
#ifdef CONFIG_FW_LOADER
#define FW_LOADER_BUILT_IN_DATA \
.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
__start_builtin_fw = .; \
KEEP(*(.builtin_fw)) \
__end_builtin_fw = .; \
#define FW_LOADER_BUILT_IN_DATA
#ifdef CONFIG_PM_TRACE
.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
__tracedata_start = .; \
KEEP(*(.tracedata)) \
__tracedata_end = .; \
#ifdef CONFIG_PRINTK_INDEX
#define PRINTK_INDEX \
.printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \
__start_printk_index = .; \
__stop_printk_index = .; \
.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
NOTES_HEADERS_RESTORE
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
KEEP(*(.init.setup)) \
#define INIT_CALLS_LEVEL(level) \
__initcall##level##_start = .; \
KEEP(*(.initcall##level##.init)) \
KEEP(*(.initcall##level##s.init)) \
__initcall_start = .; \
KEEP(*(.initcallearly.init)) \
INIT_CALLS_LEVEL(0) \
INIT_CALLS_LEVEL(1) \
INIT_CALLS_LEVEL(2) \
INIT_CALLS_LEVEL(3) \
INIT_CALLS_LEVEL(4) \
INIT_CALLS_LEVEL(5) \
INIT_CALLS_LEVEL(rootfs) \
INIT_CALLS_LEVEL(6) \
INIT_CALLS_LEVEL(7) \
#define CON_INITCALL \
__con_initcall_start = .; \
KEEP(*(.con_initcall.init)) \
__con_initcall_end = .;
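/*
 * Ordering sketch (illustrative, not part of this header): the level list in
 * INIT_CALLS above is what the __define_initcall() machinery feeds. For
 * example, a driver using
 *
 *	device_initcall(foo_init);	/* foo_init is hypothetical; level 6 */
 *
 * has its entry emitted into .initcall6.init, so it runs after the rootfs
 * level and therefore after the initramfs has been populated.
 */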
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
__kunit_suites_start = .; \
KEEP(*(.kunit_test_suites)) \
__kunit_suites_end = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
__initramfs_start = .; \
KEEP(*(.init.ramfs)) \
KEEP(*(.init.ramfs.info))
 * Memory encryption operates on a page basis. Since we need to clear
 * the memory encryption mask for this section, it needs to be aligned
 * on a page boundary and be a page-size multiple in length.
 *
 * Note: We use a separate section so that only this section gets
 * decrypted to avoid exposing more than we wish.
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION \
. = ALIGN(PAGE_SIZE); \
*(.data..decrypted) \
*(.data..percpu..decrypted) \
. = ALIGN(PAGE_SIZE);
#define PERCPU_DECRYPTED_SECTION
 * Default discarded sections.
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc. DISCARDS must be the last of output
 * section definitions so that such archs put those in earlier section
 * definitions.
#ifdef RUNTIME_DISCARD_EXIT
#define EXIT_DISCARDS
#define EXIT_DISCARDS \
 * Clang's -fprofile-arcs, -fsanitize=kernel-address, and
 * -fsanitize=thread produce unwanted sections (.eh_frame
 * and .init_array.*), but CONFIG_CONSTRUCTORS wants to
 * keep any .init_array.* sections.
 * https://bugs.llvm.org/show_bug.cgi?id=46478
#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) || \
	defined(CONFIG_CFI_CLANG)
# ifdef CONFIG_CONSTRUCTORS
# define SANITIZER_DISCARDS \
# define SANITIZER_DISCARDS \
*(.init_array) *(.init_array.*) \
# define SANITIZER_DISCARDS
#define COMMON_DISCARDS \
SANITIZER_DISCARDS \
/* ld.bfd warns about .gnu.version* even when not emitted */ \
 * PERCPU_INPUT - the percpu input sections
 * @cacheline: cacheline size
 *
 * The core percpu section names and core symbols which do not rely
 * directly upon load addresses.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
#define PERCPU_INPUT(cacheline) \
__per_cpu_start = .; \
*(.data..percpu..first) \
. = ALIGN(PAGE_SIZE); \
*(.data..percpu..page_aligned) \
. = ALIGN(cacheline); \
*(.data..percpu..read_mostly) \
. = ALIGN(cacheline); \
*(.data..percpu..shared_aligned) \
PERCPU_DECRYPTED_SECTION \
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies an explicit base address and all
 * percpu symbols will be offset from the given address. If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 * @phdr defines the output PHDR to use if not blank. Be warned that
 * output PHDR is sticky. If @phdr is specified, the next output
 * section in the linker script will go there too. @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU_SECTION.
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
__per_cpu_load = .; \
.data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
PERCPU_INPUT(cacheline) \
. = __per_cpu_load + SIZEOF(.data..percpu);
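/*
 * Usage sketch (illustrative, not part of this header): an architecture that
 * needs the percpu area at a fixed virtual address can do something like
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * placing the section at vaddr 0 in a :percpu program header, roughly the
 * way x86_64 historically laid out its zero-based percpu area. Most
 * architectures just use PERCPU_SECTION() below instead.
 */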
 * PERCPU_SECTION - define output section for percpu area, simple version
 * @cacheline: cacheline size
 *
 * Aligns to PAGE_SIZE and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configurations.
#define PERCPU_SECTION(cacheline) \
. = ALIGN(PAGE_SIZE); \
.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
__per_cpu_load = .; \
PERCPU_INPUT(cacheline) \
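/*
 * As an illustration (not part of this header): a per-CPU variable declared
 * in C with
 *
 *	DEFINE_PER_CPU(int, foo_counter);	/* foo_counter is hypothetical */
 *
 * is emitted into the .data..percpu input section collected by
 * PERCPU_INPUT() above; per_cpu(foo_counter, cpu) then resolves it through
 * the per-CPU offsets set up relative to __per_cpu_start.
 */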
 * Definition of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 *
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically/always smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used. */
#define RW_DATA(cacheline, pagealigned, inittask) \
. = ALIGN(PAGE_SIZE); \
.data : AT(ADDR(.data) - LOAD_OFFSET) { \
INIT_TASK_DATA(inittask) \
PAGE_ALIGNED_DATA(pagealigned) \
CACHELINE_ALIGNED_DATA(cacheline) \
READ_MOSTLY_DATA(cacheline) \
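/*
 * Usage sketch (illustrative): a typical architecture linker script invokes
 * this as something like
 *
 *	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. cacheline-aligned data at L1_CACHE_BYTES, page-aligned data at
 * PAGE_SIZE, and the init task data aligned to THREAD_SIZE.
 */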
#define INIT_TEXT_SECTION(inittext_align) \
. = ALIGN(inittext_align); \
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
#define INIT_DATA_SECTION(initsetup_align) \
.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
INIT_SETUP(initsetup_align) \
#define BSS_SECTION(sbss_align, bss_align, stop_align) \
. = ALIGN(sbss_align); \
. = ALIGN(stop_align); \