/*
* Helper macros to support writing architecture-specific linker scripts.
* A minimal linker script has the following content:
* [This is a sample; architectures may have special requirements]
* INIT_TEXT_SECTION(PAGE_SIZE)
* INIT_DATA_SECTION(...)
* PERCPU_SECTION(CACHELINE_SIZE)
* EXCEPTION_TABLE(...)
* BSS_SECTION(0, 0, 0)
* DISCARDS // must be the last
* [__init_begin, __init_end] is the init section that may be freed after init
* // __init_begin and __init_end should be page aligned, so that we can
* // free the whole .init memory
* [_stext, _etext] is the text section
* [_sdata, _edata] is the data section
* Some of the included output sections have their own set of constants.
* Examples are: [__initramfs_start, __initramfs_end] for initramfs and
* [__nosave_begin, __nosave_end] for the nosave data
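*
* A fuller sketch of how these pieces compose (illustrative only; START,
* the entry point, and the alignment arguments are placeholders, not
* requirements of any particular architecture):
*
*	OUTPUT_ARCH(arch)
*	ENTRY(entry_point)
*	SECTIONS
*	{
*		. = START;
*		__init_begin = .;
*		HEAD_TEXT_SECTION
*		INIT_TEXT_SECTION(PAGE_SIZE)
*		INIT_DATA_SECTION(16)
*		PERCPU_SECTION(CACHELINE_SIZE)
*		__init_end = .;
*		_stext = .;
*		.text : { TEXT_TEXT SCHED_TEXT LOCK_TEXT }
*		_etext = .;
*		_sdata = .;
*		RO_DATA(PAGE_SIZE)
*		RW_DATA(CACHELINE_SIZE, PAGE_SIZE, THREAD_SIZE)
*		_edata = .;
*		EXCEPTION_TABLE(16)
*		BSS_SECTION(0, 0, 0)
*		_end = .;
*		STABS_DEBUG
*		DWARF_DEBUG
*		ELF_DETAILS
*		DISCARDS	// must be the last
*	}
*/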
/*
* Only some architectures want to have the .notes segment visible in
* a separate PT_NOTE ELF Program Header. When this happens, it needs
* to be visible in both the kernel text's PT_LOAD and the PT_NOTE
* Program Headers. In this case, though, the PT_LOAD needs to be made
* the default again so that all the following sections don't also end
* up in the PT_NOTE Program Header.
*/
#define NOTES_HEADERS :text :note
#define NOTES_HEADERS_RESTORE __restore_ph : { *(.__restore_ph) } :text
#define NOTES_HEADERS_RESTORE
/*
* Some architectures have non-executable read-only exception tables.
* They can be added to the RO_DATA segment by specifying their desired
* alignment.
*/
#ifdef RO_EXCEPTION_TABLE_ALIGN
#define RO_EXCEPTION_TABLE EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
#define RO_EXCEPTION_TABLE
/* Align . to an 8-byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)
/*
* The LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
* generates .data.identifier sections, which need to be pulled in with
* .data. We don't want to pull in .data..other sections, which Linux
* has defined. Same for text and bss.
*
* With LTO_CLANG, the linker also splits sections by default, so we need
* these macros to combine the sections during the final link.
*
* RODATA_MAIN is not used because existing code already defines .rodata.x
* sections to be brought in with rodata.
*/
#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG)
#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
#define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L*
#define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
#define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L*
#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..compoundliteral*
#define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
#define TEXT_MAIN .text
#define DATA_MAIN .data
#define SDATA_MAIN .sdata
#define RODATA_MAIN .rodata
#define BSS_MAIN .bss
#define SBSS_MAIN .sbss
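/*
* For illustration (hypothetical identifiers): with -ffunction-sections
* and -fdata-sections the compiler emits
*
*	void my_func(void) { }		// -> .text.my_func
*	static int my_var = 1;		// -> .data.my_var
*
* which the [0-9a-zA-Z_]* globs above fold back into .text and .data,
* while kernel-defined double-dot sections such as .data..percpu are
* deliberately left unmatched.
*/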
/*
* GCC 4.5 and later use a 32-byte section alignment for structures,
* except GCC 4.9, which feels the need to align on 64 bytes.
*/
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
/*
* The order of the sched class addresses is important, as it is
* used to determine the priority order of each sched class in
* relation to the others.
*/
	__begin_sched_classes = .; \
	*(__idle_sched_class) \
	*(__fair_sched_class) \
	*(__rt_sched_class) \
	*(__dl_sched_class) \
	*(__stop_sched_class) \
	__end_sched_classes = .;
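/*
* A sketch of how the resulting contiguous array is consumed from C
* (the declarations below are illustrative; the real iteration helpers
* live in the scheduler code):
*
*	extern const struct sched_class __begin_sched_classes[];
*	extern const struct sched_class __end_sched_classes[];
*
*	const struct sched_class *class;
*	for (class = __begin_sched_classes;
*	     class < __end_sched_classes; class++)
*		...	// lowest (idle) to highest (stop) priority
*/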
/* The actual configuration determines whether the init/exit sections
* are handled as text/data or whether they can be discarded (which
* often happens at runtime).
*/
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec) *(.cpu##sec)
#define CPU_DISCARD(sec)
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec) *(.mem##sec)
#define MEM_DISCARD(sec)
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
/*
* The ftrace call sites are logged to a section whose name depends on the
* compiler option used. A given kernel image will only use one, AKA
* FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
* dependencies for FTRACE_CALLSITE_SECTION's definition.
*
* We also need to make ftrace_stub_graph point to ftrace_stub
* so that the same stub location may serve different protocols
* without confusing C verifiers.
*
* ftrace_ops_list_func will be defined as arch_ftrace_ops_list_func,
* as some archs will have a different prototype for that function,
* but ftrace_ops_list_func() will have a single prototype.
*/
#define MCOUNT_REC() . = ALIGN(8); \
	__start_mcount_loc = .; \
	KEEP(*(__mcount_loc)) \
	KEEP(*(__patchable_function_entries)) \
	__stop_mcount_loc = .; \
	ftrace_stub_graph = ftrace_stub; \
	ftrace_ops_list_func = arch_ftrace_ops_list_func;
# ifdef CONFIG_FUNCTION_TRACER
# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub; \
	ftrace_ops_list_func = arch_ftrace_ops_list_func;
# define MCOUNT_REC()
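/*
* A sketch of how the bracketed table is consumed at boot (simplified
* from the ftrace initialization logic): each entry is the address of
* one patchable call site, and ftrace walks the array as
*
*	extern unsigned long __start_mcount_loc[];
*	extern unsigned long __stop_mcount_loc[];
*
*	ftrace_process_locs(NULL, __start_mcount_loc, __stop_mcount_loc);
*/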
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
	KEEP(*(_ftrace_annotated_branch)) \
	__stop_annotated_branch_profile = .;
#define LIKELY_PROFILE()
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE() __start_branch_profile = .; \
	KEEP(*(_ftrace_branch)) \
	__stop_branch_profile = .;
#define BRANCH_PROFILE()
#ifdef CONFIG_KPROBES
#define KPROBE_BLACKLIST() . = ALIGN(8); \
	__start_kprobe_blacklist = .; \
	KEEP(*(_kprobe_blacklist)) \
	__stop_kprobe_blacklist = .;
#define KPROBE_BLACKLIST()
#ifdef CONFIG_FUNCTION_ERROR_INJECTION
#define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
	__start_error_injection_whitelist = .; \
	KEEP(*(_error_injection_whitelist)) \
	__stop_error_injection_whitelist = .;
#define ERROR_INJECT_WHITELIST()
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() . = ALIGN(8); \
	__start_ftrace_events = .; \
	KEEP(*(_ftrace_events)) \
	__stop_ftrace_events = .; \
	__start_ftrace_eval_maps = .; \
	KEEP(*(_ftrace_eval_map)) \
	__stop_ftrace_eval_maps = .;
#define FTRACE_EVENTS()
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
	KEEP(*(__trace_printk_fmt)) /* trace_printk fmt pointers */ \
	__stop___trace_bprintk_fmt = .;
#define TRACEPOINT_STR() __start___tracepoint_str = .; \
	KEEP(*(__tracepoint_str)) /* tracepoint string pointers */ \
	__stop___tracepoint_str = .;
#define TRACE_PRINTKS()
#define TRACEPOINT_STR()
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8); \
	__start_syscalls_metadata = .; \
	KEEP(*(__syscalls_metadata)) \
	__stop_syscalls_metadata = .;
#define TRACE_SYSCALLS()
#ifdef CONFIG_BPF_EVENTS
#define BPF_RAW_TP() STRUCT_ALIGN(); \
	__start__bpf_raw_tp = .; \
	KEEP(*(__bpf_raw_tp_map)) \
	__stop__bpf_raw_tp = .;
#ifdef CONFIG_SERIAL_EARLYCON
#define EARLYCON_TABLE() . = ALIGN(8); \
	__earlycon_table = .; \
	KEEP(*(__earlycon_table)) \
	__earlycon_table_end = .;
#define EARLYCON_TABLE()
#ifdef CONFIG_SECURITY
#define LSM_TABLE() . = ALIGN(8); \
	__start_lsm_info = .; \
	KEEP(*(.lsm_info.init)) \
#define EARLY_LSM_TABLE() . = ALIGN(8); \
	__start_early_lsm_info = .; \
	KEEP(*(.early_lsm_info.init)) \
	__end_early_lsm_info = .;
#define EARLY_LSM_TABLE()
#define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
#define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
#define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
#define _OF_TABLE_0(name)
#define _OF_TABLE_1(name) \
	__##name##_of_table = .; \
	KEEP(*(__##name##_of_table)) \
	KEEP(*(__##name##_of_table_end))
#define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
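/*
* Expansion walkthrough (IS_ENABLED() comes from include/linux/kconfig.h):
* with CONFIG_TIMER_OF=y,
*
*	TIMER_OF_TABLES()
*	-> OF_TABLE(CONFIG_TIMER_OF, timer)
*	-> ___OF_TABLE(1, timer)
*	-> _OF_TABLE_1(timer)
*	-> __timer_of_table = .; KEEP(*(__timer_of_table)) ...
*
* With the option disabled it resolves to _OF_TABLE_0(timer), which
* emits nothing.
*/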
#define ACPI_PROBE_TABLE(name) \
	__##name##_acpi_probe_table = .; \
	KEEP(*(__##name##_acpi_probe_table)) \
	__##name##_acpi_probe_table_end = .;
#define ACPI_PROBE_TABLE(name)
#ifdef CONFIG_THERMAL
#define THERMAL_TABLE(name) \
	__##name##_thermal_table = .; \
	KEEP(*(__##name##_thermal_table)) \
	__##name##_thermal_table_end = .;
#define THERMAL_TABLE(name)
#define DTPM_TABLE() \
	KEEP(*(__dtpm_table)) \
	__dtpm_table_end = .;
#define KERNEL_DTB() \
	KEEP(*(.dtb.init.rodata)) \
	*(.data..shared_aligned) /* percpu related */ \
	MEM_KEEP(init.data*) \
	MEM_KEEP(exit.data*) \
	/* implement dynamic printk debug */ \
	__start___dyndbg = .; \
	__stop___dyndbg = .; \
/* Data section helpers */
#define NOSAVE_DATA \
	. = ALIGN(PAGE_SIZE); \
	__nosave_begin = .; \
	. = ALIGN(PAGE_SIZE); \
#define PAGE_ALIGNED_DATA(page_align) \
	. = ALIGN(page_align); \
	*(.data..page_aligned) \
	. = ALIGN(page_align);
#define READ_MOSTLY_DATA(align) \
	*(.data..read_mostly) \
#define CACHELINE_ALIGNED_DATA(align) \
	*(.data..cacheline_aligned)
#define INIT_TASK_DATA(align) \
	__start_init_task = .; \
	init_thread_union = .; \
	KEEP(*(.data..init_task)) \
	KEEP(*(.data..init_thread_info)) \
	. = __start_init_task + THREAD_SIZE; \
#define JUMP_TABLE_DATA \
	__start___jump_table = .; \
	KEEP(*(__jump_table)) \
	__stop___jump_table = .;
#define STATIC_CALL_DATA \
	__start_static_call_sites = .; \
	KEEP(*(.static_call_sites)) \
	__stop_static_call_sites = .; \
	__start_static_call_tramp_key = .; \
	KEEP(*(.static_call_tramp_key)) \
	__stop_static_call_tramp_key = .;
/*
* Allow architectures to handle ro_after_init data on their
* own by defining an empty RO_AFTER_INIT_DATA.
*/
#ifndef RO_AFTER_INIT_DATA
#define RO_AFTER_INIT_DATA \
	__start_ro_after_init = .; \
	*(.data..ro_after_init) \
	__end_ro_after_init = .;
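/*
* Usage sketch from the C side: marking an object __ro_after_init
* (from include/linux/cache.h) places it in .data..ro_after_init, so
* it stays writable during __init code and is write-protected once
* init completes, e.g.
*
*	static unsigned long settings __ro_after_init;	// illustrative name
*/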
#define RO_DATA(align) \
	. = ALIGN((align)); \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
	__start_rodata = .; \
	*(.rodata) *(.rodata.*) \
	RO_AFTER_INIT_DATA /* Read only after init */ \
	__start___tracepoints_ptrs = .; \
	KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
	__stop___tracepoints_ptrs = .; \
	*(__tracepoints_strings) /* Tracepoints: strings */ \
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
	__start_pci_fixups_early = .; \
	KEEP(*(.pci_fixup_early)) \
	__end_pci_fixups_early = .; \
	__start_pci_fixups_header = .; \
	KEEP(*(.pci_fixup_header)) \
	__end_pci_fixups_header = .; \
	__start_pci_fixups_final = .; \
	KEEP(*(.pci_fixup_final)) \
	__end_pci_fixups_final = .; \
	__start_pci_fixups_enable = .; \
	KEEP(*(.pci_fixup_enable)) \
	__end_pci_fixups_enable = .; \
	__start_pci_fixups_resume = .; \
	KEEP(*(.pci_fixup_resume)) \
	__end_pci_fixups_resume = .; \
	__start_pci_fixups_resume_early = .; \
	KEEP(*(.pci_fixup_resume_early)) \
	__end_pci_fixups_resume_early = .; \
	__start_pci_fixups_suspend = .; \
	KEEP(*(.pci_fixup_suspend)) \
	__end_pci_fixups_suspend = .; \
	__start_pci_fixups_suspend_late = .; \
	KEEP(*(.pci_fixup_suspend_late)) \
	__end_pci_fixups_suspend_late = .; \
	FW_LOADER_BUILT_IN_DATA \
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
	__start___ksymtab = .; \
	KEEP(*(SORT(___ksymtab+*))) \
	__stop___ksymtab = .; \
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
	__start___ksymtab_gpl = .; \
	KEEP(*(SORT(___ksymtab_gpl+*))) \
	__stop___ksymtab_gpl = .; \
	/* Kernel symbol table: Normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
	__start___kcrctab = .; \
	KEEP(*(SORT(___kcrctab+*))) \
	__stop___kcrctab = .; \
	/* Kernel symbol table: GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
	__start___kcrctab_gpl = .; \
	KEEP(*(SORT(___kcrctab_gpl+*))) \
	__stop___kcrctab_gpl = .; \
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
	*(__ksymtab_strings) \
	/* __*init sections */ \
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
	MEM_KEEP(init.rodata) \
	MEM_KEEP(exit.rodata) \
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
	__start___param = .; \
	__stop___param = .; \
	/* Built-in module versions. */ \
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
	__start___modver = .; \
	__stop___modver = .; \
	. = ALIGN((align)); \
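/*
* Why SORT(___ksymtab+*) in RO_DATA above: EXPORT_SYMBOL(foo) places
* the entry for foo in its own input section named "___ksymtab+foo",
* and SORT() makes the linker emit those sections in name order, so
* the export table ends up sorted by symbol name and symbol lookup
* can binary-search it. (A sketch of the mechanism implemented in
* include/linux/export.h, not a verbatim quote.)
*/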
/*
* .text..L.cfi.jumptable.* contain Control-Flow Integrity (CFI)
* jump table entries.
*/
#ifdef CONFIG_CFI_CLANG
#define TEXT_CFI_JT \
	. = ALIGN(PMD_SIZE); \
	__cfi_jt_start = .; \
	*(.text..L.cfi.jumptable .text..L.cfi.jumptable.*) \
	. = ALIGN(PMD_SIZE); \
/*
* Non-instrumentable text section
*/
#define NOINSTR_TEXT \
	__noinstr_text_start = .; \
	__noinstr_text_end = .;
/*
* .text section. Map to function alignment to avoid address changes
* during the second ld pass when generating System.map.
*
* TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
* code elimination is enabled, so these sections should be converted
* to use ".." first.
*/
	*(.text.hot .text.hot.*) \
	*(TEXT_MAIN .text.fixup) \
	*(.text.unlikely .text.unlikely.*) \
	*(.text.unknown .text.unknown.*) \
	*(.text.asan.* .text.tsan.*) \
	MEM_KEEP(init.text*) \
	MEM_KEEP(exit.text*) \
/* sched.text is aligned to function alignment to ensure we have the same
* address even at the second ld pass when generating System.map */
	__sched_text_start = .; \
	__sched_text_end = .;
/* spinlock.text is aligned to function alignment to ensure we have the same
* address even at the second ld pass when generating System.map */
	__lock_text_start = .; \
#define CPUIDLE_TEXT \
	__cpuidle_text_start = .; \
	__cpuidle_text_end = .;
#define KPROBES_TEXT \
	__kprobes_text_start = .; \
	__kprobes_text_end = .;
	__entry_text_start = .; \
	__entry_text_end = .;
#define IRQENTRY_TEXT \
	__irqentry_text_start = .; \
	__irqentry_text_end = .;
#define SOFTIRQENTRY_TEXT \
	__softirqentry_text_start = .; \
	*(.softirqentry.text) \
	__softirqentry_text_end = .;
#define STATIC_CALL_TEXT \
	__static_call_text_start = .; \
	*(.static_call.text) \
	__static_call_text_end = .;
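/*
* From the C side, functions land in these text sections via
* annotations defined in various kernel headers, e.g. (the function
* names below are illustrative):
*
*	void __sched schedule_example(void);	// -> .sched.text
*	int __kprobes trap_handler(void);	// -> .kprobes.text
*	void __cpuidle idle_loop(void);		// -> .cpuidle.text
*/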
/* Section used for early init (in .S files) */
#define HEAD_TEXT KEEP(*(.head.text))
#define HEAD_TEXT_SECTION \
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
#define EXCEPTION_TABLE(align) \
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
	__start___ex_table = .; \
	KEEP(*(__ex_table)) \
	__stop___ex_table = .; \
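/*
* Each __ex_table entry pairs a potentially faulting instruction with
* its fixup code. Arch code emits entries from (inline) assembly along
* these lines (a per-architecture sketch; x86, for instance, stores
* relative offsets):
*
*	1:	mov (%reg), %rax	// may fault
*	.pushsection __ex_table, "a"
*	.long 1b - ., fixup - .
*	.popsection
*/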
#ifdef CONFIG_DEBUG_INFO_BTF
	.BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
	.BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) { \
#define INIT_TASK_DATA_SECTION(align) \
	.data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
	INIT_TASK_DATA(align) \
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS() . = ALIGN(8); \
	KEEP(*(SORT(.ctors.*))) \
	KEEP(*(SORT(.init_array.*))) \
	KEEP(*(.init_array)) \
#define KERNEL_CTORS()
/* init and exit section handling */
	KEEP(*(SORT(___kentry+*))) \
	*(.init.data init.data.*) \
	MEM_DISCARD(init.data*) \
	*(.init.rodata .init.rodata.*) \
	ERROR_INJECT_WHITELIST() \
	MEM_DISCARD(init.rodata) \
	RESERVEDMEM_OF_TABLES() \
	CPU_METHOD_OF_TABLES() \
	CPUIDLE_METHOD_OF_TABLES() \
	IRQCHIP_OF_MATCH_TABLE() \
	ACPI_PROBE_TABLE(irqchip) \
	ACPI_PROBE_TABLE(timer) \
	THERMAL_TABLE(governor) \
	*(.init.text .init.text.*) \
	MEM_DISCARD(init.text*)
	*(.exit.data .exit.data.*) \
	*(.fini_array .fini_array.*) \
	MEM_DISCARD(exit.data*) \
	MEM_DISCARD(exit.rodata*)
	MEM_DISCARD(exit.text)
/*
* bss (Block Started by Symbol) - uninitialized data
* zeroed during startup
*/
#define SBSS(sbss_align) \
	. = ALIGN(sbss_align); \
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
/*
* Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
* sections to the front of bss.
*/
#ifndef BSS_FIRST_SECTIONS
#define BSS_FIRST_SECTIONS
#define BSS(bss_align) \
	. = ALIGN(bss_align); \
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
	. = ALIGN(PAGE_SIZE); \
	*(.bss..page_aligned) \
	. = ALIGN(PAGE_SIZE); \
/*
* DWARF debug sections.
* Symbols in the DWARF debugging sections are relative to
* the beginning of the section so we begin them at 0.
*/
#define DWARF_DEBUG \
	.debug 0 : { *(.debug) } \
	.line 0 : { *(.line) } \
	/* GNU DWARF 1 extensions */ \
	.debug_srcinfo 0 : { *(.debug_srcinfo) } \
	.debug_sfnames 0 : { *(.debug_sfnames) } \
	/* DWARF 1.1 and DWARF 2 */ \
	.debug_aranges 0 : { *(.debug_aranges) } \
	.debug_pubnames 0 : { *(.debug_pubnames) } \
	.debug_info 0 : { *(.debug_info \
	.gnu.linkonce.wi.*) } \
	.debug_abbrev 0 : { *(.debug_abbrev) } \
	.debug_line 0 : { *(.debug_line) } \
	.debug_frame 0 : { *(.debug_frame) } \
	.debug_str 0 : { *(.debug_str) } \
	.debug_loc 0 : { *(.debug_loc) } \
	.debug_macinfo 0 : { *(.debug_macinfo) } \
	.debug_pubtypes 0 : { *(.debug_pubtypes) } \
	.debug_ranges 0 : { *(.debug_ranges) } \
	/* SGI/MIPS DWARF 2 extensions */ \
	.debug_weaknames 0 : { *(.debug_weaknames) } \
	.debug_funcnames 0 : { *(.debug_funcnames) } \
	.debug_typenames 0 : { *(.debug_typenames) } \
	.debug_varnames 0 : { *(.debug_varnames) } \
	/* GNU DWARF 2 extensions */ \
	.debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \
	.debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \
	.debug_types 0 : { *(.debug_types) } \
	.debug_addr 0 : { *(.debug_addr) } \
	.debug_line_str 0 : { *(.debug_line_str) } \
	.debug_loclists 0 : { *(.debug_loclists) } \
	.debug_macro 0 : { *(.debug_macro) } \
	.debug_names 0 : { *(.debug_names) } \
	.debug_rnglists 0 : { *(.debug_rnglists) } \
	.debug_str_offsets 0 : { *(.debug_str_offsets) }
/* Stabs debugging sections. */
#define STABS_DEBUG \
	.stab 0 : { *(.stab) } \
	.stabstr 0 : { *(.stabstr) } \
	.stab.excl 0 : { *(.stab.excl) } \
	.stab.exclstr 0 : { *(.stab.exclstr) } \
	.stab.index 0 : { *(.stab.index) } \
	.stab.indexstr 0 : { *(.stab.indexstr) }
/* Required sections not related to debugging. */
#define ELF_DETAILS \
	.comment 0 : { *(.comment) } \
	.symtab 0 : { *(.symtab) } \
	.strtab 0 : { *(.strtab) } \
	.shstrtab 0 : { *(.shstrtab) }
#ifdef CONFIG_GENERIC_BUG
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
	__start___bug_table = .; \
	KEEP(*(__bug_table)) \
	__stop___bug_table = .; \
#ifdef CONFIG_UNWINDER_ORC
#define ORC_UNWIND_TABLE \
	.orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
	__start_orc_unwind_ip = .; \
	KEEP(*(.orc_unwind_ip)) \
	__stop_orc_unwind_ip = .; \
	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
	__start_orc_unwind = .; \
	KEEP(*(.orc_unwind)) \
	__stop_orc_unwind = .; \
	text_size = _etext - _stext; \
	.orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
	. += (((text_size + LOOKUP_BLOCK_SIZE - 1) / \
	LOOKUP_BLOCK_SIZE) + 1) * 4; \
	orc_lookup_end = .; \
#define ORC_UNWIND_TABLE
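/*
* Sizing note for .orc_lookup above: the expression reserves one
* 4-byte lookup slot per LOOKUP_BLOCK_SIZE bytes of kernel text,
* rounded up, plus one extra slot. For example, assuming
* LOOKUP_BLOCK_SIZE == 256 (the x86 value) and 16 MiB of text:
* (16777216 / 256 + 1) * 4 = 262148 bytes.
*/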
/* Built-in firmware blobs */
#ifdef CONFIG_FW_LOADER
#define FW_LOADER_BUILT_IN_DATA \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
	__start_builtin_fw = .; \
	KEEP(*(.builtin_fw)) \
	__end_builtin_fw = .; \
#define FW_LOADER_BUILT_IN_DATA
#ifdef CONFIG_PM_TRACE
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
	__tracedata_start = .; \
	KEEP(*(.tracedata)) \
	__tracedata_end = .; \
#ifdef CONFIG_PRINTK_INDEX
#define PRINTK_INDEX \
	.printk_index : AT(ADDR(.printk_index) - LOAD_OFFSET) { \
	__start_printk_index = .; \
	__stop_printk_index = .; \
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
	NOTES_HEADERS_RESTORE
#define INIT_SETUP(initsetup_align) \
	. = ALIGN(initsetup_align); \
	KEEP(*(.init.setup)) \
#define INIT_CALLS_LEVEL(level) \
	__initcall##level##_start = .; \
	KEEP(*(.initcall##level##.init)) \
	KEEP(*(.initcall##level##s.init)) \
	__initcall_start = .; \
	KEEP(*(.initcallearly.init)) \
	INIT_CALLS_LEVEL(0) \
	INIT_CALLS_LEVEL(1) \
	INIT_CALLS_LEVEL(2) \
	INIT_CALLS_LEVEL(3) \
	INIT_CALLS_LEVEL(4) \
	INIT_CALLS_LEVEL(5) \
	INIT_CALLS_LEVEL(rootfs) \
	INIT_CALLS_LEVEL(6) \
	INIT_CALLS_LEVEL(7) \
#define CON_INITCALL \
	__con_initcall_start = .; \
	KEEP(*(.con_initcall.init)) \
	__con_initcall_end = .;
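/*
* How code lands in the initcall buckets above (a sketch): the
* initcall macros in include/linux/init.h record the function in
* .initcall<level>.init, and init/main.c runs the levels in the order
* laid out above, e.g.
*
*	static int __init foo_init(void) { return 0; }
*	device_initcall(foo_init);	// -> .initcall6.init
*
* (foo_init is an illustrative name.) Note that rootfs initcalls
* deliberately run between levels 5 and 6.
*/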
/* Alignment must be consistent with (kunit_suite *) in include/kunit/test.h */
#define KUNIT_TABLE() \
	__kunit_suites_start = .; \
	KEEP(*(.kunit_test_suites)) \
	__kunit_suites_end = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS \
	__initramfs_start = .; \
	KEEP(*(.init.ramfs)) \
	KEEP(*(.init.ramfs.info))
/*
* Memory encryption operates on a page basis. Since we need to clear
* the memory encryption mask for this section, it needs to be aligned
* on a page boundary and be a page-size multiple in length.
*
* Note: We use a separate section so that only this section gets
* decrypted to avoid exposing more than we wish.
*/
#ifdef CONFIG_AMD_MEM_ENCRYPT
#define PERCPU_DECRYPTED_SECTION \
	. = ALIGN(PAGE_SIZE); \
	*(.data..decrypted) \
	*(.data..percpu..decrypted) \
	. = ALIGN(PAGE_SIZE);
#define PERCPU_DECRYPTED_SECTION
/*
* Default discarded sections.
*
* Some archs want to discard exit text/data at runtime rather than
* link time due to cross-section references such as alt instructions,
* bug table, eh_frame, etc. DISCARDS must be the last of the output
* section definitions so that such archs can put those in earlier
* section definitions.
*/
#ifdef RUNTIME_DISCARD_EXIT
#define EXIT_DISCARDS
#define EXIT_DISCARDS \
/*
* Clang's -fprofile-arcs, -fsanitize=kernel-address, and
* -fsanitize=thread produce unwanted sections (.eh_frame
* and .init_array.*), but CONFIG_CONSTRUCTORS wants to
* keep any .init_array.* sections.
* https://bugs.llvm.org/show_bug.cgi?id=46478
*/
#if defined(CONFIG_GCOV_KERNEL) || defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KCSAN) || \
	defined(CONFIG_CFI_CLANG)
# ifdef CONFIG_CONSTRUCTORS
# define SANITIZER_DISCARDS \
# define SANITIZER_DISCARDS \
	*(.init_array) *(.init_array.*) \
# define SANITIZER_DISCARDS
#define COMMON_DISCARDS \
	SANITIZER_DISCARDS \
	/* ld.bfd warns about .gnu.version* even when not emitted */ \
/*
* PERCPU_INPUT - the percpu input sections
* @cacheline: cacheline size
*
* The core percpu section names and core symbols which do not rely
* directly upon load addresses.
*
* @cacheline is used to align subsections to avoid false cacheline
* sharing between subsections for different purposes.
*/
#define PERCPU_INPUT(cacheline) \
	__per_cpu_start = .; \
	*(.data..percpu..first) \
	. = ALIGN(PAGE_SIZE); \
	*(.data..percpu..page_aligned) \
	. = ALIGN(cacheline); \
	*(.data..percpu..read_mostly) \
	. = ALIGN(cacheline); \
	*(.data..percpu..shared_aligned) \
	PERCPU_DECRYPTED_SECTION \
/*
* PERCPU_VADDR - define output section for percpu area
* @cacheline: cacheline size
* @vaddr: explicit base address (optional)
* @phdr: destination PHDR (optional)
*
* Macro which expands to the output section for the percpu area.
*
* @cacheline is used to align subsections to avoid false cacheline
* sharing between subsections for different purposes.
*
* If @vaddr is not blank, it specifies an explicit base address and all
* percpu symbols will be offset from the given address. If blank,
* @vaddr always equals @laddr + LOAD_OFFSET.
*
* @phdr defines the output PHDR to use if not blank. Be warned that
* output PHDR is sticky. If @phdr is specified, the next output
* section in the linker script will go there too. @phdr should have
* a leading colon.
*
* Note that this macro defines __per_cpu_load as an absolute symbol.
* If there is no need to put the percpu section at a predetermined
* address, use PERCPU_SECTION.
*/
#define PERCPU_VADDR(cacheline, vaddr, phdr) \
	__per_cpu_load = .; \
	.data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
	PERCPU_INPUT(cacheline) \
	. = __per_cpu_load + SIZEOF(.data..percpu);
/*
* PERCPU_SECTION - define output section for percpu area, simple version
* @cacheline: cacheline size
*
* Aligns to PAGE_SIZE and outputs the output section for the percpu
* area. This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load
* and __per_cpu_start will be identical.
*
* This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
* except that __per_cpu_load is defined as a relative symbol against
* .data..percpu, which is required for relocatable x86_32 configurations.
*/
#define PERCPU_SECTION(cacheline) \
	. = ALIGN(PAGE_SIZE); \
	.data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
	__per_cpu_load = .; \
	PERCPU_INPUT(cacheline) \
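/*
* Typical use (a sketch): the architecture linker script simply says
*
*	PERCPU_SECTION(L1_CACHE_BYTES)
*
* and C code populates the section via the percpu machinery from
* include/linux/percpu-defs.h, e.g.
*
*	DEFINE_PER_CPU(int, foo_count);	// -> .data..percpu
*
* (foo_count is an illustrative name.)
*/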
/*
* Definition of the high level *_SECTION macros.
* They will fit only a subset of the architectures.
*/
/*
* All sections are combined in a single .data section.
* The sections following CONSTRUCTORS are arranged so their
* typical alignment matches.
* A cacheline is typically (always) smaller than a PAGE_SIZE, so
* the sections that have this restriction (or something similar)
* are located before the ones requiring PAGE_SIZE alignment.
* NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
* matches the requirement of PAGE_ALIGNED_DATA.
*
* Use 0 as page_align if page-aligned data is not used.
*/
#define RW_DATA(cacheline, pagealigned, inittask) \
	. = ALIGN(PAGE_SIZE); \
	.data : AT(ADDR(.data) - LOAD_OFFSET) { \
	INIT_TASK_DATA(inittask) \
	PAGE_ALIGNED_DATA(pagealigned) \
	CACHELINE_ALIGNED_DATA(cacheline) \
	READ_MOSTLY_DATA(cacheline) \
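/*
* Example invocation (typical argument choices, not mandated):
*
*	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
*
* Pass 0 as the page_align argument if page-aligned data is not used.
*/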
#define INIT_TEXT_SECTION(inittext_align) \
	. = ALIGN(inittext_align); \
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
#define INIT_DATA_SECTION(initsetup_align) \
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
	INIT_SETUP(initsetup_align) \
#define BSS_SECTION(sbss_align, bss_align, stop_align) \
	. = ALIGN(sbss_align); \
	. = ALIGN(stop_align); \