1 /*
2  * Helper macros to support writing architecture specific
3  * linker scripts.
4  *
5  * A minimal linker script has the following content:
6  * [This is a sample; architectures may have special requirements]
7  *
8  * OUTPUT_FORMAT(...)
9  * OUTPUT_ARCH(...)
10  * ENTRY(...)
11  * SECTIONS
12  * {
13  *      . = START;
14  *      __init_begin = .;
15  *      HEAD_TEXT_SECTION
16  *      INIT_TEXT_SECTION(PAGE_SIZE)
17  *      INIT_DATA_SECTION(...)
18  *      PERCPU_SECTION(CACHELINE_SIZE)
19  *      __init_end = .;
20  *
21  *      _stext = .;
22  *      TEXT_SECTION = 0
23  *      _etext = .;
24  *
25  *      _sdata = .;
26  *      RO_DATA(PAGE_SIZE)
27  *      RW_DATA(...)
28  *      _edata = .;
29  *
30  *      EXCEPTION_TABLE(...)
31  *
32  *      BSS_SECTION(0, 0, 0)
33  *      _end = .;
34  *
35  *      STABS_DEBUG
36  *      DWARF_DEBUG
37  *
38  *      DISCARDS                // must be the last
39  * }
40  *
41  * [__init_begin, __init_end] is the init section that may be freed after init
42  *      // __init_begin and __init_end should be page aligned, so that we can
43  *      // free the whole .init memory
44  * [_stext, _etext] is the text section
45  * [_sdata, _edata] is the data section
46  *
47  * Some of the included output sections have their own set of constants.
48  * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
49  *               [__nosave_begin, __nosave_end] for the nosave data
50  */
51
52 #ifndef LOAD_OFFSET
53 #define LOAD_OFFSET 0
54 #endif
55
56 /*
57  * Only some architectures want to have the .notes segment visible in
58  * a separate PT_NOTE ELF Program Header. When this happens, it needs
59  * to be visible in both the kernel text's PT_LOAD and the PT_NOTE
60  * Program Headers. In this case, though, the PT_LOAD needs to be made
61  * the default again so that all the following sections don't also end
62  * up in the PT_NOTE Program Header.
63  */
64 #ifdef EMITS_PT_NOTE
65 #define NOTES_HEADERS           :text :note
66 #define NOTES_HEADERS_RESTORE   __restore_ph : { *(.__restore_ph) } :text
67 #else
68 #define NOTES_HEADERS
69 #define NOTES_HEADERS_RESTORE
70 #endif
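
/*
 * Usage sketch (illustrative, not copied from any particular
 * architecture): a vmlinux.lds.S that defines EMITS_PT_NOTE is expected
 * to declare both program headers itself, roughly:
 *
 *      PHDRS {
 *              text PT_LOAD FLAGS(5);          (RX)
 *              note PT_NOTE FLAGS(0);
 *      }
 *
 * so that NOTES emits ".notes ... :text :note" and NOTES_HEADERS_RESTORE
 * switches the sections that follow back to the :text segment.
 */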
71
72 /*
73  * Some architectures have non-executable read-only exception tables.
74  * They can be added to the RO_DATA segment by specifying their desired
75  * alignment.
76  */
77 #ifdef RO_EXCEPTION_TABLE_ALIGN
78 #define RO_EXCEPTION_TABLE      EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
79 #else
80 #define RO_EXCEPTION_TABLE
81 #endif
82
83 /* Align . to an 8-byte boundary, which equals the maximum function alignment. */
84 #define ALIGN_FUNCTION()  . = ALIGN(8)
85
86 /*
87  * The LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
88  * generates .data.identifier sections that need to be pulled in with
89  * .data. We don't want to pull in .data..other sections, which Linux
90  * has defined. The same applies to text and bss.
91  *
92  * RODATA_MAIN is not used because existing code already defines .rodata.x
93  * sections to be brought in with rodata.
94  */
95 #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
96 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
97 #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
98 #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
99 #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
100 #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
101 #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
102 #else
103 #define TEXT_MAIN .text
104 #define DATA_MAIN .data
105 #define SDATA_MAIN .sdata
106 #define RODATA_MAIN .rodata
107 #define BSS_MAIN .bss
108 #define SBSS_MAIN .sbss
109 #endif
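
/*
 * Illustration (a sketch, assuming CONFIG_LD_DEAD_CODE_DATA_ELIMINATION=y,
 * i.e. -ffunction-sections/-fdata-sections): the compiler then emits one
 * section per symbol, e.g.
 *
 *      int foo(void) { ... }   ->  .text.foo  (matched by TEXT_MAIN)
 *      static int bar;         ->  .data.bar  (matched by DATA_MAIN)
 *
 * whereas kernel-defined special sections deliberately use a double dot
 * (e.g. .data..percpu), so the [0-9a-zA-Z_]* patterns above do not pull
 * them into the generic output sections.
 */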
110
111 /*
112  * GCC 4.5 and later use a 32-byte section alignment for structures,
113  * except GCC 4.9, which feels the need to align on 64 bytes.
114  */
115 #if __GNUC__ == 4 && __GNUC_MINOR__ == 9
116 #define STRUCT_ALIGNMENT 64
117 #else
118 #define STRUCT_ALIGNMENT 32
119 #endif
120 #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
121
122 /*
123  * The order of the sched class addresses is important, as it is used
124  * to determine the relative priority of each sched class.
126  */
127 #define SCHED_DATA                              \
128         STRUCT_ALIGN();                         \
129         __begin_sched_classes = .;              \
130         *(__idle_sched_class)                   \
131         *(__fair_sched_class)                   \
132         *(__rt_sched_class)                     \
133         *(__dl_sched_class)                     \
134         *(__stop_sched_class)                   \
135         __end_sched_classes = .;
136
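/*
 * Consumption sketch (illustrative, not the scheduler's exact code):
 * because the class structs are emitted in priority order, scheduler
 * code can compare class addresses directly, e.g.
 *
 *      extern const struct sched_class __begin_sched_classes[];
 *      extern const struct sched_class __end_sched_classes[];
 *
 *      bool a_has_higher_prio = class_a > class_b;
 *
 * With the layout above, a class placed later (closer to
 * __end_sched_classes) has the higher priority.
 */
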
137 /* The actual configuration determines whether the init/exit sections
138  * are handled as text/data or can be discarded (which
139  * often happens at runtime).
140  */
141 #ifdef CONFIG_HOTPLUG_CPU
142 #define CPU_KEEP(sec)    *(.cpu##sec)
143 #define CPU_DISCARD(sec)
144 #else
145 #define CPU_KEEP(sec)
146 #define CPU_DISCARD(sec) *(.cpu##sec)
147 #endif
148
149 #if defined(CONFIG_MEMORY_HOTPLUG)
150 #define MEM_KEEP(sec)    *(.mem##sec)
151 #define MEM_DISCARD(sec)
152 #else
153 #define MEM_KEEP(sec)
154 #define MEM_DISCARD(sec) *(.mem##sec)
155 #endif
156
157 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
158 /*
159  * The ftrace call sites are logged to a section whose name depends on the
160  * compiler option used. A given kernel image will only use one, AKA
161  * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
162  * dependencies for FTRACE_CALLSITE_SECTION's definition.
163  *
164  * We also need to make ftrace_stub_graph point to ftrace_stub
165  * so that the same stub location may have different protocols
166  * without confusing C verifiers.
167  */
168 #define MCOUNT_REC()    . = ALIGN(8);                           \
169                         __start_mcount_loc = .;                 \
170                         KEEP(*(__mcount_loc))                   \
171                         KEEP(*(__patchable_function_entries))   \
172                         __stop_mcount_loc = .;                  \
173                         ftrace_stub_graph = ftrace_stub;
174 #else
175 # ifdef CONFIG_FUNCTION_TRACER
176 #  define MCOUNT_REC()  ftrace_stub_graph = ftrace_stub;
177 # else
178 #  define MCOUNT_REC()
179 # endif
180 #endif
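
/*
 * Consumption sketch (a simplified view of what ftrace does at boot):
 * the call-site table bounded by the symbols above is handed to ftrace
 * as one contiguous array, roughly
 *
 *      extern unsigned long __start_mcount_loc[];
 *      extern unsigned long __stop_mcount_loc[];
 *
 *      ftrace_process_locs(NULL, __start_mcount_loc, __stop_mcount_loc);
 */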
181
182 #ifdef CONFIG_TRACE_BRANCH_PROFILING
183 #define LIKELY_PROFILE()        __start_annotated_branch_profile = .;   \
184                                 KEEP(*(_ftrace_annotated_branch))       \
185                                 __stop_annotated_branch_profile = .;
186 #else
187 #define LIKELY_PROFILE()
188 #endif
189
190 #ifdef CONFIG_PROFILE_ALL_BRANCHES
191 #define BRANCH_PROFILE()        __start_branch_profile = .;             \
192                                 KEEP(*(_ftrace_branch))                 \
193                                 __stop_branch_profile = .;
194 #else
195 #define BRANCH_PROFILE()
196 #endif
197
198 #ifdef CONFIG_KPROBES
199 #define KPROBE_BLACKLIST()      . = ALIGN(8);                                 \
200                                 __start_kprobe_blacklist = .;                 \
201                                 KEEP(*(_kprobe_blacklist))                    \
202                                 __stop_kprobe_blacklist = .;
203 #else
204 #define KPROBE_BLACKLIST()
205 #endif
206
207 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
208 #define ERROR_INJECT_WHITELIST()        STRUCT_ALIGN();                       \
209                         __start_error_injection_whitelist = .;                \
210                         KEEP(*(_error_injection_whitelist))                   \
211                         __stop_error_injection_whitelist = .;
212 #else
213 #define ERROR_INJECT_WHITELIST()
214 #endif
215
216 #ifdef CONFIG_EVENT_TRACING
217 #define FTRACE_EVENTS() . = ALIGN(8);                                   \
218                         __start_ftrace_events = .;                      \
219                         KEEP(*(_ftrace_events))                         \
220                         __stop_ftrace_events = .;                       \
221                         __start_ftrace_eval_maps = .;                   \
222                         KEEP(*(_ftrace_eval_map))                       \
223                         __stop_ftrace_eval_maps = .;
224 #else
225 #define FTRACE_EVENTS()
226 #endif
227
228 #ifdef CONFIG_TRACING
229 #define TRACE_PRINTKS()  __start___trace_bprintk_fmt = .;      \
230                          KEEP(*(__trace_printk_fmt)) /* trace_printk format pointers */ \
231                          __stop___trace_bprintk_fmt = .;
232 #define TRACEPOINT_STR() __start___tracepoint_str = .;  \
233                          KEEP(*(__tracepoint_str)) /* tracepoint string pointers */ \
234                          __stop___tracepoint_str = .;
235 #else
236 #define TRACE_PRINTKS()
237 #define TRACEPOINT_STR()
238 #endif
239
240 #ifdef CONFIG_FTRACE_SYSCALLS
241 #define TRACE_SYSCALLS() . = ALIGN(8);                                  \
242                          __start_syscalls_metadata = .;                 \
243                          KEEP(*(__syscalls_metadata))                   \
244                          __stop_syscalls_metadata = .;
245 #else
246 #define TRACE_SYSCALLS()
247 #endif
248
249 #ifdef CONFIG_BPF_EVENTS
250 #define BPF_RAW_TP() STRUCT_ALIGN();                                    \
251                          __start__bpf_raw_tp = .;                       \
252                          KEEP(*(__bpf_raw_tp_map))                      \
253                          __stop__bpf_raw_tp = .;
254 #else
255 #define BPF_RAW_TP()
256 #endif
257
258 #ifdef CONFIG_SERIAL_EARLYCON
259 #define EARLYCON_TABLE() . = ALIGN(8);                          \
260                          __earlycon_table = .;                  \
261                          KEEP(*(__earlycon_table))              \
262                          __earlycon_table_end = .;
263 #else
264 #define EARLYCON_TABLE()
265 #endif
266
267 #ifdef CONFIG_SECURITY
268 #define LSM_TABLE()     . = ALIGN(8);                                   \
269                         __start_lsm_info = .;                           \
270                         KEEP(*(.lsm_info.init))                         \
271                         __end_lsm_info = .;
272 #define EARLY_LSM_TABLE()       . = ALIGN(8);                           \
273                         __start_early_lsm_info = .;                     \
274                         KEEP(*(.early_lsm_info.init))                   \
275                         __end_early_lsm_info = .;
276 #else
277 #define LSM_TABLE()
278 #define EARLY_LSM_TABLE()
279 #endif
280
281 #define ___OF_TABLE(cfg, name)  _OF_TABLE_##cfg(name)
282 #define __OF_TABLE(cfg, name)   ___OF_TABLE(cfg, name)
283 #define OF_TABLE(cfg, name)     __OF_TABLE(IS_ENABLED(cfg), name)
284 #define _OF_TABLE_0(name)
285 #define _OF_TABLE_1(name)                                               \
286         . = ALIGN(8);                                                   \
287         __##name##_of_table = .;                                        \
288         KEEP(*(__##name##_of_table))                                    \
289         KEEP(*(__##name##_of_table_end))
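
/*
 * Expansion sketch: IS_ENABLED(cfg) evaluates to 0 or 1 and the double
 * expansion above pastes that digit onto _OF_TABLE_.  For example,
 *
 *      OF_TABLE(CONFIG_IRQCHIP, irqchip)
 *
 * becomes _OF_TABLE_1(irqchip) (emitting the __irqchip_of_table bounds)
 * when CONFIG_IRQCHIP is set, and _OF_TABLE_0(irqchip) (emitting
 * nothing) when it is not.
 */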
290
291 #define TIMER_OF_TABLES()       OF_TABLE(CONFIG_TIMER_OF, timer)
292 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
293 #define CLK_OF_TABLES()         OF_TABLE(CONFIG_COMMON_CLK, clk)
294 #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
295 #define CPU_METHOD_OF_TABLES()  OF_TABLE(CONFIG_SMP, cpu_method)
296 #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
297
298 #ifdef CONFIG_ACPI
299 #define ACPI_PROBE_TABLE(name)                                          \
300         . = ALIGN(8);                                                   \
301         __##name##_acpi_probe_table = .;                                \
302         KEEP(*(__##name##_acpi_probe_table))                            \
303         __##name##_acpi_probe_table_end = .;
304 #else
305 #define ACPI_PROBE_TABLE(name)
306 #endif
307
308 #ifdef CONFIG_THERMAL
309 #define THERMAL_TABLE(name)                                             \
310         . = ALIGN(8);                                                   \
311         __##name##_thermal_table = .;                                   \
312         KEEP(*(__##name##_thermal_table))                               \
313         __##name##_thermal_table_end = .;
314 #else
315 #define THERMAL_TABLE(name)
316 #endif
317
318 #define KERNEL_DTB()                                                    \
319         STRUCT_ALIGN();                                                 \
320         __dtb_start = .;                                                \
321         KEEP(*(.dtb.init.rodata))                                       \
322         __dtb_end = .;
323
324 /*
325  * .data section
326  */
327 #define DATA_DATA                                                       \
328         *(.xiptext)                                                     \
329         *(DATA_MAIN)                                                    \
330         *(.ref.data)                                                    \
331         *(.data..shared_aligned) /* percpu related */                   \
332         MEM_KEEP(init.data*)                                            \
333         MEM_KEEP(exit.data*)                                            \
334         *(.data.unlikely)                                               \
335         __start_once = .;                                               \
336         *(.data.once)                                                   \
337         __end_once = .;                                                 \
338         STRUCT_ALIGN();                                                 \
339         *(__tracepoints)                                                \
340         /* implement dynamic printk debug */                            \
341         . = ALIGN(8);                                                   \
342         __start___dyndbg = .;                                           \
343         KEEP(*(__dyndbg))                                               \
344         __stop___dyndbg = .;                                            \
345         LIKELY_PROFILE()                                                \
346         BRANCH_PROFILE()                                                \
347         TRACE_PRINTKS()                                                 \
348         BPF_RAW_TP()                                                    \
349         TRACEPOINT_STR()
350
351 /*
352  * Data section helpers
353  */
354 #define NOSAVE_DATA                                                     \
355         . = ALIGN(PAGE_SIZE);                                           \
356         __nosave_begin = .;                                             \
357         *(.data..nosave)                                                \
358         . = ALIGN(PAGE_SIZE);                                           \
359         __nosave_end = .;
360
361 #define PAGE_ALIGNED_DATA(page_align)                                   \
362         . = ALIGN(page_align);                                          \
363         *(.data..page_aligned)                                          \
364         . = ALIGN(page_align);
365
366 #define READ_MOSTLY_DATA(align)                                         \
367         . = ALIGN(align);                                               \
368         *(.data..read_mostly)                                           \
369         . = ALIGN(align);
370
371 #define CACHELINE_ALIGNED_DATA(align)                                   \
372         . = ALIGN(align);                                               \
373         *(.data..cacheline_aligned)
374
375 #define INIT_TASK_DATA(align)                                           \
376         . = ALIGN(align);                                               \
377         __start_init_task = .;                                          \
378         init_thread_union = .;                                          \
379         init_stack = .;                                                 \
380         KEEP(*(.data..init_task))                                       \
381         KEEP(*(.data..init_thread_info))                                \
382         . = __start_init_task + THREAD_SIZE;                            \
383         __end_init_task = .;
384
385 #define JUMP_TABLE_DATA                                                 \
386         . = ALIGN(8);                                                   \
387         __start___jump_table = .;                                       \
388         KEEP(*(__jump_table))                                           \
389         __stop___jump_table = .;
390
391 /*
392  * Allow architectures to handle ro_after_init data on their
393  * own by defining an empty RO_AFTER_INIT_DATA.
394  */
395 #ifndef RO_AFTER_INIT_DATA
396 #define RO_AFTER_INIT_DATA                                              \
397         . = ALIGN(8);                                                   \
398         __start_ro_after_init = .;                                      \
399         *(.data..ro_after_init)                                         \
400         JUMP_TABLE_DATA                                                 \
401         __end_ro_after_init = .;
402 #endif
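
/*
 * Usage sketch (illustrative; the variable name is made up): C objects
 * land in this range via the __ro_after_init attribute, e.g.
 *
 *      static unsigned long example_limit __ro_after_init;
 *
 * The [__start_ro_after_init, __end_ro_after_init) range is expected to
 * be switched to read-only once the init phase has completed.
 */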
403
404 /*
405  * Read only Data
406  */
407 #define RO_DATA(align)                                                  \
408         . = ALIGN((align));                                             \
409         .rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {           \
410                 __start_rodata = .;                                     \
411                 *(.rodata) *(.rodata.*)                                 \
412                 SCHED_DATA                                              \
413                 RO_AFTER_INIT_DATA      /* Read only after init */      \
414                 . = ALIGN(8);                                           \
415                 __start___tracepoints_ptrs = .;                         \
416                 KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
417                 __stop___tracepoints_ptrs = .;                          \
418                 *(__tracepoints_strings)/* Tracepoints: strings */      \
419         }                                                               \
420                                                                         \
421         .rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {          \
422                 *(.rodata1)                                             \
423         }                                                               \
424                                                                         \
425         /* PCI quirks */                                                \
426         .pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {        \
427                 __start_pci_fixups_early = .;                           \
428                 KEEP(*(.pci_fixup_early))                               \
429                 __end_pci_fixups_early = .;                             \
430                 __start_pci_fixups_header = .;                          \
431                 KEEP(*(.pci_fixup_header))                              \
432                 __end_pci_fixups_header = .;                            \
433                 __start_pci_fixups_final = .;                           \
434                 KEEP(*(.pci_fixup_final))                               \
435                 __end_pci_fixups_final = .;                             \
436                 __start_pci_fixups_enable = .;                          \
437                 KEEP(*(.pci_fixup_enable))                              \
438                 __end_pci_fixups_enable = .;                            \
439                 __start_pci_fixups_resume = .;                          \
440                 KEEP(*(.pci_fixup_resume))                              \
441                 __end_pci_fixups_resume = .;                            \
442                 __start_pci_fixups_resume_early = .;                    \
443                 KEEP(*(.pci_fixup_resume_early))                        \
444                 __end_pci_fixups_resume_early = .;                      \
445                 __start_pci_fixups_suspend = .;                         \
446                 KEEP(*(.pci_fixup_suspend))                             \
447                 __end_pci_fixups_suspend = .;                           \
448                 __start_pci_fixups_suspend_late = .;                    \
449                 KEEP(*(.pci_fixup_suspend_late))                        \
450                 __end_pci_fixups_suspend_late = .;                      \
451         }                                                               \
452                                                                         \
453         /* Built-in firmware blobs */                                   \
454         .builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {      \
455                 __start_builtin_fw = .;                                 \
456                 KEEP(*(.builtin_fw))                                    \
457                 __end_builtin_fw = .;                                   \
458         }                                                               \
459                                                                         \
460         TRACEDATA                                                       \
461                                                                         \
462         /* Kernel symbol table: Normal symbols */                       \
463         __ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {         \
464                 __start___ksymtab = .;                                  \
465                 KEEP(*(SORT(___ksymtab+*)))                             \
466                 __stop___ksymtab = .;                                   \
467         }                                                               \
468                                                                         \
469         /* Kernel symbol table: GPL-only symbols */                     \
470         __ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {     \
471                 __start___ksymtab_gpl = .;                              \
472                 KEEP(*(SORT(___ksymtab_gpl+*)))                         \
473                 __stop___ksymtab_gpl = .;                               \
474         }                                                               \
475                                                                         \
476         /* Kernel symbol table: Normal unused symbols */                \
477         __ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {  \
478                 __start___ksymtab_unused = .;                           \
479                 KEEP(*(SORT(___ksymtab_unused+*)))                      \
480                 __stop___ksymtab_unused = .;                            \
481         }                                                               \
482                                                                         \
483         /* Kernel symbol table: GPL-only unused symbols */              \
484         __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
485                 __start___ksymtab_unused_gpl = .;                       \
486                 KEEP(*(SORT(___ksymtab_unused_gpl+*)))                  \
487                 __stop___ksymtab_unused_gpl = .;                        \
488         }                                                               \
489                                                                         \
490         /* Kernel symbol table: GPL-future-only symbols */              \
491         __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
492                 __start___ksymtab_gpl_future = .;                       \
493                 KEEP(*(SORT(___ksymtab_gpl_future+*)))                  \
494                 __stop___ksymtab_gpl_future = .;                        \
495         }                                                               \
496                                                                         \
497         /* Kernel symbol table: Normal symbols */                       \
498         __kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {         \
499                 __start___kcrctab = .;                                  \
500                 KEEP(*(SORT(___kcrctab+*)))                             \
501                 __stop___kcrctab = .;                                   \
502         }                                                               \
503                                                                         \
504         /* Kernel symbol table: GPL-only symbols */                     \
505         __kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {     \
506                 __start___kcrctab_gpl = .;                              \
507                 KEEP(*(SORT(___kcrctab_gpl+*)))                         \
508                 __stop___kcrctab_gpl = .;                               \
509         }                                                               \
510                                                                         \
511         /* Kernel symbol table: Normal unused symbols */                \
512         __kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {  \
513                 __start___kcrctab_unused = .;                           \
514                 KEEP(*(SORT(___kcrctab_unused+*)))                      \
515                 __stop___kcrctab_unused = .;                            \
516         }                                                               \
517                                                                         \
518         /* Kernel symbol table: GPL-only unused symbols */              \
519         __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
520                 __start___kcrctab_unused_gpl = .;                       \
521                 KEEP(*(SORT(___kcrctab_unused_gpl+*)))                  \
522                 __stop___kcrctab_unused_gpl = .;                        \
523         }                                                               \
524                                                                         \
525         /* Kernel symbol table: GPL-future-only symbols */              \
526         __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
527                 __start___kcrctab_gpl_future = .;                       \
528                 KEEP(*(SORT(___kcrctab_gpl_future+*)))                  \
529                 __stop___kcrctab_gpl_future = .;                        \
530         }                                                               \
531                                                                         \
532         /* Kernel symbol table: strings */                              \
533         __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
534                 *(__ksymtab_strings)                                    \
535         }                                                               \
536                                                                         \
537         /* __*init sections */                                          \
538         __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {         \
539                 *(.ref.rodata)                                          \
540                 MEM_KEEP(init.rodata)                                   \
541                 MEM_KEEP(exit.rodata)                                   \
542         }                                                               \
543                                                                         \
544         /* Built-in module parameters. */                               \
545         __param : AT(ADDR(__param) - LOAD_OFFSET) {                     \
546                 __start___param = .;                                    \
547                 KEEP(*(__param))                                        \
548                 __stop___param = .;                                     \
549         }                                                               \
550                                                                         \
551         /* Built-in module versions. */                                 \
552         __modver : AT(ADDR(__modver) - LOAD_OFFSET) {                   \
553                 __start___modver = .;                                   \
554                 KEEP(*(__modver))                                       \
555                 __stop___modver = .;                                    \
556         }                                                               \
557                                                                         \
558         RO_EXCEPTION_TABLE                                              \
559         NOTES                                                           \
560         BTF                                                             \
561                                                                         \
562         . = ALIGN((align));                                             \
563         __end_rodata = .;
564
565 /*
566  * Non-instrumentable text section
567  */
568 #define NOINSTR_TEXT                                                    \
569                 ALIGN_FUNCTION();                                       \
570                 __noinstr_text_start = .;                               \
571                 *(.noinstr.text)                                        \
572                 __noinstr_text_end = .;
573
574 /*
575  * .text section. Map to function alignment to avoid address changes
576  * during the second ld pass when generating System.map.
577  *
578  * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
579  * code elimination is enabled, so these sections should be converted
580  * to use ".." first.
581  */
582 #define TEXT_TEXT                                                       \
583                 ALIGN_FUNCTION();                                       \
584                 *(.text.hot TEXT_MAIN .text.fixup .text.unlikely)       \
585                 NOINSTR_TEXT                                            \
586                 *(.text..refcount)                                      \
587                 *(.ref.text)                                            \
588         MEM_KEEP(init.text*)                                            \
589         MEM_KEEP(exit.text*)                                            \
590
591
592 /* sched.text is aligned to function alignment to ensure we have the same
593  * address even at the second ld pass when generating System.map */
594 #define SCHED_TEXT                                                      \
595                 ALIGN_FUNCTION();                                       \
596                 __sched_text_start = .;                                 \
597                 *(.sched.text)                                          \
598                 __sched_text_end = .;
599
600 /* spinlock.text is aligned to function alignment to ensure we have the same
601  * address even at the second ld pass when generating System.map */
602 #define LOCK_TEXT                                                       \
603                 ALIGN_FUNCTION();                                       \
604                 __lock_text_start = .;                                  \
605                 *(.spinlock.text)                                       \
606                 __lock_text_end = .;
607
608 #define CPUIDLE_TEXT                                                    \
609                 ALIGN_FUNCTION();                                       \
610                 __cpuidle_text_start = .;                               \
611                 *(.cpuidle.text)                                        \
612                 __cpuidle_text_end = .;
613
614 #define KPROBES_TEXT                                                    \
615                 ALIGN_FUNCTION();                                       \
616                 __kprobes_text_start = .;                               \
617                 *(.kprobes.text)                                        \
618                 __kprobes_text_end = .;
619
620 #define ENTRY_TEXT                                                      \
621                 ALIGN_FUNCTION();                                       \
622                 __entry_text_start = .;                                 \
623                 *(.entry.text)                                          \
624                 __entry_text_end = .;
625
626 #define IRQENTRY_TEXT                                                   \
627                 ALIGN_FUNCTION();                                       \
628                 __irqentry_text_start = .;                              \
629                 *(.irqentry.text)                                       \
630                 __irqentry_text_end = .;
631
632 #define SOFTIRQENTRY_TEXT                                               \
633                 ALIGN_FUNCTION();                                       \
634                 __softirqentry_text_start = .;                          \
635                 *(.softirqentry.text)                                   \
636                 __softirqentry_text_end = .;
637
638 /* Section used for early init (in .S files) */
639 #define HEAD_TEXT  KEEP(*(.head.text))
640
641 #define HEAD_TEXT_SECTION                                                       \
642         .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {               \
643                 HEAD_TEXT                                               \
644         }
645
646 /*
647  * Exception table
648  */
649 #define EXCEPTION_TABLE(align)                                          \
650         . = ALIGN(align);                                               \
651         __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {               \
652                 __start___ex_table = .;                                 \
653                 KEEP(*(__ex_table))                                     \
654                 __stop___ex_table = .;                                  \
655         }
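
/*
 * Consumption sketch (simplified): the fault handling path searches the
 * window delimited above for the faulting instruction address, roughly
 *
 *      extern struct exception_table_entry __start___ex_table[];
 *      extern struct exception_table_entry __stop___ex_table[];
 *
 *      e = search_extable(__start___ex_table,
 *                         __stop___ex_table - __start___ex_table, addr);
 */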
656
657 /*
658  * .BTF
659  */
660 #ifdef CONFIG_DEBUG_INFO_BTF
661 #define BTF                                                             \
662         .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {                           \
663                 __start_BTF = .;                                        \
664                 *(.BTF)                                                 \
665                 __stop_BTF = .;                                         \
666         }                                                               \
667         . = ALIGN(4);                                                   \
668         .BTF_ids : AT(ADDR(.BTF_ids) - LOAD_OFFSET) {                   \
669                 *(.BTF_ids)                                             \
670         }
671 #else
672 #define BTF
673 #endif
674
675 /*
676  * Init task
677  */
678 #define INIT_TASK_DATA_SECTION(align)                                   \
679         . = ALIGN(align);                                               \
680         .data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {  \
681                 INIT_TASK_DATA(align)                                   \
682         }
683
684 #ifdef CONFIG_CONSTRUCTORS
685 #define KERNEL_CTORS()  . = ALIGN(8);                      \
686                         __ctors_start = .;                 \
687                         KEEP(*(.ctors))                    \
688                         KEEP(*(SORT(.init_array.*)))       \
689                         KEEP(*(.init_array))               \
690                         __ctors_end = .;
691 #else
692 #define KERNEL_CTORS()
693 #endif
694
695 /* init and exit section handling */
696 #define INIT_DATA                                                       \
697         KEEP(*(SORT(___kentry+*)))                                      \
698         *(.init.data init.data.*)                                       \
699         MEM_DISCARD(init.data*)                                         \
700         KERNEL_CTORS()                                                  \
701         MCOUNT_REC()                                                    \
702         *(.init.rodata .init.rodata.*)                                  \
703         FTRACE_EVENTS()                                                 \
704         TRACE_SYSCALLS()                                                \
705         KPROBE_BLACKLIST()                                              \
706         ERROR_INJECT_WHITELIST()                                        \
707         MEM_DISCARD(init.rodata)                                        \
708         CLK_OF_TABLES()                                                 \
709         RESERVEDMEM_OF_TABLES()                                         \
710         TIMER_OF_TABLES()                                               \
711         CPU_METHOD_OF_TABLES()                                          \
712         CPUIDLE_METHOD_OF_TABLES()                                      \
713         KERNEL_DTB()                                                    \
714         IRQCHIP_OF_MATCH_TABLE()                                        \
715         ACPI_PROBE_TABLE(irqchip)                                       \
716         ACPI_PROBE_TABLE(timer)                                         \
717         THERMAL_TABLE(governor)                                         \
718         EARLYCON_TABLE()                                                \
719         LSM_TABLE()                                                     \
720         EARLY_LSM_TABLE()
721
722 #define INIT_TEXT                                                       \
723         *(.init.text .init.text.*)                                      \
724         *(.text.startup)                                                \
725         MEM_DISCARD(init.text*)
726
727 #define EXIT_DATA                                                       \
728         *(.exit.data .exit.data.*)                                      \
729         *(.fini_array .fini_array.*)                                    \
730         *(.dtors .dtors.*)                                              \
731         MEM_DISCARD(exit.data*)                                         \
732         MEM_DISCARD(exit.rodata*)
733
734 #define EXIT_TEXT                                                       \
735         *(.exit.text)                                                   \
736         *(.text.exit)                                                   \
737         MEM_DISCARD(exit.text)
738
739 #define EXIT_CALL                                                       \
740         *(.exitcall.exit)
741
742 /*
743  * bss (Block Started by Symbol) - uninitialized data
744  * zeroed during startup
745  */
746 #define SBSS(sbss_align)                                                \
747         . = ALIGN(sbss_align);                                          \
748         .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {                         \
749                 *(.dynsbss)                                             \
750                 *(SBSS_MAIN)                                            \
751                 *(.scommon)                                             \
752         }
753
754 /*
755  * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
756  * sections to the front of bss.
757  */
758 #ifndef BSS_FIRST_SECTIONS
759 #define BSS_FIRST_SECTIONS
760 #endif
761
762 #define BSS(bss_align)                                                  \
763         . = ALIGN(bss_align);                                           \
764         .bss : AT(ADDR(.bss) - LOAD_OFFSET) {                           \
765                 BSS_FIRST_SECTIONS                                      \
766                 . = ALIGN(PAGE_SIZE);                                   \
767                 *(.bss..page_aligned)                                   \
768                 . = ALIGN(PAGE_SIZE);                                   \
769                 *(.dynbss)                                              \
770                 *(BSS_MAIN)                                             \
771                 *(COMMON)                                               \
772         }
773
774 /*
775  * DWARF debug sections.
776  * Symbols in the DWARF debugging sections are relative to
777  * the beginning of the section so we begin them at 0.
778  */
779 #define DWARF_DEBUG                                                     \
780                 /* DWARF 1 */                                           \
781                 .debug          0 : { *(.debug) }                       \
782                 .line           0 : { *(.line) }                        \
783                 /* GNU DWARF 1 extensions */                            \
784                 .debug_srcinfo  0 : { *(.debug_srcinfo) }               \
785                 .debug_sfnames  0 : { *(.debug_sfnames) }               \
786                 /* DWARF 1.1 and DWARF 2 */                             \
787                 .debug_aranges  0 : { *(.debug_aranges) }               \
788                 .debug_pubnames 0 : { *(.debug_pubnames) }              \
789                 /* DWARF 2 */                                           \
790                 .debug_info     0 : { *(.debug_info                     \
791                                 .gnu.linkonce.wi.*) }                   \
792                 .debug_abbrev   0 : { *(.debug_abbrev) }                \
793                 .debug_line     0 : { *(.debug_line) }                  \
794                 .debug_frame    0 : { *(.debug_frame) }                 \
795                 .debug_str      0 : { *(.debug_str) }                   \
796                 .debug_loc      0 : { *(.debug_loc) }                   \
797                 .debug_macinfo  0 : { *(.debug_macinfo) }               \
798                 .debug_pubtypes 0 : { *(.debug_pubtypes) }              \
799                 /* DWARF 3 */                                           \
800                 .debug_ranges   0 : { *(.debug_ranges) }                \
801                 /* SGI/MIPS DWARF 2 extensions */                       \
802                 .debug_weaknames 0 : { *(.debug_weaknames) }            \
803                 .debug_funcnames 0 : { *(.debug_funcnames) }            \
804                 .debug_typenames 0 : { *(.debug_typenames) }            \
805                 .debug_varnames  0 : { *(.debug_varnames) }             \
806                 /* GNU DWARF 2 extensions */                            \
807                 .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) }      \
808                 .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) }      \
809                 /* DWARF 4 */                                           \
810                 .debug_types    0 : { *(.debug_types) }                 \
811                 /* DWARF 5 */                                           \
812                 .debug_macro    0 : { *(.debug_macro) }                 \
813                 .debug_addr     0 : { *(.debug_addr) }
814
815                 /* Stabs debugging sections.  */
816 #define STABS_DEBUG                                                     \
817                 .stab 0 : { *(.stab) }                                  \
818                 .stabstr 0 : { *(.stabstr) }                            \
819                 .stab.excl 0 : { *(.stab.excl) }                        \
820                 .stab.exclstr 0 : { *(.stab.exclstr) }                  \
821                 .stab.index 0 : { *(.stab.index) }                      \
822                 .stab.indexstr 0 : { *(.stab.indexstr) }                \
823                 .comment 0 : { *(.comment) }
824
825 #ifdef CONFIG_GENERIC_BUG
826 #define BUG_TABLE                                                       \
827         . = ALIGN(8);                                                   \
828         __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {             \
829                 __start___bug_table = .;                                \
830                 KEEP(*(__bug_table))                                    \
831                 __stop___bug_table = .;                                 \
832         }
833 #else
834 #define BUG_TABLE
835 #endif
836
837 #ifdef CONFIG_UNWINDER_ORC
838 #define ORC_UNWIND_TABLE                                                \
839         . = ALIGN(4);                                                   \
840         .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) {       \
841                 __start_orc_unwind_ip = .;                              \
842                 KEEP(*(.orc_unwind_ip))                                 \
843                 __stop_orc_unwind_ip = .;                               \
844         }                                                               \
845         . = ALIGN(2);                                                   \
846         .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) {             \
847                 __start_orc_unwind = .;                                 \
848                 KEEP(*(.orc_unwind))                                    \
849                 __stop_orc_unwind = .;                                  \
850         }                                                               \
851         . = ALIGN(4);                                                   \
852         .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) {             \
853                 orc_lookup = .;                                         \
854                 . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) /        \
855                         LOOKUP_BLOCK_SIZE) + 1) * 4;                    \
856                 orc_lookup_end = .;                                     \
857         }
858 #else
859 #define ORC_UNWIND_TABLE
860 #endif
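
/*
 * Sizing note for .orc_lookup above (a sketch; LOOKUP_BLOCK_SIZE is 256
 * in the x86 ORC code): one 4-byte entry is reserved per
 * LOOKUP_BLOCK_SIZE bytes of .text, rounded up, plus one terminating
 * entry.  For example, with a 1 MiB .text:
 *
 *      ((1048576 + 255) / 256 + 1) * 4 = 16388 bytes
 */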
861
862 #ifdef CONFIG_PM_TRACE
863 #define TRACEDATA                                                       \
864         . = ALIGN(4);                                                   \
865         .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {               \
866                 __tracedata_start = .;                                  \
867                 KEEP(*(.tracedata))                                     \
868                 __tracedata_end = .;                                    \
869         }
870 #else
871 #define TRACEDATA
872 #endif
873
874 #define NOTES                                                           \
875         .notes : AT(ADDR(.notes) - LOAD_OFFSET) {                       \
876                 __start_notes = .;                                      \
877                 KEEP(*(.note.*))                                        \
878                 __stop_notes = .;                                       \
879         } NOTES_HEADERS                                                 \
880         NOTES_HEADERS_RESTORE
881
882 #define INIT_SETUP(initsetup_align)                                     \
883                 . = ALIGN(initsetup_align);                             \
884                 __setup_start = .;                                      \
885                 KEEP(*(.init.setup))                                    \
886                 __setup_end = .;
887
888 #define INIT_CALLS_LEVEL(level)                                         \
889                 __initcall##level##_start = .;                          \
890                 KEEP(*(.initcall##level##.init))                        \
891                 KEEP(*(.initcall##level##s.init))                       \
892
893 #define INIT_CALLS                                                      \
894                 __initcall_start = .;                                   \
895                 KEEP(*(.initcallearly.init))                            \
896                 INIT_CALLS_LEVEL(0)                                     \
897                 INIT_CALLS_LEVEL(1)                                     \
898                 INIT_CALLS_LEVEL(2)                                     \
899                 INIT_CALLS_LEVEL(3)                                     \
900                 INIT_CALLS_LEVEL(4)                                     \
901                 INIT_CALLS_LEVEL(5)                                     \
902                 INIT_CALLS_LEVEL(rootfs)                                \
903                 INIT_CALLS_LEVEL(6)                                     \
904                 INIT_CALLS_LEVEL(7)                                     \
905                 __initcall_end = .;
906
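/*
 * Consumption sketch (a simplified view of what init/main.c does): the
 * per-level windows are walked in order, level by level, roughly
 *
 *      extern initcall_entry_t __initcall2_start[], __initcall3_start[];
 *
 *      for (fn = __initcall2_start; fn < __initcall3_start; fn++)
 *              do_one_initcall(initcall_from_entry(fn));
 *
 * with "rootfs" acting as an extra level between 5 and 6.
 */
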
907 #define CON_INITCALL                                                    \
908                 __con_initcall_start = .;                               \
909                 KEEP(*(.con_initcall.init))                             \
910                 __con_initcall_end = .;
911
912 #ifdef CONFIG_BLK_DEV_INITRD
913 #define INIT_RAM_FS                                                     \
914         . = ALIGN(4);                                                   \
915         __initramfs_start = .;                                          \
916         KEEP(*(.init.ramfs))                                            \
917         . = ALIGN(8);                                                   \
918         KEEP(*(.init.ramfs.info))
919 #else
920 #define INIT_RAM_FS
921 #endif
922
923 /*
924  * Memory encryption operates on a page basis. Since we need to clear
925  * the memory encryption mask for this section, it needs to be aligned
926  * on a page boundary and be a page-size multiple in length.
927  *
928  * Note: We use a separate section so that only this section gets
929  * decrypted to avoid exposing more than we wish.
930  */
931 #ifdef CONFIG_AMD_MEM_ENCRYPT
932 #define PERCPU_DECRYPTED_SECTION                                        \
933         . = ALIGN(PAGE_SIZE);                                           \
934         *(.data..percpu..decrypted)                                     \
935         . = ALIGN(PAGE_SIZE);
936 #else
937 #define PERCPU_DECRYPTED_SECTION
938 #endif
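
/*
 * Usage sketch (illustrative; the type and name are made up): per-cpu
 * data that must be shared with the hypervisor in an SEV guest is
 * declared with
 *
 *      DEFINE_PER_CPU_DECRYPTED(struct my_shared_data, shared_page);
 *
 * which places it in .data..percpu..decrypted so the whole page range
 * can have its encryption mask cleared.
 */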
939
940
941 /*
942  * Default discarded sections.
943  *
944  * Some archs want to discard exit text/data at runtime rather than
945  * link time due to cross-section references such as alt instructions,
946  * bug table, eh_frame, etc.  DISCARDS must be the last of output
947  * section definitions so that such archs put those in earlier section
948  * definitions.
949  */
950 #ifdef RUNTIME_DISCARD_EXIT
951 #define EXIT_DISCARDS
952 #else
953 #define EXIT_DISCARDS                                                   \
954         EXIT_TEXT                                                       \
955         EXIT_DATA
956 #endif
957
958 #define DISCARDS                                                        \
959         /DISCARD/ : {                                                   \
960         EXIT_DISCARDS                                                   \
961         EXIT_CALL                                                       \
962         *(.discard)                                                     \
963         *(.discard.*)                                                   \
964         *(.modinfo)                                                     \
965         }
966
967 /**
968  * PERCPU_INPUT - the percpu input sections
969  * @cacheline: cacheline size
970  *
971  * The core percpu section names and core symbols which do not rely
972  * directly upon load addresses.
973  *
974  * @cacheline is used to align subsections to avoid false cacheline
975  * sharing between subsections for different purposes.
976  */
977 #define PERCPU_INPUT(cacheline)                                         \
978         __per_cpu_start = .;                                            \
979         *(.data..percpu..first)                                         \
980         . = ALIGN(PAGE_SIZE);                                           \
981         *(.data..percpu..page_aligned)                                  \
982         . = ALIGN(cacheline);                                           \
983         *(.data..percpu..read_mostly)                                   \
984         . = ALIGN(cacheline);                                           \
985         *(.data..percpu)                                                \
986         *(.data..percpu..shared_aligned)                                \
987         PERCPU_DECRYPTED_SECTION                                        \
988         __per_cpu_end = .;
989
990 /**
991  * PERCPU_VADDR - define output section for percpu area
992  * @cacheline: cacheline size
993  * @vaddr: explicit base address (optional)
994  * @phdr: destination PHDR (optional)
995  *
996  * Macro which expands to output section for percpu area.
997  *
998  * @cacheline is used to align subsections to avoid false cacheline
999  * sharing between subsections for different purposes.
1000  *
1001  * If @vaddr is not blank, it specifies an explicit base address and all
1002  * percpu symbols will be offset from the given address.  If blank,
1003  * @vaddr always equals @laddr + LOAD_OFFSET.
1004  *
1005  * @phdr defines the output PHDR to use if not blank.  Be warned that
1006  * output PHDR is sticky.  If @phdr is specified, the next output
1007  * section in the linker script will go there too.  @phdr should have
1008  * a leading colon.
1009  *
1010  * Note that this macro defines __per_cpu_load as an absolute symbol.
1011  * If there is no need to put the percpu section at a predetermined
1012  * address, use PERCPU_SECTION.
1013  */
1014 #define PERCPU_VADDR(cacheline, vaddr, phdr)                            \
1015         __per_cpu_load = .;                                             \
1016         .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) {        \
1017                 PERCPU_INPUT(cacheline)                                 \
1018         } phdr                                                          \
1019         . = __per_cpu_load + SIZEOF(.data..percpu);
1020
1021 /**
1022  * PERCPU_SECTION - define output section for percpu area, simple version
1023  * @cacheline: cacheline size
1024  *
1025  * Aligns to PAGE_SIZE and outputs the output section for the percpu area.
1026  * This macro doesn't manipulate @vaddr or @phdr, so __per_cpu_load and
1027  * __per_cpu_start will be identical.
1028  *
1029  * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
1030  * except that __per_cpu_load is defined as a relative symbol against
1031  * .data..percpu which is required for relocatable x86_32 configuration.
1032  */
1033 #define PERCPU_SECTION(cacheline)                                       \
1034         . = ALIGN(PAGE_SIZE);                                           \
1035         .data..percpu   : AT(ADDR(.data..percpu) - LOAD_OFFSET) {       \
1036                 __per_cpu_load = .;                                     \
1037                 PERCPU_INPUT(cacheline)                                 \
1038         }
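
/*
 * Usage sketch (illustrative): most architectures simply place
 *
 *      PERCPU_SECTION(L1_CACHE_BYTES)
 *
 * in their vmlinux.lds.S, while SMP x86_64 has used
 * PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu) to give the percpu
 * area an explicit zero base address.
 */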
1039
1040
1041 /*
1042  * Definition of the high level *_SECTION macros
1043  * They will fit only a subset of the architectures
1044  */
1045
1046
1047 /*
1048  * Writable data.
1049  * All sections are combined in a single .data section.
1050  * The sections following CONSTRUCTORS are arranged so their
1051  * typical alignment matches.
1052  * A cacheline is typically/always smaller than a PAGE_SIZE, so
1053  * the sections that have this restriction (or similar)
1054  * are located before the ones requiring PAGE_SIZE alignment.
1055  * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
1056  * matches the requirement of PAGE_ALIGNED_DATA.
1057  *
1058  * Use 0 as page_align if page-aligned data is not used. */
1059 #define RW_DATA(cacheline, pagealigned, inittask)                       \
1060         . = ALIGN(PAGE_SIZE);                                           \
1061         .data : AT(ADDR(.data) - LOAD_OFFSET) {                         \
1062                 INIT_TASK_DATA(inittask)                                \
1063                 NOSAVE_DATA                                             \
1064                 PAGE_ALIGNED_DATA(pagealigned)                          \
1065                 CACHELINE_ALIGNED_DATA(cacheline)                       \
1066                 READ_MOSTLY_DATA(cacheline)                             \
1067                 DATA_DATA                                               \
1068                 CONSTRUCTORS                                            \
1069         }                                                               \
1070         BUG_TABLE                                                       \
1071
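/*
 * Invocation sketch (illustrative): a typical architecture calls this as
 *
 *      RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. cacheline-sensitive data is aligned to the L1 cache line size,
 * page-aligned data to PAGE_SIZE, and the init task area to THREAD_SIZE
 * (INIT_TASK_DATA itself reserves THREAD_SIZE bytes for the initial
 * stack).
 */
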
1072 #define INIT_TEXT_SECTION(inittext_align)                               \
1073         . = ALIGN(inittext_align);                                      \
1074         .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {               \
1075                 _sinittext = .;                                         \
1076                 INIT_TEXT                                               \
1077                 _einittext = .;                                         \
1078         }
1079
1080 #define INIT_DATA_SECTION(initsetup_align)                              \
1081         .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {               \
1082                 INIT_DATA                                               \
1083                 INIT_SETUP(initsetup_align)                             \
1084                 INIT_CALLS                                              \
1085                 CON_INITCALL                                            \
1086                 INIT_RAM_FS                                             \
1087         }
1088
1089 #define BSS_SECTION(sbss_align, bss_align, stop_align)                  \
1090         . = ALIGN(sbss_align);                                          \
1091         __bss_start = .;                                                \
1092         SBSS(sbss_align)                                                \
1093         BSS(bss_align)                                                  \
1094         . = ALIGN(stop_align);                                          \
1095         __bss_stop = .;