1 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * PROVIDE32() wraps the linker PROVIDE() directive for symbols (etext,
 * edata below) that only 32-bit builds are expected to export.
 * NOTE(review): the two alternative definitions are presumably the two
 * arms of a CONFIG_PPC64 #ifdef/#else whose guard lines are not visible
 * in this excerpt; as shown, the second would simply redefine the first.
 */
3 #define PROVIDE32(x) PROVIDE(__unused__##x)
5 #define PROVIDE32(x) PROVIDE(x)
/*
 * Force .bss.prominit to the front of .bss — presumably so prom_init's
 * BSS has a known early placement; confirm against prom_init usage.
 */
8 #define BSS_FIRST_SECTIONS *(.bss.prominit)
/* No extra alignment requested for the read-only exception table. */
10 #define RO_EXCEPTION_TABLE_ALIGN 0
/*
 * SOFT_MASK_TABLE() / RESTART_TABLE(): each emits an output section
 * bracketed by __start___xxx and __stop___xxx symbols so the kernel can
 * iterate the table at runtime.  KEEP() protects the entries from
 * --gc-sections, and AT(ADDR(...) - LOAD_OFFSET) keeps the load (LMA)
 * address physical while the link (VMA) address stays virtual.
 * NOTE(review): the `align` parameter is unused in the lines visible
 * here and the closing braces are missing — both macro bodies are
 * truncated in this excerpt, so do not edit them from this view alone.
 */
12 #define SOFT_MASK_TABLE(align) \
14 __soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) { \
15 __start___soft_mask_table = .; \
16 KEEP(*(__soft_mask_table)) \
17 __stop___soft_mask_table = .; \
20 #define RESTART_TABLE(align) \
22 __restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) { \
23 __start___restart_table = .; \
24 KEEP(*(__restart_table)) \
25 __stop___restart_table = .; \
29 #include <asm-generic/vmlinux.lds.h>
30 #include <asm/cache.h>
31 #include <asm/thread_info.h>
/*
 * Alignment boundary for sections that must start on a strict,
 * config-selected boundary (1 << CONFIG_DATA_SHIFT bytes).
 */
33 #define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT)
/*
 * Program-header entries: the kernel text segment is loadable and
 * read/write/execute (FLAGS(7) = RWX); `note` carries the ELF notes.
 */
38 text PT_LOAD FLAGS(7); /* RWX */
39 note PT_NOTE FLAGS(0);
/*
 * NOTE(review): the two OUTPUT_ARCH() lines are presumably the two arms
 * of a CONFIG_PPC64 #ifdef/#else whose guard lines are not visible in
 * this excerpt (64-bit vs 32-bit powerpc BFD architecture).
 */
43 OUTPUT_ARCH(powerpc:common64)
46 OUTPUT_ARCH(powerpc:common)
/*
 * 32-bit `jiffies` aliases the low word of the 64-bit jiffies_64
 * counter; the +4 selects the low 32 bits on big-endian powerpc.
 */
47 jiffies = jiffies_64 + 4;
54 * Text, read only data and other permanent read-only sections
62 * This needs to be in its own output section to avoid ld placing
63 * branch trampoline stubs randomly throughout the fixed sections,
64 * which it will do (even if the branch comes from another section)
65 * in order to optimize stub generation.
/*
 * Fixed head section: must stay at the very start of the image.  The
 * first 256 bytes and the exception vector/trampoline input sections
 * are KEEP()'d so --gc-sections cannot drop them; their placement is
 * position-sensitive (see the stub-trampoline comment above).
 */
67 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
69 KEEP(*(.head.text.first_256B));
/*
 * NOTE(review): an #else/#endif arm of this CONFIG_PPC_BOOK3E guard is
 * missing from this excerpt — in the full file the vector sections below
 * are presumably in the non-Book3E branch; verify before editing.
 */
70 #ifdef CONFIG_PPC_BOOK3E
72 KEEP(*(.head.text.real_vectors));
73 *(.head.text.real_trampolines);
74 KEEP(*(.head.text.virt_vectors));
75 *(.head.text.virt_trampolines);
/* Firmware-assisted NMI data page, only on pseries/powernv platforms. */
76 # if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
77 KEEP(*(.head.data.fwnmi_page));
80 #else /* !CONFIG_PPC64 */
89 * ALIGN(0) overrides the default output section alignment because
90 * this needs to start right after .head.text in order for fixed
91 * section placement to work.
93 .text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
/* Catch stray linker-generated stubs so they cannot land in head text. */
94 #ifdef CONFIG_LD_HEAD_STUB_CATCH
95 KEEP(*(.linker_stub_catch));
/*
 * NOTE(review): this second .text declaration is presumably the
 * alternate (#else) arm of a config guard not visible in this excerpt;
 * it lacks the ALIGN(0) override used by the variant above.
 */
100 .text : AT(ADDR(.text) - LOAD_OFFSET) {
103 /* careful! __ftr_alt_* sections need to be close to .text */
104 *(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
/* Ftrace trampoline text generated for kernel function tracing. */
106 *(.tramp.ftrace.text);
116 * -Os builds call FP save/restore functions. The powerpc64
117 * linker generates those on demand in the .sfpr section.
118 * .sfpr gets placed at the beginning of a group of input
119 * sections, which can break start-of-text offset if it is
120 * included with the main text sections, so put it by itself.
131 #endif /* CONFIG_PPC32 */
/* Page-align the end of text; PROVIDE32 exports etext for 32-bit use. */
135 . = ALIGN(PAGE_SIZE);
137 PROVIDE32 (etext = .);
/*
 * PPC64 speculation-mitigation fixup tables.  Each section records the
 * addresses of patchable instruction sites, bracketed by start/stop
 * symbols that runtime code-patching walks to enable or disable the
 * corresponding mitigation (store-forwarding barriers, L1D flushes on
 * kernel entry / uaccess / scv entry, and RFI flush).
 */
147 __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
148 __start___stf_entry_barrier_fixup = .;
149 *(__stf_entry_barrier_fixup)
150 __stop___stf_entry_barrier_fixup = .;
154 __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
155 __start___uaccess_flush_fixup = .;
156 *(__uaccess_flush_fixup)
157 __stop___uaccess_flush_fixup = .;
161 __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
162 __start___entry_flush_fixup = .;
163 *(__entry_flush_fixup)
164 __stop___entry_flush_fixup = .;
168 __scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
169 __start___scv_entry_flush_fixup = .;
170 *(__scv_entry_flush_fixup)
171 __stop___scv_entry_flush_fixup = .;
175 __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
176 __start___stf_exit_barrier_fixup = .;
177 *(__stf_exit_barrier_fixup)
178 __stop___stf_exit_barrier_fixup = .;
/*
 * NOTE(review): the input-section line between the start/stop symbols
 * (original line 184) is missing from this excerpt.
 */
182 __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
183 __start___rfi_flush_fixup = .;
185 __stop___rfi_flush_fixup = .;
187 #endif /* CONFIG_PPC64 */
/*
 * Spectre-v1 barrier_nospec patch sites: bounded table walked at boot
 * to patch in (or nop out) speculation barriers.
 */
189 #ifdef CONFIG_PPC_BARRIER_NOSPEC
191 __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
192 __start___barrier_nospec_fixup = .;
193 *(__barrier_nospec_fixup)
194 __stop___barrier_nospec_fixup = .;
196 #endif /* CONFIG_PPC_BARRIER_NOSPEC */
/*
 * Freescale Book3E branch-target-buffer flush patch sites.
 * NOTE(review): the input-section line between the start/stop symbols
 * (original line 202) is missing from this excerpt.
 */
198 #ifdef CONFIG_PPC_FSL_BOOK3E
200 __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
201 __start__btb_flush_fixup = .;
203 __stop__btb_flush_fixup = .;
208 * Init sections discarded at runtime
/*
 * Init text starts on a STRICT_ALIGN_SIZE (and page) boundary so the
 * init region can be freed — and its mappings changed — after boot.
 */
210 . = ALIGN(STRICT_ALIGN_SIZE);
212 . = ALIGN(PAGE_SIZE);
213 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
218 *.init.text might be RO so we must ensure this section ends on
221 . = ALIGN(PAGE_SIZE);
/* Ftrace trampolines used only during init. */
224 *(.tramp.ftrace.init);
228 /* .exit.text is discarded at runtime, not link time,
229 * to deal with references from __bug_table
231 .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
/* Generic init data (see asm-generic/vmlinux.lds.h), 16-byte aligned. */
235 . = ALIGN(PAGE_SIZE);
237 INIT_DATA_SECTION(16)
/*
 * Boot-time feature fixup tables, each bounded by start/stop symbols:
 * CPU features (__ftr_fixup), MMU features (__mmu_ftr_fixup), lwsync
 * patching (__lwsync_fixup) and firmware features (__fw_ftr_fixup).
 * NOTE(review): the input-section line for __ftr_fixup (original line
 * 242) is missing from this excerpt; the other three use KEEP() to
 * survive --gc-sections.
 */
240 __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
241 __start___ftr_fixup = .;
243 __stop___ftr_fixup = .;
246 __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
247 __start___mmu_ftr_fixup = .;
248 KEEP(*(__mmu_ftr_fixup))
249 __stop___mmu_ftr_fixup = .;
252 __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
253 __start___lwsync_fixup = .;
254 KEEP(*(__lwsync_fixup))
255 __stop___lwsync_fixup = .;
259 __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
260 __start___fw_ftr_fixup = .;
261 KEEP(*(__fw_ftr_fixup))
262 __stop___fw_ftr_fixup = .;
/* Per-CPU data, cacheline-aligned (macro from asm-generic/vmlinux.lds.h). */
266 PERCPU_SECTION(L1_CACHE_BYTES)
/*
 * Table of machine (platform) descriptors, bounded by start/end symbols
 * so boot code can iterate them; KEEP() protects against gc-sections.
 */
269 .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
270 __machine_desc_start = . ;
271 KEEP(*(.machine.desc))
272 __machine_desc_end = . ;
/*
 * Relocatable kernels carry dynamic-linking metadata so the boot-time
 * relocator can process .rela.dyn entries against __rela_dyn_start.
 * NOTE(review): several section bodies here are truncated in this
 * excerpt (original lines 277-278, 285-288, 293-297 are missing).
 */
274 #ifdef CONFIG_RELOCATABLE
276 .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
279 __dynamic_symtab = .;
283 .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
284 .dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
289 .hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
290 .gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
291 .interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
292 .rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
294 __rela_dyn_start = .;
298 /* .exit.data is discarded at runtime, not link time,
299 * to deal with references from .exit.text
301 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
305 /* freed after init ends here */
306 . = ALIGN(PAGE_SIZE);
310 * And now the various read/write data
313 . = ALIGN(PAGE_SIZE);
/*
 * NOTE(review): two .data declarations appear below — presumably the
 * 32-bit and 64-bit arms of a config #ifdef whose guard lines are not
 * visible here.  Both collect UBSAN's local data/type descriptors.
 */
317 .data : AT(ADDR(.data) - LOAD_OFFSET) {
320 *(.data..Lubsan_data*)
321 *(.data..Lubsan_type*)
331 .data : AT(ADDR(.data) - LOAD_OFFSET) {
334 *(.data..Lubsan_data*)
335 *(.data..Lubsan_type*)
/* Function descriptors (ppc64 ELFv1 .opd); body truncated in this view. */
342 .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
/*
 * GOT/TOC.  For non-relocatable kernels, prom_init.o's TOC is bracketed
 * by symbols — presumably so it can be validated or discarded once
 * prom_init has run; confirm against the prom_init trimming code.
 */
349 .got : AT(ADDR(.got) - LOAD_OFFSET) {
351 #ifndef CONFIG_RELOCATABLE
352 __prom_init_toc_start = .;
353 arch/powerpc/kernel/prom_init.o*(.toc .got)
354 __prom_init_toc_end = .;
361 /* The initial task and kernel stack */
362 INIT_TASK_DATA_SECTION(THREAD_ALIGN)
/* Standard generic data groupings (macros from asm-generic/vmlinux.lds.h). */
364 .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
365 PAGE_ALIGNED_DATA(PAGE_SIZE)
368 .data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
369 CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
372 .data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
373 READ_MOSTLY_DATA(L1_CACHE_BYTES)
/* Data excluded from hibernation snapshots. */
376 . = ALIGN(PAGE_SIZE);
377 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
/* End of initialized data; PROVIDE32 exports edata for 32-bit builds. */
383 . = ALIGN(PAGE_SIZE);
385 PROVIDE32 (edata = .);
388 * And finally the bss
393 . = ALIGN(PAGE_SIZE);
/*
 * Discarded output: PLT/GOT linkage stubs, dynamic relocations and
 * comments are not needed in the final static kernel image.
 * NOTE(review): the enclosing /DISCARD/ directive (presumably on a
 * missing line) is not visible in this excerpt.
 */
404 *(.glink .iplt .plt .rela* .comment)