1 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * NOTE(review): two alternative PROVIDE32() definitions appear back to
 * back.  The preprocessor guards that select between them (presumably a
 * 64-vs-32-bit #ifdef) fall outside this extract — confirm against the
 * full file before editing either line.  The first variant hides the
 * symbol behind an __unused__ prefix; the second emits it as-is.
 */
2 #define PROVIDE32(x) PROVIDE(__unused__##x)
3 #define PROVIDE32(x) PROVIDE(x)
/* Force .bss.prominit input sections to the front of the BSS output
 * section (consumed by the generic linker-script helpers — presumably
 * asm-generic/vmlinux.lds.h; confirm against the full file). */
8 #define BSS_FIRST_SECTIONS *(.bss.prominit)
/* Extra alignment for the read-only exception table; 0 = no extra
 * alignment beyond the generic default. */
10 #define RO_EXCEPTION_TABLE_ALIGN 0
/* Keep .exit.* sections at link time and discard them at runtime instead;
 * matches the ".exit.text / .exit.data is discarded at runtime, not link
 * time" comments further down in this script. */
11 #define RUNTIME_DISCARD_EXIT
/*
 * SOFT_MASK_TABLE(align): emit an output section gathering all
 * __soft_mask_table input-section entries between __start___/__stop___
 * marker symbols; KEEP() prevents the linker from garbage-collecting the
 * entries.  NOTE(review): the macro's closing lines (alignment use of
 * 'align' and the closing brace) are not visible in this extract — the
 * definition does not end where it appears to here.
 */
13 #define SOFT_MASK_TABLE(align) \
15 __soft_mask_table : AT(ADDR(__soft_mask_table) - LOAD_OFFSET) { \
16 __start___soft_mask_table = .; \
17 KEEP(*(__soft_mask_table)) \
18 __stop___soft_mask_table = .; \
/*
 * RESTART_TABLE(align): same pattern as SOFT_MASK_TABLE — collect all
 * __restart_table entries between __start___/__stop___ marker symbols,
 * KEEP()ed against linker garbage collection.  NOTE(review): the closing
 * lines of this macro are outside the visible extract.
 */
21 #define RESTART_TABLE(align) \
23 __restart_table : AT(ADDR(__restart_table) - LOAD_OFFSET) { \
24 __start___restart_table = .; \
25 KEEP(*(__restart_table)) \
26 __stop___restart_table = .; \
30 #include <asm-generic/vmlinux.lds.h>
31 #include <asm/cache.h>
32 #include <asm/thread_info.h>
/* Alignment (in bytes) of the strict RWX boundary: 1 << CONFIG_DATA_SHIFT.
 * Used below via ALIGN(STRICT_ALIGN_SIZE) ahead of __init_begin. */
34 #define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT)
/* Build-time sanity check: the strict-alignment granule can never be
 * smaller than a page.  NOTE(review): the matching #endif is outside this
 * extract. */
36 #if STRICT_ALIGN_SIZE < PAGE_SIZE
37 #error "CONFIG_DATA_SHIFT must be >= PAGE_SHIFT"
/* Program-header entries — NOTE(review): the enclosing PHDRS { } block is
 * not visible in this extract.  FLAGS(7) = PF_R|PF_W|PF_X per the inline
 * comment; FLAGS(0) marks the note segment with no access flags. */
43 text PT_LOAD FLAGS(7); /* RWX */
44 note PT_NOTE FLAGS(0);
/*
 * NOTE(review): two OUTPUT_ARCH() directives appear back to back; they are
 * presumably alternatives selected by a 64-vs-32-bit #ifdef that falls in
 * a gap of this extract — confirm before editing either one.
 */
48 OUTPUT_ARCH(powerpc:common64)
51 OUTPUT_ARCH(powerpc:common)
/* Alias the 32-bit 'jiffies' symbol onto jiffies_64 at byte offset 4 —
 * presumably selecting the low-order word on a big-endian layout; TODO
 * confirm the endianness assumption against the full file. */
52 jiffies = jiffies_64 + 4;
59 * Text, read only data and other permanent read-only sections
67 * This needs to be in its own output section to avoid ld placing
68 * branch trampoline stubs randomly throughout the fixed sections,
69 * which it will do (even if the branch comes from another section)
70 * in order to optimize stub generation.
72 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
74 KEEP(*(.head.text.first_256B));
75 #ifdef CONFIG_PPC_BOOK3E_64
77 KEEP(*(.head.text.real_vectors));
78 *(.head.text.real_trampolines);
79 KEEP(*(.head.text.virt_vectors));
80 *(.head.text.virt_trampolines);
81 # if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
82 KEEP(*(.head.data.fwnmi_page));
85 #else /* !CONFIG_PPC64 */
94 * ALIGN(0) overrides the default output section alignment because
95 * this needs to start right after .head.text in order for fixed
96 * section placement to work.
98 .text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
99 #ifdef CONFIG_LD_HEAD_STUB_CATCH
100 KEEP(*(.linker_stub_catch));
105 .text : AT(ADDR(.text) - LOAD_OFFSET) {
108 /* careful! __ftr_alt_* sections need to be close to .text */
109 *(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
110 *(.tramp.ftrace.text);
118 * -Os builds call FP save/restore functions. The powerpc64
119 * linker generates those on demand in the .sfpr section.
120 * .sfpr gets placed at the beginning of a group of input
121 * sections, which can break start-of-text offset if it is
122 * included with the main text sections, so put it by itself.
125 *(.text.asan.* .text.tsan.*)
130 . = ALIGN(PAGE_SIZE);
132 PROVIDE32 (etext = .);
138 .sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
143 .data.rel.ro : AT(ADDR(.data.rel.ro) - LOAD_OFFSET) {
144 *(.data.rel.ro .data.rel.ro.*)
147 .branch_lt : AT(ADDR(.branch_lt) - LOAD_OFFSET) {
152 .got1 : AT(ADDR(.got1) - LOAD_OFFSET) {
155 .got2 : AT(ADDR(.got2) - LOAD_OFFSET) {
160 .got : AT(ADDR(.got) - LOAD_OFFSET) {
164 .plt : AT(ADDR(.plt) - LOAD_OFFSET) {
165 /* XXX: is .plt (and .got.plt) required? */
169 #else /* CONFIG_PPC32 */
170 #ifndef CONFIG_PPC_KERNEL_PCREL
171 .toc1 : AT(ADDR(.toc1) - LOAD_OFFSET) {
176 .got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) {
177 #ifdef CONFIG_PPC_KERNEL_PCREL
187 #ifdef CONFIG_PPC64_ELF_ABI_V1
188 .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
196 __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
197 __start___stf_entry_barrier_fixup = .;
198 *(__stf_entry_barrier_fixup)
199 __stop___stf_entry_barrier_fixup = .;
203 __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
204 __start___uaccess_flush_fixup = .;
205 *(__uaccess_flush_fixup)
206 __stop___uaccess_flush_fixup = .;
210 __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
211 __start___entry_flush_fixup = .;
212 *(__entry_flush_fixup)
213 __stop___entry_flush_fixup = .;
217 __scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
218 __start___scv_entry_flush_fixup = .;
219 *(__scv_entry_flush_fixup)
220 __stop___scv_entry_flush_fixup = .;
224 __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
225 __start___stf_exit_barrier_fixup = .;
226 *(__stf_exit_barrier_fixup)
227 __stop___stf_exit_barrier_fixup = .;
231 __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
232 __start___rfi_flush_fixup = .;
234 __stop___rfi_flush_fixup = .;
236 #endif /* CONFIG_PPC32 */
238 #ifdef CONFIG_PPC_BARRIER_NOSPEC
240 __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
241 __start___barrier_nospec_fixup = .;
242 *(__barrier_nospec_fixup)
243 __stop___barrier_nospec_fixup = .;
245 #endif /* CONFIG_PPC_BARRIER_NOSPEC */
247 #ifdef CONFIG_PPC_E500
249 __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
250 __start__btb_flush_fixup = .;
252 __stop__btb_flush_fixup = .;
257 * Various code relies on __init_begin being at the strict RWX boundary.
259 . = ALIGN(STRICT_ALIGN_SIZE);
265 * Init sections discarded at runtime
267 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
272 *.init.text might be RO so we must ensure this section ends on
275 . = ALIGN(PAGE_SIZE);
277 *(.tramp.ftrace.init);
280 /* .exit.text is discarded at runtime, not link time,
281 * to deal with references from __bug_table
283 .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
287 . = ALIGN(PAGE_SIZE);
289 INIT_DATA_SECTION(16)
292 __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
293 __start___ftr_fixup = .;
295 __stop___ftr_fixup = .;
298 __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
299 __start___mmu_ftr_fixup = .;
300 KEEP(*(__mmu_ftr_fixup))
301 __stop___mmu_ftr_fixup = .;
304 __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
305 __start___lwsync_fixup = .;
306 KEEP(*(__lwsync_fixup))
307 __stop___lwsync_fixup = .;
311 __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
312 __start___fw_ftr_fixup = .;
313 KEEP(*(__fw_ftr_fixup))
314 __stop___fw_ftr_fixup = .;
318 PERCPU_SECTION(L1_CACHE_BYTES)
321 .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
322 __machine_desc_start = . ;
323 KEEP(*(.machine.desc))
324 __machine_desc_end = . ;
326 #ifdef CONFIG_RELOCATABLE
328 .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
330 __dynamic_symtab = .;
333 .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
334 .dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
339 .hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
340 .gnu.hash : AT(ADDR(.gnu.hash) - LOAD_OFFSET) { *(.gnu.hash) }
341 .interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
342 .rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
344 __rela_dyn_start = .;
348 /* .exit.data is discarded at runtime, not link time,
349 * to deal with references from .exit.text
351 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
355 /* freed after init ends here */
356 . = ALIGN(PAGE_SIZE);
360 * And now the various read/write data
363 . = ALIGN(PAGE_SIZE);
366 .data : AT(ADDR(.data) - LOAD_OFFSET) {
374 /* The initial task and kernel stack */
375 INIT_TASK_DATA_SECTION(THREAD_ALIGN)
377 .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
378 PAGE_ALIGNED_DATA(PAGE_SIZE)
381 .data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
382 CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
385 .data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
386 READ_MOSTLY_DATA(L1_CACHE_BYTES)
389 . = ALIGN(PAGE_SIZE);
390 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
396 . = ALIGN(PAGE_SIZE);
398 PROVIDE32 (edata = .);
401 * And finally the bss
406 . = ALIGN(PAGE_SIZE);
420 #ifndef CONFIG_RELOCATABLE