1 /* SPDX-License-Identifier: GPL-2.0 */
3 #define PROVIDE32(x) PROVIDE(__unused__##x) /* 64-bit variant: rename so the 32-bit-only symbol is not emitted (enclosing #ifdef CONFIG_PPC64 guard elided in this view — confirm) */
5 #define PROVIDE32(x) PROVIDE(x) /* 32-bit variant: provide the symbol (e.g. etext/edata below) as-is */
8 #define BSS_FIRST_SECTIONS *(.bss.prominit) /* place .bss.prominit at the very start of .bss (presumably prom_init's early BSS — confirm against prom_init.c) */
9 #define RUNTIME_DISCARD_EXIT /* tell asm-generic/vmlinux.lds.h that .exit.* is kept at link time and discarded at runtime (see .exit.text/.exit.data notes below) */
12 #include <asm-generic/vmlinux.lds.h>
13 #include <asm/cache.h>
14 #include <asm/thread_info.h>
16 #define STRICT_ALIGN_SIZE (1 << CONFIG_DATA_SHIFT) /* alignment forced before the init/data region (used at the ALIGN() preceding .init.text) */
17 #define ETEXT_ALIGN_SIZE (1 << CONFIG_ETEXT_SHIFT) /* alignment applied at the end of text, just before PROVIDE32(etext) */
22 kernel PT_LOAD FLAGS(7); /* RWX */
23 notes PT_NOTE FLAGS(0);
24 dummy PT_NOTE FLAGS(0);
26 /* binutils < 2.18 has a bug that makes it misbehave when taking an
27 ELF file with all segments at load address 0 as input. This
28 happens when running "strip" on vmlinux, because of the AT() magic
29 in this linker script. People using GCC >= 4.2 won't run into
30 this problem, because the "build-id" support will put some data
31 into the "notes" segment (at a non-zero load address).
33 To work around this, we force some data into both the "dummy"
34 segment and the kernel segment, so the dummy segment will get a
35 non-zero load address. It's not enough to always create the
36 "notes" segment, since if nothing gets assigned to it, its load
37 address will be zero. */
41 OUTPUT_ARCH(powerpc:common64)
44 OUTPUT_ARCH(powerpc:common)
45 jiffies = jiffies_64 + 4; /* 32-bit build: alias jiffies to the low word of jiffies_64 — offset 4 assumes big-endian layout (TODO confirm no LE 32-bit user) */
52 * Text, read only data and other permanent read-only sections
60 * This needs to be in its own output section to avoid ld placing
61 * branch trampoline stubs randomly throughout the fixed sections,
62 * which it will do (even if the branch comes from another section)
63 * in order to optimize stub generation.
65 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
67 KEEP(*(.head.text.first_256B));
68 #ifdef CONFIG_PPC_BOOK3E
70 KEEP(*(.head.text.real_vectors));
71 *(.head.text.real_trampolines);
72 KEEP(*(.head.text.virt_vectors));
73 *(.head.text.virt_trampolines);
74 # if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
75 KEEP(*(.head.data.fwnmi_page));
78 #else /* !CONFIG_PPC64 */
87 * ALIGN(0) overrides the default output section alignment because
88 * this needs to start right after .head.text in order for fixed
89 * section placement to work.
91 .text ALIGN(0) : AT(ADDR(.text) - LOAD_OFFSET) {
92 #ifdef CONFIG_LD_HEAD_STUB_CATCH
93 KEEP(*(.linker_stub_catch));
98 .text : AT(ADDR(.text) - LOAD_OFFSET) {
101 /* careful! __ftr_alt_* sections need to be close to .text */
102 *(.text.hot .text.hot.* TEXT_MAIN .text.fixup .text.unlikely .text.unlikely.* .fixup __ftr_alt_* .ref.text);
104 *(.tramp.ftrace.text);
114 * -Os builds call FP save/restore functions. The powerpc64
115 * linker generates those on demand in the .sfpr section.
116 * .sfpr gets placed at the beginning of a group of input
117 * sections, which can break start-of-text offset if it is
118 * included with the main text sections, so put it by itself.
129 #endif /* CONFIG_PPC32 */
133 . = ALIGN(ETEXT_ALIGN_SIZE);
135 PROVIDE32 (etext = .);
142 __stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
143 __start___stf_entry_barrier_fixup = .;
144 *(__stf_entry_barrier_fixup)
145 __stop___stf_entry_barrier_fixup = .;
149 __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
150 __start___uaccess_flush_fixup = .;
151 *(__uaccess_flush_fixup)
152 __stop___uaccess_flush_fixup = .;
156 __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
157 __start___entry_flush_fixup = .;
158 *(__entry_flush_fixup)
159 __stop___entry_flush_fixup = .;
163 __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
164 __start___stf_exit_barrier_fixup = .;
165 *(__stf_exit_barrier_fixup)
166 __stop___stf_exit_barrier_fixup = .;
170 __rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
171 __start___rfi_flush_fixup = .;
173 __stop___rfi_flush_fixup = .;
175 #endif /* CONFIG_PPC64 */
177 #ifdef CONFIG_PPC_BARRIER_NOSPEC
179 __spec_barrier_fixup : AT(ADDR(__spec_barrier_fixup) - LOAD_OFFSET) {
180 __start___barrier_nospec_fixup = .;
181 *(__barrier_nospec_fixup)
182 __stop___barrier_nospec_fixup = .;
184 #endif /* CONFIG_PPC_BARRIER_NOSPEC */
186 #ifdef CONFIG_PPC_FSL_BOOK3E
188 __spec_btb_flush_fixup : AT(ADDR(__spec_btb_flush_fixup) - LOAD_OFFSET) {
189 __start__btb_flush_fixup = .;
191 __stop__btb_flush_fixup = .;
198 /* The dummy segment contents for the bug workaround mentioned above
200 .dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
207 * Init sections discarded at runtime
209 . = ALIGN(STRICT_ALIGN_SIZE);
211 . = ALIGN(PAGE_SIZE);
212 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
217 *.init.text might be RO so we must ensure this section ends on
220 . = ALIGN(PAGE_SIZE);
223 *(.tramp.ftrace.init);
227 /* .exit.text is discarded at runtime, not link time,
228 * to deal with references from __bug_table
230 .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
234 . = ALIGN(PAGE_SIZE);
236 INIT_DATA_SECTION(16)
239 __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
240 __start___ftr_fixup = .;
242 __stop___ftr_fixup = .;
245 __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
246 __start___mmu_ftr_fixup = .;
247 KEEP(*(__mmu_ftr_fixup))
248 __stop___mmu_ftr_fixup = .;
251 __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
252 __start___lwsync_fixup = .;
253 KEEP(*(__lwsync_fixup))
254 __stop___lwsync_fixup = .;
258 __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
259 __start___fw_ftr_fixup = .;
260 KEEP(*(__fw_ftr_fixup))
261 __stop___fw_ftr_fixup = .;
265 PERCPU_SECTION(L1_CACHE_BYTES)
268 .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
269 __machine_desc_start = . ;
270 KEEP(*(.machine.desc))
271 __machine_desc_end = . ;
273 #ifdef CONFIG_RELOCATABLE
275 .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
278 __dynamic_symtab = .;
282 .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) } /* dynamic string table, emitted for the relocatable kernel (under CONFIG_RELOCATABLE above) */
283 .dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
288 .hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) } /* ELF symbol hash table */
289 .interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) } /* ELF .interp (interpreter path) section, collected so it lands at a defined address */
290 .rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
292 __rela_dyn_start = .;
296 /* .exit.data is discarded at runtime, not link time,
297 * to deal with references from .exit.text
299 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
303 /* freed after init ends here */
304 . = ALIGN(PAGE_SIZE);
308 * And now the various read/write data
311 . = ALIGN(PAGE_SIZE);
315 .data : AT(ADDR(.data) - LOAD_OFFSET) {
318 *(.data..Lubsan_data*)
319 *(.data..Lubsan_type*)
329 .data : AT(ADDR(.data) - LOAD_OFFSET) {
336 .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
343 .got : AT(ADDR(.got) - LOAD_OFFSET) {
345 #ifndef CONFIG_RELOCATABLE
346 __prom_init_toc_start = .;
347 arch/powerpc/kernel/prom_init.o*(.toc .got)
348 __prom_init_toc_end = .;
355 /* The initial task and kernel stack */
356 INIT_TASK_DATA_SECTION(THREAD_SIZE)
358 .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
359 PAGE_ALIGNED_DATA(PAGE_SIZE)
362 .data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
363 CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
366 .data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
367 READ_MOSTLY_DATA(L1_CACHE_BYTES)
370 . = ALIGN(PAGE_SIZE);
371 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
377 . = ALIGN(PAGE_SIZE);
379 PROVIDE32 (edata = .);
382 * And finally the bss
387 . = ALIGN(PAGE_SIZE);
398 *(.glink .iplt .plt .comment)
402 #ifndef CONFIG_RELOCATABLE