1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* ld script to make ARM Linux kernel
3 * taken from the i386 version by Russell King
4 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
7 #ifdef CONFIG_XIP_KERNEL
8 #include "vmlinux-xip.lds.S"
11 #include <asm-generic/vmlinux.lds.h>
12 #include <asm/cache.h>
13 #include <asm/thread_info.h>
14 #include <asm/memory.h>
16 #include <asm/pgtable.h>
19 * ld.lld does not support NOCROSSREFS:
20 * https://github.com/ClangBuiltLinux/linux/issues/1609
22 #ifdef CONFIG_LD_IS_LLD
26 /* Set start/end symbol names to the LMA for the section.
 * ARM_LMA(sym, section) emits sym_start / sym_end bracketing the
 * section's load-address range (LOADADDR .. LOADADDR + SIZEOF), so code
 * can find the load-time copy of a section whose VMA differs from its
 * LMA (used below for the .vectors/.stubs variants). */
27 #define ARM_LMA(sym, section) \
28 sym##_start = LOADADDR(section); \
29 sym##_end = LOADADDR(section) + SIZEOF(section)
33 VMLINUX_SYMBOL(__proc_info_begin) = .; \
35 VMLINUX_SYMBOL(__proc_info_end) = .;
37 #define HYPERVISOR_TEXT \
38 VMLINUX_SYMBOL(__hyp_text_start) = .; \
40 VMLINUX_SYMBOL(__hyp_text_end) = .;
44 VMLINUX_SYMBOL(__idmap_text_start) = .; \
46 VMLINUX_SYMBOL(__idmap_text_end) = .; \
47 . = ALIGN(PAGE_SIZE); \
48 VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
50 VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
52 #ifdef CONFIG_HOTPLUG_CPU
53 #define ARM_CPU_DISCARD(x)
54 #define ARM_CPU_KEEP(x) x
56 #define ARM_CPU_DISCARD(x) x
57 #define ARM_CPU_KEEP(x)
60 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
61 defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
62 #define ARM_EXIT_KEEP(x) x
63 #define ARM_EXIT_DISCARD(x)
65 #define ARM_EXIT_KEEP(x)
66 #define ARM_EXIT_DISCARD(x) x
/* Alias the 32-bit 'jiffies' symbol onto one word of the 64-bit
 * jiffies_64 counter; the +4 selects the word at byte offset 4.
 * NOTE(review): presumably this is the low word on big-endian builds —
 * the guarding endianness #if is not visible in this excerpt; confirm
 * against the full file. */
75 jiffies = jiffies_64 + 4;
81 * XXX: The linker does not define how output sections are
82 * assigned to input sections when there are multiple statements
83 * matching the same input section name. There is no documented
86 * unwind exit sections must be discarded before the rest of the
87 * unwind sections get included.
90 *(.ARM.exidx.exit.text)
91 *(.ARM.extab.exit.text)
92 ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
93 ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
94 ARM_EXIT_DISCARD(EXIT_TEXT)
95 ARM_EXIT_DISCARD(EXIT_DATA)
101 #ifndef CONFIG_SMP_ON_UP
/* Place the start of the kernel image at its virtual base address:
 * PAGE_OFFSET (start of the kernel's linear map) plus TEXT_OFFSET. */
108 . = PAGE_OFFSET + TEXT_OFFSET;
114 #ifdef CONFIG_STRICT_KERNEL_RWX
115 . = ALIGN(1<<SECTION_SHIFT);
118 .text : { /* Real text segment */
119 _stext = .; /* Text and read-only data */
121 __exception_text_start = .;
123 __exception_text_end = .;
136 *(.got) /* Global offset table */
137 ARM_CPU_KEEP(PROC_INFO)
140 #ifdef CONFIG_DEBUG_ALIGN_RODATA
141 . = ALIGN(1<<SECTION_SHIFT);
143 _etext = .; /* End of text section */
148 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
149 __start___ex_table = .;
153 __stop___ex_table = .;
156 #ifdef CONFIG_ARM_UNWIND
158 * Stack unwinding tables
162 __start_unwind_idx = .;
164 __stop_unwind_idx = .;
167 __start_unwind_tab = .;
169 __stop_unwind_tab = .;
175 #ifdef CONFIG_STRICT_KERNEL_RWX
176 . = ALIGN(1<<SECTION_SHIFT);
178 . = ALIGN(PAGE_SIZE);
183 * The vectors and stubs are relocatable code, and the
184 * only thing that matters is their relative offsets
187 OVERLAY 0xffff0000 : NOCROSSREFS AT(__vectors_lma) {
192 *(.vectors.bhb.loop8)
194 .vectors.bhb.bpiall {
195 *(.vectors.bhb.bpiall)
/* Publish __vectors*_start/_end load-address symbols for each of the
 * three vectors variants (base, Spectre-BHB loop8, Spectre-BHB bpiall),
 * then advance the location counter past all three load images so the
 * next output section is laid out after them in the load view. */
198 ARM_LMA(__vectors, .vectors);
199 ARM_LMA(__vectors_bhb_loop8, .vectors.bhb.loop8);
200 ARM_LMA(__vectors_bhb_bpiall, .vectors.bhb.bpiall);
201 . = __vectors_lma + SIZEOF(.vectors) +
202 SIZEOF(.vectors.bhb.loop8) +
203 SIZEOF(.vectors.bhb.bpiall);
206 .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_lma) {
209 ARM_LMA(__stubs, .stubs);
210 . = __stubs_lma + SIZEOF(.stubs);
212 PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
216 ARM_EXIT_KEEP(EXIT_TEXT)
219 ARM_CPU_DISCARD(PROC_INFO)
222 __arch_info_begin = .;
227 __tagtable_begin = .;
231 #ifdef CONFIG_SMP_ON_UP
239 __pv_table_begin = .;
252 ARM_EXIT_KEEP(EXIT_DATA)
256 PERCPU_SECTION(L1_CACHE_BYTES)
259 #ifdef CONFIG_STRICT_KERNEL_RWX
260 . = ALIGN(1<<SECTION_SHIFT);
262 . = ALIGN(THREAD_SIZE);
267 .data : AT(__data_loc) {
268 _data = .; /* address in memory */
272 * first, the init task union, aligned
273 * to an 8192 byte boundary.
275 INIT_TASK_DATA(THREAD_SIZE)
278 CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
279 READ_MOSTLY_DATA(L1_CACHE_BYTES)
282 * and the usual data section
289 _edata_loc = __data_loc + SIZEOF(.data);
293 #ifdef CONFIG_HAVE_TCM
295 * We align everything to a page boundary so we can
296 * free it after init has commenced and TCM contents have
297 * been copied to its destination.
300 . = ALIGN(PAGE_SIZE);
306 * Link these to the ITCM RAM
307 * Put VMA to the TCM address and LMA to the common RAM
308 * and we'll upload the contents from RAM to TCM and free
309 * the used RAM after that.
311 .text_itcm ITCM_OFFSET : AT(__itcm_start)
321 * Reset the dot pointer, this is needed to create the
322 * relative __dtcm_start below (to be used as extern in code).
324 . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
330 /* TODO: add remainder of ITCM as well, that can be used for data! */
331 .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
340 /* Reset the dot pointer or the linker gets confused */
341 . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
343 /* End marker for freeing TCM copy in linked object */
344 .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
345 . = ALIGN(PAGE_SIZE);
356 #ifdef CONFIG_STRICT_KERNEL_RWX
358 * Without CONFIG_DEBUG_ALIGN_RODATA, __start_rodata_section_aligned will
359 * be the first section-aligned location after __start_rodata. Otherwise,
360 * it will be equal to __start_rodata.
362 __start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
366 * These must never be empty
367 * If you have to comment these two assert statements out, your
368 * binutils is too old (for other reasons as well)
/* Build-time sanity checks: the proc_info and arch_info tables
 * (bounded by the __proc_info_* / __arch_info_* symbols defined above)
 * must be non-empty — an empty table means no CPU support or no machine
 * record was compiled in, which can never boot. */
370 ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
371 ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
374 * The HYP init code can't be more than a page long,
375 * and should not cross a page boundary.
376 * The above comment applies as well.
/* The HYP init code (__hyp_idmap_text_start .. __hyp_idmap_text_end)
 * must fit entirely within one page: measured from the start of the
 * page containing __hyp_idmap_text_start, its end may not extend past
 * PAGE_SIZE. */
378 ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
379 "HYP init code too big or misaligned")
381 #endif /* CONFIG_XIP_KERNEL */