2 * ld script to make ARM Linux kernel
3 * taken from the i386 version by Russell King
4 * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
7 #include <asm-generic/vmlinux.lds.h>
9 #include <asm/kernel-pgtable.h>
10 #include <asm/thread_info.h>
11 #include <asm/memory.h>
13 #include <asm/pgtable.h>
/*
 * .exit.text needed in case of alternative patching: exit code may be
 * referenced by alternatives, so the linker script can choose to keep
 * it in the image instead of discarding it.
 */
#define ARM_EXIT_KEEP(x) x	/* emit the section into the output */
#define ARM_EXIT_DISCARD(x)	/* expand to nothing: drop the section */
27 #define HYPERVISOR_EXTABLE \
29 VMLINUX_SYMBOL(__start___kvm_ex_table) = .; \
31 VMLINUX_SYMBOL(__stop___kvm_ex_table) = .;
33 #define HYPERVISOR_TEXT \
35 * Align to 4 KB so that \
36 * a) the HYP vector table is at its minimum \
37 * alignment of 2048 bytes \
38 * b) the HYP init code will not cross a page \
39 * boundary if its size does not exceed \
40 * 4 KB (see related ASSERT() below) \
43 VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
45 VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \
46 VMLINUX_SYMBOL(__hyp_text_start) = .; \
49 VMLINUX_SYMBOL(__hyp_text_end) = .;
53 VMLINUX_SYMBOL(__idmap_text_start) = .; \
55 VMLINUX_SYMBOL(__idmap_text_end) = .;
57 #ifdef CONFIG_HIBERNATION
58 #define HIBERNATE_TEXT \
60 VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
61 *(.hibernate_exit.text) \
62 VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
#define HIBERNATE_TEXT	/* empty when CONFIG_HIBERNATION is not set (see guard above) */
67 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
69 . = ALIGN(PAGE_SIZE); \
70 VMLINUX_SYMBOL(__entry_tramp_text_start) = .; \
71 *(.entry.tramp.text) \
72 . = ALIGN(PAGE_SIZE); \
73 VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
79 * The size of the PE/COFF section that covers the kernel image, which
80 * runs from stext to _edata, must be a round multiple of the PE/COFF
81 * FileAlignment, which we set to its minimum value of 0x200. 'stext'
82 * itself is 4 KB aligned, so padding out _edata to a 0x200 aligned
83 * boundary should be sufficient.
PECOFF_FILE_ALIGNMENT = 0x200;
/*
 * Emit a single byte then align up, so _edata lands on a PE/COFF
 * FileAlignment (0x200) boundary. The BYTE(0) forces the section to be
 * allocated even when no other padding is required.
 */
#define PECOFF_EDATA_PADDING \
	.pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
#define PECOFF_EDATA_PADDING	/* empty fallback: no PE/COFF padding needed */
94 #if defined(CONFIG_DEBUG_ALIGN_RODATA)
96 * 4 KB granule: 1 level 2 entry
97 * 16 KB granule: 128 level 3 entries, with contiguous bit
98 * 64 KB granule: 32 level 3 entries, with contiguous bit
100 #define SEGMENT_ALIGN SZ_2M
103 * 4 KB granule: 16 level 3 entries, with contiguous bit
104 * 16 KB granule: 4 level 3 entries, without contiguous bit
105 * 64 KB granule: 1 level 3 entry
107 #define SEGMENT_ALIGN SZ_64K
113 * XXX: The linker does not define how output sections are
114 * assigned to input sections when there are multiple statements
115 * matching the same input section name. There is no documented
119 ARM_EXIT_DISCARD(EXIT_TEXT)
120 ARM_EXIT_DISCARD(EXIT_DATA)
125 *(.dynsym .dynstr .hash)
128 . = KIMAGE_VADDR + TEXT_OFFSET;
134 .text : { /* Real text segment */
135 _stext = .; /* Text and read-only data */
136 __exception_text_start = .;
138 __exception_text_end = .;
154 *(.got) /* Global offset table */
157 . = ALIGN(SEGMENT_ALIGN);
158 _etext = .; /* End of text section */
160 RO_DATA(PAGE_SIZE) /* everything from this point to */
161 EXCEPTION_TABLE(8) /* __init_begin will be marked RO NX */
164 . = ALIGN(SEGMENT_ALIGN);
169 ARM_EXIT_KEEP(EXIT_TEXT)
179 *(.init.rodata.* .init.bss) /* from the EFI stub */
182 ARM_EXIT_KEEP(EXIT_DATA)
185 PERCPU_SECTION(L1_CACHE_BYTES)
189 __alt_instructions = .;
191 __alt_instructions_end = .;
193 .altinstr_replacement : {
194 *(.altinstr_replacement)
200 __rela_offset = ABSOLUTE(ADDR(.rela) - KIMAGE_VADDR);
201 __rela_size = SIZEOF(.rela);
203 . = ALIGN(SEGMENT_ALIGN);
208 RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
211 * Data written with the MMU off but read with the MMU on requires
212 * cache lines to be invalidated, discarding up to a Cache Writeback
213 * Granule (CWG) of data from the cache. Keep the section that
214 * requires this type of maintenance to be in its own Cache Writeback
215 * Granule (CWG) area so the cache maintenance operations don't
216 * interfere with adjacent data.
218 .mmuoff.data.write : ALIGN(SZ_2K) {
219 __mmuoff_data_start = .;
220 *(.mmuoff.data.write)
223 .mmuoff.data.read : {
225 __mmuoff_data_end = .;
233 . = ALIGN(PAGE_SIZE);
237 . += SWAPPER_DIR_SIZE;
239 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
252 * The HYP init code and ID map text can't be longer than a page each,
253 * and should not cross a page boundary.
/*
 * Mask the start address down to its 4 KB page base, then require the
 * end to stay within that page: this checks both size and that the
 * code does not straddle a page boundary.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"HYP init code too big or misaligned")
/* Same single-page constraint for the kernel's own ID map text. */
ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
	"ID map text too big or misaligned")
259 #ifdef CONFIG_HIBERNATION
260 ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
261 <= SZ_4K, "Hibernate exit text too big or misaligned")
263 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
264 ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
265 "Entry trampoline text too big")
268 * If padding is applied before .head.text, virt<->phys conversions will fail.
270 ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")