2 * Helper macros to support writing architecture specific
5 * A minimal linker script has the following content:
6 * [This is a sample; architectures may have special requirements]
16 * INIT_TEXT_SECTION(PAGE_SIZE)
17 * INIT_DATA_SECTION(...)
18 * PERCPU_SECTION(CACHELINE_SIZE)
26 * RO_DATA_SECTION(PAGE_SIZE)
27 * RW_DATA_SECTION(...)
30 * EXCEPTION_TABLE(...)
33 * BSS_SECTION(0, 0, 0)
39 * DISCARDS // must be the last
42 * [__init_begin, __init_end] is the init section that may be freed after init
43 * // __init_begin and __init_end should be page aligned, so that we can
44 * // free the whole .init memory
45 * [_stext, _etext] is the text section
46 * [_sdata, _edata] is the data section
48 * Some of the included output sections have their own set of constants.
49 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
50 * [__nosave_begin, __nosave_end] for the nosave data
57 /* Align . to an 8-byte boundary, equal to the maximum function alignment. */
58 #define ALIGN_FUNCTION() . = ALIGN(8)
61 * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
62 * generates .data.identifier sections, which need to be pulled in with
63 * .data. We don't want to pull in .data..other sections, which Linux
64 * has defined. Same for text and bss.
66 * RODATA_MAIN is not used because existing code already defines .rodata.x
67 * sections to be brought in with rodata.
69 #ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
70 #define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
71 #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..LPBX*
72 #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]*
73 #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]*
74 #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
75 #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]*
77 #define TEXT_MAIN .text
78 #define DATA_MAIN .data
79 #define SDATA_MAIN .sdata
80 #define RODATA_MAIN .rodata
82 #define SBSS_MAIN .sbss
86 * Align to a 32 byte boundary equal to the
87 * alignment gcc 4.5 uses for a struct
89 #define STRUCT_ALIGNMENT 32
90 #define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)
92 /* The actual configuration determines whether the init/exit sections
93 * are handled as text/data or they can be discarded (which
94 * often happens at runtime)
96 #ifdef CONFIG_HOTPLUG_CPU
97 #define CPU_KEEP(sec) *(.cpu##sec)
98 #define CPU_DISCARD(sec)
100 #define CPU_KEEP(sec)
101 #define CPU_DISCARD(sec) *(.cpu##sec)
104 #if defined(CONFIG_MEMORY_HOTPLUG)
105 #define MEM_KEEP(sec) *(.mem##sec)
106 #define MEM_DISCARD(sec)
108 #define MEM_KEEP(sec)
109 #define MEM_DISCARD(sec) *(.mem##sec)
112 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
114 * The ftrace call sites are logged to a section whose name depends on the
115 * compiler option used. A given kernel image will only use one, AKA
116 * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
117 * dependencies for FTRACE_CALLSITE_SECTION's definition.
/*
 * Collect the ftrace call-site records between __start_mcount_loc and
 * __stop_mcount_loc.  Both __mcount_loc and __patchable_function_entries
 * inputs are gathered here; per the comment above, a given kernel image
 * only emits one of them (FTRACE_CALLSITE_SECTION).
 */
119 #define MCOUNT_REC() . = ALIGN(8); \
120 __start_mcount_loc = .; \
121 KEEP(*(__mcount_loc)) \
122 KEEP(*(__patchable_function_entries)) \
123 __stop_mcount_loc = .;
128 #ifdef CONFIG_TRACE_BRANCH_PROFILING
129 #define LIKELY_PROFILE() __start_annotated_branch_profile = .; \
130 KEEP(*(_ftrace_annotated_branch)) \
131 __stop_annotated_branch_profile = .;
133 #define LIKELY_PROFILE()
136 #ifdef CONFIG_PROFILE_ALL_BRANCHES
137 #define BRANCH_PROFILE() __start_branch_profile = .; \
138 KEEP(*(_ftrace_branch)) \
139 __stop_branch_profile = .;
141 #define BRANCH_PROFILE()
144 #ifdef CONFIG_KPROBES
/*
 * Table of _kprobe_blacklist entries (addresses kprobes must not probe),
 * bracketed by __start/__stop symbols for iteration at run time.
 */
145 #define KPROBE_BLACKLIST() . = ALIGN(8); \
146 __start_kprobe_blacklist = .; \
147 KEEP(*(_kprobe_blacklist)) \
148 __stop_kprobe_blacklist = .;
150 #define KPROBE_BLACKLIST()
153 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
154 #define ERROR_INJECT_WHITELIST() STRUCT_ALIGN(); \
155 __start_error_injection_whitelist = .; \
156 KEEP(*(_error_injection_whitelist)) \
157 __stop_error_injection_whitelist = .;
159 #define ERROR_INJECT_WHITELIST()
162 #ifdef CONFIG_EVENT_TRACING
/*
 * Trace-event descriptors (_ftrace_events) and their enum/eval maps
 * (_ftrace_eval_map), each bracketed by start/stop symbols so the
 * tracing core can walk them.
 */
163 #define FTRACE_EVENTS() . = ALIGN(8); \
164 __start_ftrace_events = .; \
165 KEEP(*(_ftrace_events)) \
166 __stop_ftrace_events = .; \
167 __start_ftrace_eval_maps = .; \
168 KEEP(*(_ftrace_eval_map)) \
169 __stop_ftrace_eval_maps = .;
171 #define FTRACE_EVENTS()
174 #ifdef CONFIG_TRACING
/* Pointers to trace_printk() format strings, bracketed for iteration. */
175 #define TRACE_PRINTKS() __start___trace_bprintk_fmt = .; \
176 KEEP(*(__trace_printk_fmt)) /* trace_printk() format string pointers */ \
177 __stop___trace_bprintk_fmt = .;
/* Pointers to tracepoint strings, bracketed for iteration. */
178 #define TRACEPOINT_STR() __start___tracepoint_str = .; \
179 KEEP(*(__tracepoint_str)) /* tracepoint string pointers */ \
180 __stop___tracepoint_str = .;
182 #define TRACE_PRINTKS()
183 #define TRACEPOINT_STR()
186 #ifdef CONFIG_FTRACE_SYSCALLS
/* Per-syscall metadata records (__syscalls_metadata) for syscall tracing. */
187 #define TRACE_SYSCALLS() . = ALIGN(8); \
188 __start_syscalls_metadata = .; \
189 KEEP(*(__syscalls_metadata)) \
190 __stop_syscalls_metadata = .;
192 #define TRACE_SYSCALLS()
195 #ifdef CONFIG_BPF_EVENTS
/*
 * Raw-tracepoint map entries (__bpf_raw_tp_map) for BPF.  Uses
 * STRUCT_ALIGN() because the entries are C structs (see STRUCT_ALIGNMENT).
 */
196 #define BPF_RAW_TP() STRUCT_ALIGN(); \
197 __start__bpf_raw_tp = .; \
198 KEEP(*(__bpf_raw_tp_map)) \
199 __stop__bpf_raw_tp = .;
204 #ifdef CONFIG_SERIAL_EARLYCON
/* Table of __earlycon_table entries, bracketed by start/end symbols. */
205 #define EARLYCON_TABLE() . = ALIGN(8); \
206 __earlycon_table = .; \
207 KEEP(*(__earlycon_table)) \
208 __earlycon_table_end = .;
210 #define EARLYCON_TABLE()
213 #ifdef CONFIG_SECURITY
214 #define LSM_TABLE() . = ALIGN(8); \
215 __start_lsm_info = .; \
216 KEEP(*(.lsm_info.init)) \
/* Early LSM info records (.early_lsm_info.init), bracketed start/end. */
218 #define EARLY_LSM_TABLE() . = ALIGN(8); \
219 __start_early_lsm_info = .; \
220 KEEP(*(.early_lsm_info.init)) \
221 __end_early_lsm_info = .;
224 #define EARLY_LSM_TABLE()
/*
 * OF_TABLE(cfg, name) emits a device-tree table only when IS_ENABLED(cfg)
 * is 1.  The __OF_TABLE/___OF_TABLE indirection forces IS_ENABLED() to be
 * expanded before token pasting selects _OF_TABLE_0 (expands to nothing)
 * or _OF_TABLE_1 (emits the table).
 */
227 #define ___OF_TABLE(cfg, name) _OF_TABLE_##cfg(name)
228 #define __OF_TABLE(cfg, name) ___OF_TABLE(cfg, name)
229 #define OF_TABLE(cfg, name) __OF_TABLE(IS_ENABLED(cfg), name)
230 #define _OF_TABLE_0(name)
231 #define _OF_TABLE_1(name) \
233 __##name##_of_table = .; \
234 KEEP(*(__##name##_of_table)) \
235 KEEP(*(__##name##_of_table_end))
/*
 * Per-subsystem device-tree match tables; each expands to a real table
 * only when the corresponding Kconfig option is enabled (see OF_TABLE).
 */
237 #define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer)
238 #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
239 #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
240 #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
241 #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
242 #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
245 #define ACPI_PROBE_TABLE(name) \
247 __##name##_acpi_probe_table = .; \
248 KEEP(*(__##name##_acpi_probe_table)) \
249 __##name##_acpi_probe_table_end = .;
251 #define ACPI_PROBE_TABLE(name)
254 #ifdef CONFIG_THERMAL
255 #define THERMAL_TABLE(name) \
257 __##name##_thermal_table = .; \
258 KEEP(*(__##name##_thermal_table)) \
259 __##name##_thermal_table_end = .;
261 #define THERMAL_TABLE(name)
264 #define KERNEL_DTB() \
267 KEEP(*(.dtb.init.rodata)) \
276 *(.data..decrypted) \
278 *(.data..shared_aligned) /* percpu related */ \
279 MEM_KEEP(init.data*) \
280 MEM_KEEP(exit.data*) \
287 /* implement dynamic printk debug */ \
289 __start___verbose = .; \
291 __stop___verbose = .; \
299 * Data section helpers
301 #define NOSAVE_DATA \
302 . = ALIGN(PAGE_SIZE); \
303 __nosave_begin = .; \
305 . = ALIGN(PAGE_SIZE); \
/* .data..page_aligned input sections, padded to page_align on both sides. */
308 #define PAGE_ALIGNED_DATA(page_align) \
309 . = ALIGN(page_align); \
310 *(.data..page_aligned) \
311 . = ALIGN(page_align);
313 #define READ_MOSTLY_DATA(align) \
315 *(.data..read_mostly) \
318 #define CACHELINE_ALIGNED_DATA(align) \
320 *(.data..cacheline_aligned)
322 #define INIT_TASK_DATA(align) \
324 __start_init_task = .; \
325 init_thread_union = .; \
327 KEEP(*(.data..init_task)) \
328 KEEP(*(.data..init_thread_info)) \
329 . = __start_init_task + THREAD_SIZE; \
332 #define JUMP_TABLE_DATA \
334 __start___jump_table = .; \
335 KEEP(*(__jump_table)) \
336 __stop___jump_table = .;
339 * Allow architectures to handle ro_after_init data on their
340 * own by defining an empty RO_AFTER_INIT_DATA.
342 #ifndef RO_AFTER_INIT_DATA
343 #define RO_AFTER_INIT_DATA \
345 __start_ro_after_init = .; \
346 *(.data..ro_after_init) \
348 __end_ro_after_init = .;
354 #define RO_DATA_SECTION(align) \
355 . = ALIGN((align)); \
356 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
357 __start_rodata = .; \
358 *(.rodata) *(.rodata.*) \
359 RO_AFTER_INIT_DATA /* Read only after init */ \
361 __start___tracepoints_ptrs = .; \
362 KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
363 __stop___tracepoints_ptrs = .; \
364 *(__tracepoints_strings)/* Tracepoints: strings */ \
367 .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
372 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
373 __start_pci_fixups_early = .; \
374 KEEP(*(.pci_fixup_early)) \
375 __end_pci_fixups_early = .; \
376 __start_pci_fixups_header = .; \
377 KEEP(*(.pci_fixup_header)) \
378 __end_pci_fixups_header = .; \
379 __start_pci_fixups_final = .; \
380 KEEP(*(.pci_fixup_final)) \
381 __end_pci_fixups_final = .; \
382 __start_pci_fixups_enable = .; \
383 KEEP(*(.pci_fixup_enable)) \
384 __end_pci_fixups_enable = .; \
385 __start_pci_fixups_resume = .; \
386 KEEP(*(.pci_fixup_resume)) \
387 __end_pci_fixups_resume = .; \
388 __start_pci_fixups_resume_early = .; \
389 KEEP(*(.pci_fixup_resume_early)) \
390 __end_pci_fixups_resume_early = .; \
391 __start_pci_fixups_suspend = .; \
392 KEEP(*(.pci_fixup_suspend)) \
393 __end_pci_fixups_suspend = .; \
394 __start_pci_fixups_suspend_late = .; \
395 KEEP(*(.pci_fixup_suspend_late)) \
396 __end_pci_fixups_suspend_late = .; \
399 /* Built-in firmware blobs */ \
400 .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
401 __start_builtin_fw = .; \
402 KEEP(*(.builtin_fw)) \
403 __end_builtin_fw = .; \
408 /* Kernel symbol table: Normal symbols */ \
409 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
410 __start___ksymtab = .; \
411 KEEP(*(SORT(___ksymtab+*))) \
412 __stop___ksymtab = .; \
415 /* Kernel symbol table: GPL-only symbols */ \
416 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
417 __start___ksymtab_gpl = .; \
418 KEEP(*(SORT(___ksymtab_gpl+*))) \
419 __stop___ksymtab_gpl = .; \
422 /* Kernel symbol table: Normal unused symbols */ \
423 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
424 __start___ksymtab_unused = .; \
425 KEEP(*(SORT(___ksymtab_unused+*))) \
426 __stop___ksymtab_unused = .; \
429 /* Kernel symbol table: GPL-only unused symbols */ \
430 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
431 __start___ksymtab_unused_gpl = .; \
432 KEEP(*(SORT(___ksymtab_unused_gpl+*))) \
433 __stop___ksymtab_unused_gpl = .; \
436 /* Kernel symbol table: GPL-future-only symbols */ \
437 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
438 __start___ksymtab_gpl_future = .; \
439 KEEP(*(SORT(___ksymtab_gpl_future+*))) \
440 __stop___ksymtab_gpl_future = .; \
443 /* Kernel symbol table: Normal symbols */ \
444 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
445 __start___kcrctab = .; \
446 KEEP(*(SORT(___kcrctab+*))) \
447 __stop___kcrctab = .; \
450 /* Kernel symbol table: GPL-only symbols */ \
451 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
452 __start___kcrctab_gpl = .; \
453 KEEP(*(SORT(___kcrctab_gpl+*))) \
454 __stop___kcrctab_gpl = .; \
457 /* Kernel symbol table: Normal unused symbols */ \
458 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
459 __start___kcrctab_unused = .; \
460 KEEP(*(SORT(___kcrctab_unused+*))) \
461 __stop___kcrctab_unused = .; \
464 /* Kernel symbol table: GPL-only unused symbols */ \
465 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
466 __start___kcrctab_unused_gpl = .; \
467 KEEP(*(SORT(___kcrctab_unused_gpl+*))) \
468 __stop___kcrctab_unused_gpl = .; \
471 /* Kernel symbol table: GPL-future-only symbols */ \
472 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
473 __start___kcrctab_gpl_future = .; \
474 KEEP(*(SORT(___kcrctab_gpl_future+*))) \
475 __stop___kcrctab_gpl_future = .; \
478 /* Kernel symbol table: strings */ \
479 __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
480 *(__ksymtab_strings) \
483 /* __*init sections */ \
484 __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
486 MEM_KEEP(init.rodata) \
487 MEM_KEEP(exit.rodata) \
490 /* Built-in module parameters. */ \
491 __param : AT(ADDR(__param) - LOAD_OFFSET) { \
492 __start___param = .; \
494 __stop___param = .; \
497 /* Built-in module versions. */ \
498 __modver : AT(ADDR(__modver) - LOAD_OFFSET) { \
499 __start___modver = .; \
501 __stop___modver = .; \
506 . = ALIGN((align)); \
509 /* RODATA & RO_DATA provided for backward compatibility.
510 * All archs are supposed to use RO_DATA() */
511 #define RODATA RO_DATA_SECTION(4096)
512 #define RO_DATA(align) RO_DATA_SECTION(align)
515 * Non-instrumentable text section
517 #define NOINSTR_TEXT \
519 __noinstr_text_start = .; \
521 __noinstr_text_end = .;
524 * .text section. Map to function alignment to avoid address changes
525 * during second ld run in second ld pass when generating System.map
527 * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
528 * code elimination is enabled, so these sections should be converted
533 *(.text.hot .text.hot.*) \
534 *(TEXT_MAIN .text.fixup) \
535 *(.text.unlikely .text.unlikely.*) \
536 *(.text.unknown .text.unknown.*) \
540 *(.text.asan.* .text.tsan.*) \
541 MEM_KEEP(init.text*) \
542 MEM_KEEP(exit.text*) \
545 /* sched.text is aligned to function alignment to ensure we have the same
546 * address even at the second ld pass when generating System.map */
549 __sched_text_start = .; \
551 __sched_text_end = .;
553 /* spinlock.text is aligned to function alignment to ensure we have the same
554 * address even at the second ld pass when generating System.map */
557 __lock_text_start = .; \
561 #define CPUIDLE_TEXT \
563 __cpuidle_text_start = .; \
565 __cpuidle_text_end = .;
567 #define KPROBES_TEXT \
569 __kprobes_text_start = .; \
571 __kprobes_text_end = .;
575 __entry_text_start = .; \
577 __entry_text_end = .;
579 #define IRQENTRY_TEXT \
581 __irqentry_text_start = .; \
583 __irqentry_text_end = .;
585 #define SOFTIRQENTRY_TEXT \
587 __softirqentry_text_start = .; \
588 *(.softirqentry.text) \
589 __softirqentry_text_end = .;
591 /* Section used for early init (in .S files) */
592 #define HEAD_TEXT KEEP(*(.head.text))
594 #define HEAD_TEXT_SECTION \
595 .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
602 #define EXCEPTION_TABLE(align) \
604 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
605 __start___ex_table = .; \
606 KEEP(*(__ex_table)) \
607 __stop___ex_table = .; \
613 #ifdef CONFIG_DEBUG_INFO_BTF
615 .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) { \
627 #define INIT_TASK_DATA_SECTION(align) \
629 .data..init_task : AT(ADDR(.data..init_task) - LOAD_OFFSET) { \
630 INIT_TASK_DATA(align) \
633 #ifdef CONFIG_CONSTRUCTORS
634 #define KERNEL_CTORS() . = ALIGN(8); \
637 KEEP(*(SORT(.init_array.*))) \
638 KEEP(*(.init_array)) \
641 #define KERNEL_CTORS()
644 /* init and exit section handling */
646 KEEP(*(SORT(___kentry+*))) \
647 *(.init.data init.data.*) \
648 MEM_DISCARD(init.data*) \
651 *(.init.rodata .init.rodata.*) \
655 ERROR_INJECT_WHITELIST() \
656 MEM_DISCARD(init.rodata) \
658 RESERVEDMEM_OF_TABLES() \
660 CPU_METHOD_OF_TABLES() \
661 CPUIDLE_METHOD_OF_TABLES() \
663 IRQCHIP_OF_MATCH_TABLE() \
664 ACPI_PROBE_TABLE(irqchip) \
665 ACPI_PROBE_TABLE(timer) \
666 THERMAL_TABLE(governor) \
672 *(.init.text .init.text.*) \
674 MEM_DISCARD(init.text*)
677 *(.exit.data .exit.data.*) \
678 *(.fini_array .fini_array.*) \
680 MEM_DISCARD(exit.data*) \
681 MEM_DISCARD(exit.rodata*)
686 MEM_DISCARD(exit.text)
692 * bss (Block Started by Symbol) - uninitialized data
693 * zeroed during startup
695 #define SBSS(sbss_align) \
696 . = ALIGN(sbss_align); \
697 .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
704 * Allow architectures to redefine BSS_FIRST_SECTIONS to add extra
705 * sections to the front of bss.
707 #ifndef BSS_FIRST_SECTIONS
708 #define BSS_FIRST_SECTIONS
711 #define BSS(bss_align) \
712 . = ALIGN(bss_align); \
713 .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
715 . = ALIGN(PAGE_SIZE); \
716 *(.bss..page_aligned) \
717 . = ALIGN(PAGE_SIZE); \
724 * DWARF debug sections.
725 * Symbols in the DWARF debugging sections are relative to
726 * the beginning of the section so we begin them at 0.
728 #define DWARF_DEBUG \
730 .debug 0 : { *(.debug) } \
731 .line 0 : { *(.line) } \
732 /* GNU DWARF 1 extensions */ \
733 .debug_srcinfo 0 : { *(.debug_srcinfo) } \
734 .debug_sfnames 0 : { *(.debug_sfnames) } \
735 /* DWARF 1.1 and DWARF 2 */ \
736 .debug_aranges 0 : { *(.debug_aranges) } \
737 .debug_pubnames 0 : { *(.debug_pubnames) } \
739 .debug_info 0 : { *(.debug_info \
740 .gnu.linkonce.wi.*) } \
741 .debug_abbrev 0 : { *(.debug_abbrev) } \
742 .debug_line 0 : { *(.debug_line) } \
743 .debug_frame 0 : { *(.debug_frame) } \
744 .debug_str 0 : { *(.debug_str) } \
745 .debug_loc 0 : { *(.debug_loc) } \
746 .debug_macinfo 0 : { *(.debug_macinfo) } \
747 .debug_pubtypes 0 : { *(.debug_pubtypes) } \
749 .debug_ranges 0 : { *(.debug_ranges) } \
750 /* SGI/MIPS DWARF 2 extensions */ \
751 .debug_weaknames 0 : { *(.debug_weaknames) } \
752 .debug_funcnames 0 : { *(.debug_funcnames) } \
753 .debug_typenames 0 : { *(.debug_typenames) } \
754 .debug_varnames 0 : { *(.debug_varnames) } \
755 /* GNU DWARF 2 extensions */ \
756 .debug_gnu_pubnames 0 : { *(.debug_gnu_pubnames) } \
757 .debug_gnu_pubtypes 0 : { *(.debug_gnu_pubtypes) } \
759 .debug_types 0 : { *(.debug_types) } \
761 .debug_addr 0 : { *(.debug_addr) } \
762 .debug_line_str 0 : { *(.debug_line_str) } \
763 .debug_loclists 0 : { *(.debug_loclists) } \
764 .debug_macro 0 : { *(.debug_macro) } \
765 .debug_names 0 : { *(.debug_names) } \
766 .debug_rnglists 0 : { *(.debug_rnglists) } \
767 .debug_str_offsets 0 : { *(.debug_str_offsets) }
769 /* Stabs debugging sections. */
/*
 * Stabs debugging sections plus .comment.  Placed at address 0 like the
 * DWARF sections above, since debug info is not loaded at run time.
 */
770 #define STABS_DEBUG \
771 .stab 0 : { *(.stab) } \
772 .stabstr 0 : { *(.stabstr) } \
773 .stab.excl 0 : { *(.stab.excl) } \
774 .stab.exclstr 0 : { *(.stab.exclstr) } \
775 .stab.index 0 : { *(.stab.index) } \
776 .stab.indexstr 0 : { *(.stab.indexstr) } \
777 .comment 0 : { *(.comment) }
779 #ifdef CONFIG_GENERIC_BUG
782 __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
783 __start___bug_table = .; \
784 KEEP(*(__bug_table)) \
785 __stop___bug_table = .; \
791 #ifdef CONFIG_UNWINDER_ORC
792 #define ORC_UNWIND_TABLE \
794 .orc_unwind_ip : AT(ADDR(.orc_unwind_ip) - LOAD_OFFSET) { \
795 __start_orc_unwind_ip = .; \
796 KEEP(*(.orc_unwind_ip)) \
797 __stop_orc_unwind_ip = .; \
800 .orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) { \
801 __start_orc_unwind = .; \
802 KEEP(*(.orc_unwind)) \
803 __stop_orc_unwind = .; \
806 .orc_lookup : AT(ADDR(.orc_lookup) - LOAD_OFFSET) { \
808 . += (((SIZEOF(.text) + LOOKUP_BLOCK_SIZE - 1) / \
809 LOOKUP_BLOCK_SIZE) + 1) * 4; \
810 orc_lookup_end = .; \
813 #define ORC_UNWIND_TABLE
816 #ifdef CONFIG_PM_TRACE
819 .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
820 __tracedata_start = .; \
821 KEEP(*(.tracedata)) \
822 __tracedata_end = .; \
829 * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
830 * Otherwise, the type of .notes section would become PROGBITS instead of NOTES.
833 /DISCARD/ : { *(.note.GNU-stack) } \
834 .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
840 #define INIT_SETUP(initsetup_align) \
841 . = ALIGN(initsetup_align); \
843 KEEP(*(.init.setup)) \
846 #define INIT_CALLS_LEVEL(level) \
847 __initcall##level##_start = .; \
848 KEEP(*(.initcall##level##.init)) \
849 KEEP(*(.initcall##level##s.init)) \
852 __initcall_start = .; \
853 KEEP(*(.initcallearly.init)) \
854 INIT_CALLS_LEVEL(0) \
855 INIT_CALLS_LEVEL(1) \
856 INIT_CALLS_LEVEL(2) \
857 INIT_CALLS_LEVEL(3) \
858 INIT_CALLS_LEVEL(4) \
859 INIT_CALLS_LEVEL(5) \
860 INIT_CALLS_LEVEL(rootfs) \
861 INIT_CALLS_LEVEL(6) \
862 INIT_CALLS_LEVEL(7) \
/* Console initcall table (.con_initcall.init), bracketed start/end. */
865 #define CON_INITCALL \
866 __con_initcall_start = .; \
867 KEEP(*(.con_initcall.init)) \
868 __con_initcall_end = .;
870 #ifdef CONFIG_BLK_DEV_INITRD
871 #define INIT_RAM_FS \
873 __initramfs_start = .; \
874 KEEP(*(.init.ramfs)) \
876 KEEP(*(.init.ramfs.info))
882 * Memory encryption operates on a page basis. Since we need to clear
883 * the memory encryption mask for this section, it needs to be aligned
884 * on a page boundary and be a page-size multiple in length.
886 * Note: We use a separate section so that only this section gets
887 * decrypted to avoid exposing more than we wish.
889 #ifdef CONFIG_AMD_MEM_ENCRYPT
/* Page-aligned on both sides so that only this data is decrypted (see above). */
890 #define PERCPU_DECRYPTED_SECTION \
891 . = ALIGN(PAGE_SIZE); \
892 *(.data..percpu..decrypted) \
893 . = ALIGN(PAGE_SIZE);
895 #define PERCPU_DECRYPTED_SECTION
900 * Default discarded sections.
902 * Some archs want to discard exit text/data at runtime rather than
903 * link time due to cross-section references such as alt instructions,
904 * bug table, eh_frame, etc. DISCARDS must be the last of output
905 * section definitions so that such archs put those in earlier section
908 #ifdef RUNTIME_DISCARD_EXIT
909 #define EXIT_DISCARDS
911 #define EXIT_DISCARDS \
926 * PERCPU_INPUT - the percpu input sections
927 * @cacheline: cacheline size
929 * The core percpu section names and core symbols which do not rely
930 * directly upon load addresses.
932 * @cacheline is used to align subsections to avoid false cacheline
933 * sharing between subsections for different purposes.
935 #define PERCPU_INPUT(cacheline) \
936 __per_cpu_start = .; \
937 *(.data..percpu..first) \
938 . = ALIGN(PAGE_SIZE); \
939 *(.data..percpu..page_aligned) \
940 . = ALIGN(cacheline); \
941 *(.data..percpu..read_mostly) \
942 . = ALIGN(cacheline); \
944 *(.data..percpu..shared_aligned) \
945 PERCPU_DECRYPTED_SECTION \
949 * PERCPU_VADDR - define output section for percpu area
950 * @cacheline: cacheline size
951 * @vaddr: explicit base address (optional)
952 * @phdr: destination PHDR (optional)
954 * Macro which expands to output section for percpu area.
956 * @cacheline is used to align subsections to avoid false cacheline
957 * sharing between subsections for different purposes.
959 * If @vaddr is not blank, it specifies explicit base address and all
960 * percpu symbols will be offset from the given address. If blank,
961 * @vaddr always equals @laddr + LOAD_OFFSET.
963 * @phdr defines the output PHDR to use if not blank. Be warned that
964 * output PHDR is sticky. If @phdr is specified, the next output
965 * section in the linker script will go there too. @phdr should have
968 * Note that this macro defines __per_cpu_load as an absolute symbol.
969 * If there is no need to put the percpu section at a predetermined
970 * address, use PERCPU_SECTION.
972 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
973 __per_cpu_load = .; \
974 .data..percpu vaddr : AT(__per_cpu_load - LOAD_OFFSET) { \
975 PERCPU_INPUT(cacheline) \
977 . = __per_cpu_load + SIZEOF(.data..percpu);
980 * PERCPU_SECTION - define output section for percpu area, simple version
981 * @cacheline: cacheline size
983 * Align to PAGE_SIZE and output the output section for the percpu area. This
984 * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
985 * __per_cpu_start will be identical.
987 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
988 * except that __per_cpu_load is defined as a relative symbol against
989 * .data..percpu which is required for relocatable x86_32 configuration.
991 #define PERCPU_SECTION(cacheline) \
992 . = ALIGN(PAGE_SIZE); \
993 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
994 __per_cpu_load = .; \
995 PERCPU_INPUT(cacheline) \
1000 * Definition of the high level *_SECTION macros
1001 * They will fit only a subset of the architectures
1007 * All sections are combined in a single .data section.
1008 * The sections following CONSTRUCTORS are arranged so their
1009 * typical alignment matches.
1010 * A cacheline is typical/always less than a PAGE_SIZE so
1011 * the sections that has this restriction (or similar)
1012 * is located before the ones requiring PAGE_SIZE alignment.
1013 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment which
1014 * matches the requirement of PAGE_ALIGNED_DATA.
1016 * use 0 as page_align if page_aligned data is not used */
1017 #define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
1018 . = ALIGN(PAGE_SIZE); \
1019 .data : AT(ADDR(.data) - LOAD_OFFSET) { \
1020 INIT_TASK_DATA(inittask) \
1022 PAGE_ALIGNED_DATA(pagealigned) \
1023 CACHELINE_ALIGNED_DATA(cacheline) \
1024 READ_MOSTLY_DATA(cacheline) \
1030 #define INIT_TEXT_SECTION(inittext_align) \
1031 . = ALIGN(inittext_align); \
1032 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
1038 #define INIT_DATA_SECTION(initsetup_align) \
1039 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
1041 INIT_SETUP(initsetup_align) \
1047 #define BSS_SECTION(sbss_align, bss_align, stop_align) \
1048 . = ALIGN(sbss_align); \
1052 . = ALIGN(stop_align); \