/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MODULE_H
#define __ASM_MODULE_H

#include <asm-generic/module.h>

#define MODULE_ARCH_VERMAGIC	"aarch64"

#ifdef CONFIG_ARM64_MODULE_PLTS
struct mod_plt_sec {
	int			plt_shndx;
	int			plt_num_entries;
	int			plt_max_entries;
};

struct mod_arch_specific {
	struct mod_plt_sec	core;
	struct mod_plt_sec	init;

	/* for CONFIG_DYNAMIC_FTRACE */
	struct plt_entry	*ftrace_trampoline;
};
#endif /* CONFIG_ARM64_MODULE_PLTS */
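
/*
 * Allocate (or reuse) an entry in the module's PLT section for the branch
 * relocation at @loc and return the address of the veneer, i.e. the
 * address the relocated branch should be pointed at.
 */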
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym);
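
/*
 * Emit a veneer that stands in for an ADRP at @loc whose page offset is
 * affected by Cortex-A53 erratum 843419, and return the veneer's address.
 */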
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val);
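
/*
 * Base of the region module_alloc() hands out. With KASLR
 * (CONFIG_RANDOMIZE_BASE) it is randomized at boot; otherwise the module
 * region is the MODULES_VSIZE window ending at _etext, which keeps
 * modules within direct-branch range of the kernel text.
 */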
#ifdef CONFIG_RANDOMIZE_BASE
extern u64 module_alloc_base;
#else
#define module_alloc_base	((u64)_etext - MODULES_VSIZE)
#endif

struct plt_entry {
	/*
	 * A program that conforms to the AArch64 Procedure Call Standard
	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
	 * IP1 (x17) may be inserted at any branch instruction that is
	 * exposed to a relocation that supports long branches. Since that
	 * is exactly what we are dealing with here, we are free to use x16
	 * as a scratch register in the PLT veneers.
	 */
	__le32	adrp;	/* adrp	x16, ....		*/
	__le32	add;	/* add	x16, x16, #0x....	*/
	__le32	br;	/* br	x16			*/
};
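
/*
 * A PLT entry for a destination @dst within the +/-4 GiB range of ADRP
 * expands to:
 *
 *	adrp	x16, dst		// address of dst's 4 KiB page
 *	add	x16, x16, :lo12:dst	// low 12 bits of dst
 *	br	x16
 */

/*
 * Cortex-A53 erratum 843419: an ADRP in one of the last two instruction
 * slots of a 4 KiB page (byte offsets 0xff8 and 0xffc) may produce an
 * incorrect result, so those locations must not hold an ADRP while the
 * workaround is enabled.
 */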
static inline bool is_forbidden_offset_for_adrp(void *place)
{
	return IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	       cpus_have_const_cap(ARM64_WORKAROUND_843419) &&
	       ((u64)place & 0xfff) >= 0xff8;
}
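
/*
 * Build the adrp/add/br sequence of a PLT entry that redirects a branch
 * at @pc to @dst, and compare two PLT entries for equality.
 */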
struct plt_entry get_plt_entry(u64 dst, void *pc);
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b);
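
/*
 * 0x0 is not a valid encoding for any of adrp/add/br, so an entry in the
 * zero-initialized PLT section is in use iff any of its words is nonzero.
 */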
static inline bool plt_entry_is_initialized(const struct plt_entry *e)
{
	return e->adrp || e->add || e->br;
}

#endif /* __ASM_MODULE_H */