// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/sort.h>
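
/*
 * Emit an ADRP/ADD pair that makes @reg point to @dst, assuming the pair is
 * placed at @pc: ADRP loads the 4 KB page of the target, ADD fills in the
 * low 12 bits.
 */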
static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
					    enum aarch64_insn_register reg)
{
	u32 adrp, add;

	adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
	add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
					   AARCH64_INSN_VARIANT_64BIT,
					   AARCH64_INSN_ADSB_ADD);

	return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
}
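
/*
 * Build a complete PLT entry for @dst: an ADRP/ADD pair that loads the
 * target address into x16, followed by an indirect branch through x16.
 */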
struct plt_entry get_plt_entry(u64 dst, void *pc)
{
	struct plt_entry plt;
	u32 br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
					     AARCH64_INSN_BRANCH_NOLINK);

	plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
	plt.br = cpu_to_le32(br);
	return plt;
}
static bool plt_entries_equal(const struct plt_entry *a,
			      const struct plt_entry *b)
{
	u64 p, q;

	/*
	 * Check whether both entries refer to the same target:
	 * do the cheapest checks first.
	 * If the 'add' or 'br' opcodes are different, then the target
	 * cannot be the same.
	 */
	if (a->add != b->add || a->br != b->br)
		return false;

	p = ALIGN_DOWN((u64)a, SZ_4K);
	q = ALIGN_DOWN((u64)b, SZ_4K);

	/*
	 * If the 'adrp' opcodes are the same then we just need to check
	 * that they refer to the same 4k region.
	 */
	if (a->adrp == b->adrp && p == q)
		return true;

	return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
	       (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}
static bool in_init(const struct module *mod, void *loc)
{
	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
}
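
/*
 * Allocate (or reuse) a PLT entry for the branch relocation @rela at @loc,
 * and return the address of its first instruction so the branch can be
 * redirected through it.
 */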
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries;
	int j = i - 1;
	u64 val = sym->st_value + rela->r_addend;

	/* skip a slot whose ADRP would land at an erratum 843419 offset */
	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i++;

	plt[i] = get_plt_entry(val, &plt[i]);

	/*
	 * Check if the entry we just created is a duplicate. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (j >= 0 && plt_entries_equal(plt + i, plt + j))
		return (u64)&plt[j];

	pltsec->plt_num_entries += i - j;
	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	return (u64)&plt[i];
}
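
/*
 * Cortex-A53 erratum 843419 affects ADRP instructions that end up at
 * offsets 0xff8 or 0xffc within a 4 KB page. When the workaround is
 * enabled, affected ADRPs are redirected to a veneer in the PLT that
 * performs the address generation on their behalf.
 */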
#ifdef CONFIG_ARM64_ERRATUM_843419
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries++;
	u32 br;
	int rd;

	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	/* skip this slot if its ADRP would itself sit at a forbidden offset */
	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i = pltsec->plt_num_entries++;

	/* get the destination register of the ADRP instruction */
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
					  le32_to_cpup((__le32 *)loc));

	/*
	 * Generate the veneer: an ADRP/ADD pair that computes the target
	 * address into the original destination register, followed by a
	 * branch back to the instruction after the original ADRP.
	 */
	br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
					 AARCH64_INSN_BRANCH_NOLINK);

	plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
	plt[i].br = cpu_to_le32(br);

	return (u64)&plt[i];
}
#endif
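
/* Three-way compare: returns -1, 0 or 1, in the style of memcmp(). */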
#define cmp_3way(a, b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
	const Elf64_Rela *x = a, *y = b;
	int i;

	/* sort by type, symbol index and addend */
	i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
	if (i == 0)
		i = cmp_3way(x->r_addend, y->r_addend);
	return i;
}
static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
	/*
	 * Entries are sorted by type, symbol index and addend. That means
	 * that, if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}
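
/*
 * Count the number of PLT entries (branch veneers and erratum 843419
 * ADRP veneers) that the relocations against section @dstidx may require.
 */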
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
			       Elf64_Word dstidx, Elf_Shdr *dstsec)
{
	unsigned int ret = 0;
	Elf64_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		u64 min_align;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
				break;

			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * branch instruction.
			 */
			s = syms + ELF64_R_SYM(rela[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero: this allows us to record the PLT
			 * entry address in the symbol table itself, rather than
			 * having to search the list for duplicates each time we
			 * emit one.
			 */
			if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
				ret++;
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADR_PREL_PG_HI21:
			if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
			    !cpus_have_const_cap(ARM64_WORKAROUND_843419))
				break;

			/*
			 * Determine the minimal safe alignment for this ADRP
			 * instruction: the section alignment at which it is
			 * guaranteed not to appear at a vulnerable offset.
			 *
			 * This comes down to finding the least significant zero
			 * bit in bits [11:3] of the section offset, and
			 * increasing the section's alignment so that the
			 * resulting address of this instruction is guaranteed
			 * to equal the offset in that particular bit (as well
			 * as all less significant bits). This ensures that the
			 * address modulo 4 KB != 0xff8 or 0xffc (which would
			 * have all ones in bits [11:3]).
			 */
			min_align = 2ULL << ffz(rela[i].r_offset | 0x7);

			/*
			 * Allocate veneer space for each ADRP that may appear
			 * at a vulnerable offset nonetheless. At relocation
			 * time, some of these will remain unused since some
			 * ADRP instructions can be patched to ADR instructions
			 * instead.
			 */
			if (min_align > SZ_4K)
				ret++;
			else
				dstsec->sh_addralign = max(dstsec->sh_addralign,
							   min_align);
			break;
		}
	}

	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_843419))
		/*
		 * Add some slack so we can skip PLT slots that may trigger
		 * the erratum due to the placement of the ADRP instruction.
		 */
		ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));

	return ret;
}
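
/*
 * A branch relocation needs a PLT entry only if it may target a different
 * section; targets in the same section are assumed to be reachable
 * directly (see the comment in count_plts()).
 */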
static bool branch_rela_needs_plt(Elf64_Sym *syms, Elf64_Rela *rela,
				  Elf64_Word dstidx)
{
	Elf64_Sym *s = syms + ELF64_R_SYM(rela->r_info);

	if (s->st_shndx == dstidx)
		return false;

	return ELF64_R_TYPE(rela->r_info) == R_AARCH64_JUMP26 ||
	       ELF64_R_TYPE(rela->r_info) == R_AARCH64_CALL26;
}
/* Group branch PLT relas at the front end of the array. */
static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
				      int numrels, Elf64_Word dstidx)
{
	int i = 0, j = numrels - 1;

	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return 0;

	while (i < j) {
		if (branch_rela_needs_plt(syms, &rela[i], dstidx))
			i++;
		else if (branch_rela_needs_plt(syms, &rela[j], dstidx))
			swap(rela[i], rela[j]);
		else
			j--;
	}

	return i;
}
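
/*
 * Arch hook invoked by the generic module loader before the module's
 * sections are laid out: size the .plt and .init.plt sections so that
 * every veneer the relocations may require can be accommodated.
 */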
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	unsigned long core_plts = 0;
	unsigned long init_plts = 0;
	Elf64_Sym *syms = NULL;
	Elf_Shdr *pltsec, *tramp = NULL;
	int i;

	/*
	 * Find the empty .plt section so we can expand it to store the PLT
	 * entries. Record the symtab address as well.
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
			mod->arch.core.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
			mod->arch.init.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name,
				 ".text.ftrace_trampoline"))
			tramp = sechdrs + i;
		else if (sechdrs[i].sh_type == SHT_SYMTAB)
			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
	}

	if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (i = 0; i < ehdr->e_shnum; i++) {
		Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
		int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
		Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/*
		 * sort branch relocations requiring a PLT by type, symbol
		 * index and addend
		 */
		nents = partition_branch_plt_relas(syms, rels, numrels,
						   sechdrs[i].sh_info);
		if (nents)
			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);

		if (!module_init_layout_section(secstrings + dstsec->sh_name))
			core_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
		else
			init_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
	}

	pltsec = sechdrs + mod->arch.core.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
	mod->arch.core.plt_num_entries = 0;
	mod->arch.core.plt_max_entries = core_plts;

	pltsec = sechdrs + mod->arch.init.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
	mod->arch.init.plt_num_entries = 0;
	mod->arch.init.plt_max_entries = init_plts;

	if (tramp) {
		tramp->sh_type = SHT_NOBITS;
		tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
		tramp->sh_addralign = __alignof__(struct plt_entry);
		tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
	}

	return 0;
}