/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	if (IS_ENABLED(CONFIG_KASAN))
		/* don't exceed the static module region - see below */
		module_alloc_end = MODULES_END;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				 module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0,
				 NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
					 module_alloc_base + SZ_2G, GFP_KERNEL,
					 PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
					 __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
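
/*
 * A note on the allocation strategy above (a summary, not new behaviour):
 * the first attempt stays inside the MODULES_VSIZE window above
 * module_alloc_base (randomized by KASLR), which keeps modules within
 * direct-branch range of the kernel and of each other. Only when that
 * window is exhausted, and PLTs are available to bridge longer distances,
 * do we retry across the wider 2 GB range.
 */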
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
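
/*
 * Worked example for RELOC_OP_PAGE, the Page(S + A) - Page(P) operation
 * used by ADRP: with place == 0xffff000010001ff8 and
 * val == 0xffff000012345678, the result is
 * 0xffff000012345000 - 0xffff000010001000 == 0x02344000, i.e. the
 * distance between the two 4 KB pages, ready to be sliced into an ADRP
 * immediate.
 */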
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > U16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > U32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}
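
/*
 * Note the asymmetric bounds in the checks above: a value is accepted if
 * it fits the field either as signed or as unsigned, e.g. [-32768, 65535]
 * for the 16-bit case, matching the [-2^15, 2^16) range the AArch64 ELF
 * psABI documents for the 16-bit absolute and place-relative relocations.
 */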
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
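
/*
 * Worked example for the MOVNZ case above: relocating the value -2 with
 * lsb == 0 yields imm == 0xfffffffffffffffe. sval is negative, so the
 * opcode stays MOVN and the immediate is inverted to 0x1;
 * MOVN Xd, #0x1 then computes ~0x1 == -2, as required. The final
 * imm > U16_MAX check rejects values whose significant bits don't fit
 * the 16-bit field.
 */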
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
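
/*
 * The check above works because sval now holds only the bits from the
 * field's sign bit upwards, arithmetically shifted down to bit 0. For an
 * in-range value these are a pure sign extension, so sval is either 0 or
 * -1; adding 1 maps exactly {0, -1} onto {1, 0}, the only u64 values
 * below 2. Any other pattern means the value overflowed the field.
 */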
static int reloc_insn_adrp(struct module *mod, __le32 *place, u64 val)
{
	u32 insn;

	if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
	    !cpus_have_const_cap(ARM64_WORKAROUND_843419) ||
	    ((u64)place & 0xfff) < 0xff8)
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}
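
/*
 * Background for the code above: Cortex-A53 erratum 843419 means an ADRP
 * in one of the last two instruction slots of a 4 KB page (offsets 0xff8
 * and 0xffc) can produce an incorrect result under certain conditions.
 * When the workaround is in effect and the instruction lands on such an
 * offset, we either rewrite it as a plain ADR (by clearing bit 31) when
 * the target is within ADR's +/-1 MB range, or branch to a veneer in the
 * module's PLT that performs the page computation from a safe location.
 */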
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;
		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
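
		/*
		 * A note on the grouping above: each Gn relocation selects
		 * bits [16n + 15:16n] of the value for a MOVZ/MOVK/MOVN with
		 * that shift, so a 64-bit constant is typically materialized
		 * as, e.g.:
		 *
		 *	movz	x0, #:abs_g3:sym	// R_AARCH64_MOVW_UABS_G3
		 *	movk	x0, #:abs_g2_nc:sym	// R_AARCH64_MOVW_UABS_G2_NC
		 *	movk	x0, #:abs_g1_nc:sym	// R_AARCH64_MOVW_UABS_G1_NC
		 *	movk	x0, #:abs_g0_nc:sym	// R_AARCH64_MOVW_UABS_G0_NC
		 *
		 * The _NC ("no check") variants disable the overflow check
		 * precisely because another instruction covers the higher
		 * bits.
		 */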
		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
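
		/*
		 * The shrinking (lsb, len) pairs above mirror the scaled
		 * 12-bit unsigned offsets of AArch64 load/store
		 * instructions: an access to an n-byte quantity encodes
		 * offset/n, so e.g. the LDST64 case shifts out the three
		 * always-zero low bits and feeds the remaining 9 bits into
		 * the immediate field.
		 */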
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;
		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
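
/*
 * A note on the R_AARCH64_JUMP26/CALL26 handling above: B and BL reach
 * only +/-128 MB, so when a module lands too far from its branch target
 * (ovf == -ERANGE) and CONFIG_ARM64_MODULE_PLTS is enabled, the branch is
 * redirected to a PLT entry inside the module, which in turn makes an
 * absolute jump to the final destination.
 */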
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
			apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}
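
/*
 * module_finalize() runs once all relocations have been applied:
 * .altinstructions drives apply_alternatives_module(), which patches
 * instruction sequences for the CPU at hand, and the
 * .text.ftrace_trampoline section (pre-allocated via the module linker
 * script when PLTs are enabled) is recorded so ftrace can reach its
 * entry code from anywhere in the module.
 */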