/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	if (IS_ENABLED(CONFIG_KASAN))
		/* don't exceed the static module region - see below */
		module_alloc_end = MODULES_END;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_end, gfp_mask, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
				VMALLOC_END, GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
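/*
 * Note: the initial allocation above is confined to the 128 MiB module
 * region, so plain 26-bit branches always reach the kernel. The vmalloc
 * fallback may place a module arbitrarily far away, which is only safe
 * because CONFIG_ARM64_MODULE_PLTS can route out-of-range branches through
 * PLT entries (see R_AARCH64_JUMP26/CALL26 below).
 */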
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
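/*
 * Worked example for RELOC_OP_PAGE (the ADRP case), with illustrative
 * addresses: for place == 0xffff000010001004 and val == 0xffff000012345678,
 * the result is 0xffff000012345000 - 0xffff000010001000 == 0x2344000, i.e.
 * the distance between the two 4 KiB pages. ADRP adds this to the PC's page
 * address; a following ADD/LDR with a :lo12: relocation supplies the low
 * 12 bits.
 */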
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > U16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > U32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}
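/*
 * The asymmetric bounds above (e.g. [S16_MIN, U16_MAX] for 16-bit data)
 * deliberately accept both the signed and the unsigned interpretation of
 * the relocated field, matching the overflow rules for R_AARCH64_ABS16
 * and R_AARCH64_ABS32 in the AArch64 ELF specification.
 */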
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};
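/*
 * AARCH64_INSN_IMM_MOVKZ leaves the instruction's opcode untouched (the
 * relocation may target MOVK or MOVZ alike), whereas AARCH64_INSN_IMM_MOVNZ
 * also rewrites the opcode to MOVZ or MOVN depending on the sign of the
 * relocated value; see reloc_insn_movw() below.
 */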
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
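/*
 * Example of the MOVNZ path, with an illustrative value: relocating a
 * signed G0 reference to -3 gives imm == 0xfffffffffffffffd; the opcode is
 * flipped to MOVN and the immediate inverted to 2, and "movn xN, #2" indeed
 * writes ~2 == -3 to the register.
 */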
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
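/*
 * The "(u64)(sval + 1) >= 2" test above exploits the arithmetic shift that
 * precedes it: for an in-range value, the discarded upper bits all equal
 * the sign bit, so sval ends up as 0 (non-negative) or -1 (negative),
 * making sval + 1 either 1 or 0. Any other unsigned result means the value
 * did not fit in the len-bit immediate field.
 */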
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_AARCH64_NONE:
			ovf = 0;
			overflow_check = false;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#ifndef CONFIG_ARM64_ERRATUM_843419
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
#endif
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, loc, &rel[i], sym);
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;
		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
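/*
 * For R_AARCH64_JUMP26/CALL26 above: the 26-bit branch immediate covers
 * +/-128 MiB from the branch, so when the target lies farther away and
 * CONFIG_ARM64_MODULE_PLTS is enabled, the branch is retargeted at a newly
 * emitted PLT entry, which in turn makes an absolute jump to the real
 * destination.
 */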
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
			apply_alternatives((void *)s->sh_addr, s->sh_size);
		}
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}