/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

void *module_alloc(unsigned long size)
{
        void *p;
        u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;

        if (IS_ENABLED(CONFIG_KASAN))
                /* don't exceed the static module region - see below */
                module_alloc_end = MODULES_END;

        p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
                                 module_alloc_end, GFP_KERNEL, PAGE_KERNEL_EXEC,
                                 0, NUMA_NO_NODE, __builtin_return_address(0));

        if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
            !IS_ENABLED(CONFIG_KASAN))
                /*
                 * KASAN can only deal with module allocations being served
                 * from the reserved module region, since the remainder of
                 * the vmalloc region is already backed by zero shadow pages,
                 * and punching holes into it is non-trivial. Since the module
                 * region is not randomized when KASAN is enabled, it is even
                 * less likely that the module region gets exhausted, so we
                 * can simply omit this fallback in that case.
                 */
                p = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START,
                                         VMALLOC_END, GFP_KERNEL,
                                         PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
                                         __builtin_return_address(0));

        if (p && (kasan_module_alloc(p, size) < 0)) {
                vfree(p);
                return NULL;
        }

        return p;
}
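
/*
 * Note: allocations served from the vmalloc fallback range above may land
 * out of direct branch range (+/-128MiB) of the rest of the kernel and of
 * other modules; apply_relocate_add() below compensates by routing
 * out-of-range JUMP26/CALL26 branches through PLT entries.
 */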

enum aarch64_reloc_op {
        RELOC_OP_NONE,
        RELOC_OP_ABS,
        RELOC_OP_PREL,
        RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val)
{
        switch (reloc_op) {
        case RELOC_OP_ABS:
                return val;
        case RELOC_OP_PREL:
                return val - (u64)place;
        case RELOC_OP_PAGE:
                return (val & ~0xfff) - ((u64)place & ~0xfff);
        case RELOC_OP_NONE:
                return 0;
        }

        pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
        return 0;
}
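
/*
 * For illustration, with hypothetical addresses: if place = 0x...41234 and
 * val = 0x...43456, RELOC_OP_PAGE yields 0x43000 - 0x41000 = 0x2000, i.e.
 * the page-granular displacement consumed by an ADRP instruction; the low
 * 12 bits of val are then applied separately by a :lo12: relocation.
 */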

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
        s64 sval = do_reloc(op, place, val);

        switch (len) {
        case 16:
                *(s16 *)place = sval;
                if (sval < S16_MIN || sval > U16_MAX)
                        return -ERANGE;
                break;
        case 32:
                *(s32 *)place = sval;
                if (sval < S32_MIN || sval > U32_MAX)
                        return -ERANGE;
                break;
        case 64:
                *(s64 *)place = sval;
                break;
        default:
                pr_err("Invalid length (%d) for data relocation\n", len);
                return 0;
        }

        return 0;
}
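
/*
 * Note that the bounds checks above accept both the signed and the unsigned
 * interpretation of the field: for len == 16, for example, any value in
 * [S16_MIN, U16_MAX] (i.e. [-32768, 65535]) passes the overflow check.
 */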

enum aarch64_insn_movw_imm_type {
        AARCH64_INSN_IMM_MOVNZ,
        AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
                           int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
        u64 imm;
        s64 sval;
        u32 insn = le32_to_cpu(*(u32 *)place);

        sval = do_reloc(op, place, val);
        imm = sval >> lsb;

        if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
                /*
                 * For signed MOVW relocations, we have to manipulate the
                 * instruction encoding depending on whether or not the
                 * immediate is less than zero.
                 */
                insn &= ~(3 << 29);
                if (sval >= 0) {
                        /* >=0: Set the instruction to MOVZ (opcode 10b). */
                        insn |= 2 << 29;
                } else {
                        /*
                         * <0: Set the instruction to MOVN (opcode 00b).
                         *     Since we've masked the opcode already, we
                         *     don't need to do anything other than
                         *     inverting the new immediate field.
                         */
                        imm = ~imm;
                }
        }

        /* Update the instruction with the new encoding. */
        insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
        *(u32 *)place = cpu_to_le32(insn);

        if (imm > U16_MAX)
                return -ERANGE;

        return 0;
}
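
/*
 * For illustration: a toolchain typically materializes a 64-bit absolute
 * address with a sequence such as
 *
 *      movz    x0, #:abs_g3:sym
 *      movk    x0, #:abs_g2_nc:sym
 *      movk    x0, #:abs_g1_nc:sym
 *      movk    x0, #:abs_g0_nc:sym
 *
 * where each R_AARCH64_MOVW_UABS_G* relocation patches one 16-bit slice of
 * the address into its instruction via reloc_insn_movw() above, with
 * lsb = 48/32/16/0 respectively.
 */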

static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
                          int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
        u64 imm, imm_mask;
        s64 sval;
        u32 insn = le32_to_cpu(*(u32 *)place);

        /* Calculate the relocation value. */
        sval = do_reloc(op, place, val);
        sval >>= lsb;

        /* Extract the value bits and shift them to bit 0. */
        imm_mask = (BIT(lsb + len) - 1) >> lsb;
        imm = sval & imm_mask;

        /* Update the instruction's immediate field. */
        insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
        *(u32 *)place = cpu_to_le32(insn);

        /*
         * Extract the upper value bits (including the sign bit) and
         * shift them to bit 0.
         */
        sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

        /*
         * Overflow has occurred if the upper bits are not all equal to
         * the sign bit of the value.
         */
        if ((u64)(sval + 1) >= 2)
                return -ERANGE;

        return 0;
}
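
/*
 * The overflow test above works because, as an unsigned comparison,
 * (u64)(sval + 1) >= 2 is false only for sval == 0 or sval == -1, i.e.
 * only when the bits discarded by the relocation were all copies of the
 * sign bit.
 */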

int apply_relocate_add(Elf64_Shdr *sechdrs,
                       const char *strtab,
                       unsigned int symindex,
                       unsigned int relsec,
                       struct module *me)
{
        unsigned int i;
        int ovf;
        bool overflow_check;
        Elf64_Sym *sym;
        void *loc;
        u64 val;
        Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                /* loc corresponds to P in the AArch64 ELF document. */
                loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;

                /* sym is the ELF symbol we're referring to. */
                sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
                        + ELF64_R_SYM(rel[i].r_info);

                /* val corresponds to (S + A) in the AArch64 ELF document. */
                val = sym->st_value + rel[i].r_addend;

                /* Check for overflow by default. */
                overflow_check = true;

                /* Perform the static relocation. */
                switch (ELF64_R_TYPE(rel[i].r_info)) {
                /* Null relocations. */
                case R_ARM_NONE:
                case R_AARCH64_NONE:
                        ovf = 0;
                        break;

                /* Data relocations. */
                case R_AARCH64_ABS64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
                        break;
                case R_AARCH64_ABS32:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
                        break;
                case R_AARCH64_ABS16:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
                        break;
                case R_AARCH64_PREL64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
                        break;
                case R_AARCH64_PREL32:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
                        break;
                case R_AARCH64_PREL16:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
                        break;

                /* MOVW instruction relocations. */
                case R_AARCH64_MOVW_UABS_G0_NC:
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G1_NC:
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G2_NC:
                        overflow_check = false;
                case R_AARCH64_MOVW_UABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_SABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;

                /* Immediate instruction relocations. */
                case R_AARCH64_LD_PREL_LO19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             AARCH64_INSN_IMM_19);
                        break;
                case R_AARCH64_ADR_PREL_LO21:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
                                             AARCH64_INSN_IMM_ADR);
                        break;
#ifndef CONFIG_ARM64_ERRATUM_843419
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                        overflow_check = false;
                case R_AARCH64_ADR_PREL_PG_HI21:
                        ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
                                             AARCH64_INSN_IMM_ADR);
                        break;
#endif
                case R_AARCH64_ADD_ABS_LO12_NC:
                case R_AARCH64_LDST8_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST16_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST32_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST64_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST128_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_TSTBR14:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
                                             AARCH64_INSN_IMM_14);
                        break;
                case R_AARCH64_CONDBR19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             AARCH64_INSN_IMM_19);
                        break;
                case R_AARCH64_JUMP26:
                case R_AARCH64_CALL26:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
                                             AARCH64_INSN_IMM_26);

                        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
                            ovf == -ERANGE) {
                                val = module_emit_plt_entry(me, &rel[i], sym);
                                ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val,
                                                     2, 26,
                                                     AARCH64_INSN_IMM_26);
                        }
                        break;

                default:
                        pr_err("module %s: unsupported RELA relocation: %llu\n",
                               me->name, ELF64_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }

                if (overflow_check && ovf == -ERANGE)
                        goto overflow;
        }

        return 0;

overflow:
        pr_err("module %s: overflow in relocation type %d val %Lx\n",
               me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
        return -ENOEXEC;
}
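
/*
 * With CONFIG_ARM64_MODULE_PLTS, module_emit_plt_entry() above returns the
 * address of a branch trampoline in the module's own PLT section. Since that
 * section is part of the module image itself, it should be within direct
 * branch range of the relocated instruction, so the retried JUMP26/CALL26
 * relocation is not expected to overflow again.
 */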

int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
{
        const Elf_Shdr *s, *se;
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
                if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
                        apply_alternatives((void *)s->sh_addr, s->sh_size);
                }
        }

        return 0;
}