// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
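
/*
 * Editorial note on the allocation strategy below: modules are served from
 * the 128 MiB (MODULES_VSIZE) module region by preference, which is placed
 * near the kernel so that direct B/BL branches (+/-128 MiB) between modules
 * and the core kernel stay in range. Only if that region is exhausted, and
 * PLT veneers are available to fix up out-of-range branches, do we fall
 * back to a wider 2 GiB window.
 */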

void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				 module_alloc_base + MODULES_VSIZE,
				 gfp_mask, PAGE_KERNEL_EXEC, 0,
				 NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
					 module_alloc_base + SZ_2G, GFP_KERNEL,
					 PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
					 __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};
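
/*
 * Compute the relocated value for each operation class, following the
 * AArch64 ELF ABI: ABS yields S + A, PREL yields S + A - P, and PAGE
 * yields Page(S + A) - Page(P), where Page(x) clears the low 12 bits.
 * Here 'val' already holds S + A, and 'place' corresponds to P.
 */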
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */
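	/*
	 * For example, a R_AARCH64_PREL32 displacement of +0x80000000 lies
	 * inside the psABI's documented [-2^31, 2^32) window, but exceeds
	 * S32_MAX and is therefore rejected by the checks below.
	 */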

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}
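
	/*
	 * For example, relocating a G0 group against sval == -2 gives
	 * imm == ~(-2) == 1, and the resulting MOVN #1 materializes
	 * ~1 == -2, as required.
	 */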
	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);
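
	/*
	 * For example, with len == 19 the field holds [-2^18, 2^18 - 1]:
	 * sval == 2^18 leaves 1 in the upper bits, while sval == -2^18
	 * leaves -1, i.e. all ones matching the sign bit.
	 */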

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);
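
	/*
	 * Getting here means 'place' is an offset affected by Cortex-A53
	 * erratum 843419: an ADRP in one of the last two instruction slots
	 * of a 4 KiB page may return a wrong result, so the instruction
	 * must be rewritten.
	 */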
	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
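
		/*
		 * The G0..G3 groups select successive 16-bit slices of the
		 * value (lsb 0, 16, 32, 48); the _NC ("no check") variants
		 * skip the overflow check because a relocation for a higher
		 * group covers the remaining bits.
		 */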
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			/* Fall through */
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
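
		/*
		 * The LDSTxx_ABS_LO12_NC cases above patch a scaled 12-bit
		 * unsigned offset: an x-byte access encodes address bits
		 * [11:log2(x)], which is why len shrinks as lsb grows
		 * (e.g. an 8-byte LDST64 access uses bits [11:3]).
		 */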
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
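
		/*
		 * Branch ranges: TSTBR14 (TBZ/TBNZ) reaches +/-32 KiB and
		 * CONDBR19 (CBZ/B.cond) +/-1 MiB; no veneering is attempted
		 * for these. The B/BL cases below reach +/-128 MiB, beyond
		 * which the branch can be redirected through a PLT entry.
		 */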
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;
		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
			apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}