/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
void *module_alloc(unsigned long size)
{
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + MODULES_VSIZE,
				gfp_mask, PAGE_KERNEL_EXEC, 0,
				NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    !IS_ENABLED(CONFIG_KASAN))
		/*
		 * KASAN can only deal with module allocations being served
		 * from the reserved module region, since the remainder of
		 * the vmalloc region is already backed by zero shadow pages,
		 * and punching holes into it is non-trivial. Since the module
		 * region is not randomized when KASAN is enabled, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				module_alloc_base + SZ_2G, GFP_KERNEL,
				PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				__builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
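/*
 * Illustrative note (not part of the original source): module_alloc()
 * first tries the randomized module region, whose size MODULES_VSIZE is
 * assumed here to be the usual 128 MB window. Only when that window is
 * exhausted, and PLTs are available to bridge the longer branches that
 * result, does the fallback widen the range to 2 GB above
 * module_alloc_base; any branch that still cannot reach its target is
 * fixed up later via module_emit_plt_entry() in apply_relocate_add().
 */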
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}
static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative relocations as having a range of [-2^15, 2^16) or
	 * [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		if (sval < S16_MIN || sval > S16_MAX)
			return -ERANGE;
		break;
	case 32:
		*(s32 *)place = sval;
		if (sval < S32_MIN || sval > S32_MAX)
			return -ERANGE;
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}

	return 0;
}
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 *     Since we've masked the opcode already, we
			 *     don't need to do anything other than
			 *     inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}
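/*
 * Worked example (illustrative, not part of the original source): for a
 * R_AARCH64_MOVW_SABS_G0 with sval = -2, imm starts out as
 * 0xfffffffffffffffe; the opcode field is rewritten to MOVN and imm is
 * inverted to 1, so the CPU reconstructs -2 as ~1 at run time. For
 * sval = 5, the opcode becomes MOVZ and imm = 5 is encoded directly.
 */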
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
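/*
 * Illustrative note on the check above (not part of the original
 * source): if the field value fits, the arithmetic shift leaves sval
 * equal to 0 (non-negative value, upper bits all zero) or -1 (negative
 * value, upper bits all one); (u64)(0 + 1) == 1 and (u64)(-1 + 1) == 0
 * are both < 2, so only a genuine overflow returns -ERANGE.
 */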
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);

	return 0;
}
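/*
 * Hedged note (not part of the original source): the forbidden-offset
 * check exists for Cortex-A53 erratum 843419, which affects ADRP
 * instructions at certain offsets within a 4 KB page. ADR and ADRP
 * differ only in bit 31, so clearing that bit turns the ADRP into an
 * ADR whenever the target is within ADR's +/-1 MB reach; otherwise the
 * ADRP is replaced with a branch to an out-of-line veneer.
 */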
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}
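/*
 * Illustrative sketch (not part of the original source): a module
 * calling printk() via "bl printk" carries a R_AARCH64_CALL26 record.
 * The 26-bit immediate covers +/-128 MB, so if the kernel text is in
 * range the branch is patched in place; otherwise, with
 * CONFIG_ARM64_MODULE_PLTS, module_emit_plt_entry() returns the address
 * of a trampoline inside the module and the branch is redirected there
 * instead.
 */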
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
			apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
		if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
		    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
			me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
	}

	return 0;
}
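/*
 * Hedged note (not part of the original source): the
 * .text.ftrace_trampoline section recorded above gives ftrace a
 * patchable branch target inside the module itself, which is assumed to
 * matter when ftrace_caller lies outside the +/-128 MB range of a
 * direct branch from module code.
 */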