// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google LLC
 * Author: David Brazdil <dbrazdil@google.com>
 *
 * Generates relocation information used by the kernel to convert
 * absolute addresses in hyp data from kernel VAs to hyp VAs.
 *
 * This is necessary because hyp code is linked into the same binary
 * as the kernel but executes under different memory mappings.
 * If the compiler used absolute addressing, those addresses need to
 * be converted before they are used by hyp code.
 *
 * The input of this program is the relocatable ELF object containing
 * all hyp code/data, not yet linked into vmlinux. Hyp section names
 * should have been prefixed with `.hyp` at this point.
 *
 * The output (printed to stdout) is an assembly file containing
 * an array of 32-bit integers and static relocations that instruct
 * the linker of `vmlinux` to populate the array entries with offsets
 * to positions in the kernel binary containing VAs used by hyp code.
 *
 * Note that dynamic relocations could be used for the same purpose.
 * However, those are only generated if CONFIG_RELOCATABLE=y.
 */
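/*
 * For illustration only (not emitted verbatim by this file): the generated
 * assembly for a single R_AARCH64_ABS64 relocation found in `.hyp.text`
 * looks roughly like the following, where <r_offset> stands for the
 * relocation offset taken from the input object:
 *
 *   .data
 *   .pushsection .hyp.reloc, "a"
 *   .global __hyp_section_.hyp.text
 *   .word 0
 *   .reloc 0, R_AARCH64_PREL32, __hyp_section_.hyp.text + 0x<r_offset>
 *   .popsection
 *
 * Actual offsets and section names depend on the input object.
 */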
#include <elf.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include <generated/autoconf.h>

#define HYP_SECTION_PREFIX		".hyp"
#define HYP_RELOC_SECTION		".hyp.reloc"
#define HYP_SECTION_SYMBOL_PREFIX	"__hyp_section_"

/*
 * AArch64 relocation type constants.
 * Included in case these are not defined in the host toolchain.
 */
#ifndef R_AARCH64_ABS64
#define R_AARCH64_ABS64			257
#endif
#ifndef R_AARCH64_PREL64
#define R_AARCH64_PREL64		260
#endif
#ifndef R_AARCH64_PREL32
#define R_AARCH64_PREL32		261
#endif
#ifndef R_AARCH64_PREL16
#define R_AARCH64_PREL16		262
#endif
#ifndef R_AARCH64_PLT32
#define R_AARCH64_PLT32			314
#endif
#ifndef R_AARCH64_LD_PREL_LO19
#define R_AARCH64_LD_PREL_LO19		273
#endif
#ifndef R_AARCH64_ADR_PREL_LO21
#define R_AARCH64_ADR_PREL_LO21		274
#endif
#ifndef R_AARCH64_ADR_PREL_PG_HI21
#define R_AARCH64_ADR_PREL_PG_HI21	275
#endif
#ifndef R_AARCH64_ADR_PREL_PG_HI21_NC
#define R_AARCH64_ADR_PREL_PG_HI21_NC	276
#endif
#ifndef R_AARCH64_ADD_ABS_LO12_NC
#define R_AARCH64_ADD_ABS_LO12_NC	277
#endif
#ifndef R_AARCH64_LDST8_ABS_LO12_NC
#define R_AARCH64_LDST8_ABS_LO12_NC	278
#endif
#ifndef R_AARCH64_TSTBR14
#define R_AARCH64_TSTBR14		279
#endif
#ifndef R_AARCH64_CONDBR19
#define R_AARCH64_CONDBR19		280
#endif
#ifndef R_AARCH64_JUMP26
#define R_AARCH64_JUMP26		282
#endif
#ifndef R_AARCH64_CALL26
#define R_AARCH64_CALL26		283
#endif
#ifndef R_AARCH64_LDST16_ABS_LO12_NC
#define R_AARCH64_LDST16_ABS_LO12_NC	284
#endif
#ifndef R_AARCH64_LDST32_ABS_LO12_NC
#define R_AARCH64_LDST32_ABS_LO12_NC	285
#endif
#ifndef R_AARCH64_LDST64_ABS_LO12_NC
#define R_AARCH64_LDST64_ABS_LO12_NC	286
#endif
#ifndef R_AARCH64_MOVW_PREL_G0
#define R_AARCH64_MOVW_PREL_G0		287
#endif
#ifndef R_AARCH64_MOVW_PREL_G0_NC
#define R_AARCH64_MOVW_PREL_G0_NC	288
#endif
#ifndef R_AARCH64_MOVW_PREL_G1
#define R_AARCH64_MOVW_PREL_G1		289
#endif
#ifndef R_AARCH64_MOVW_PREL_G1_NC
#define R_AARCH64_MOVW_PREL_G1_NC	290
#endif
#ifndef R_AARCH64_MOVW_PREL_G2
#define R_AARCH64_MOVW_PREL_G2		291
#endif
#ifndef R_AARCH64_MOVW_PREL_G2_NC
#define R_AARCH64_MOVW_PREL_G2_NC	292
#endif
#ifndef R_AARCH64_MOVW_PREL_G3
#define R_AARCH64_MOVW_PREL_G3		293
#endif
#ifndef R_AARCH64_LDST128_ABS_LO12_NC
#define R_AARCH64_LDST128_ABS_LO12_NC	299
#endif
/* Global state of the processed ELF. Populated by init_elf(). */
static struct {
	const char	*path;
	char		*begin;
	size_t		size;
	Elf64_Ehdr	*ehdr;
	Elf64_Shdr	*sh_table;
	const char	*sh_string;
} elf;

#if defined(CONFIG_CPU_LITTLE_ENDIAN)

#define elf16toh(x)	le16toh(x)
#define elf32toh(x)	le32toh(x)
#define elf64toh(x)	le64toh(x)

#define ELFENDIAN	ELFDATA2LSB

#elif defined(CONFIG_CPU_BIG_ENDIAN)

#define elf16toh(x)	be16toh(x)
#define elf32toh(x)	be32toh(x)
#define elf64toh(x)	be64toh(x)

#define ELFENDIAN	ELFDATA2MSB

#else

#error PDP-endian sadly unsupported...

#endif

#define fatal_error(fmt, ...)						\
	({								\
		fprintf(stderr, "error: %s: " fmt "\n",			\
			elf.path, ## __VA_ARGS__);			\
		exit(EXIT_FAILURE);					\
		__builtin_unreachable();				\
	})

#define fatal_perror(msg)						\
	({								\
		fprintf(stderr, "error: %s: " msg ": %s\n",		\
			elf.path, strerror(errno));			\
		exit(EXIT_FAILURE);					\
		__builtin_unreachable();				\
	})

#define assert_op(lhs, rhs, fmt, op)					\
	({								\
		typeof(lhs) _lhs = (lhs);				\
		typeof(rhs) _rhs = (rhs);				\
									\
		if (!(_lhs op _rhs)) {					\
			fatal_error("assertion " #lhs " " #op " " #rhs	\
				" failed (lhs=" fmt ", rhs=" fmt	\
				", line=%d)", _lhs, _rhs, __LINE__);	\
		}							\
	})

#define assert_eq(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, ==)
#define assert_ne(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, !=)
#define assert_lt(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, <)
#define assert_ge(lhs, rhs, fmt)	assert_op(lhs, rhs, fmt, >=)
/*
 * Return a pointer of a given type at a given offset from
 * the beginning of the ELF file.
 */
#define elf_ptr(type, off) ((type *)(elf.begin + (off)))

/* Iterate over all sections in the ELF. */
#define for_each_section(var) \
	for (var = elf.sh_table; var < elf.sh_table + elf16toh(elf.ehdr->e_shnum); ++var)

/* Iterate over all Elf64_Rela relocations in a given section. */
#define for_each_rela(shdr, var)					\
	for (var = elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset));	\
	     var < elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset) + elf64toh(shdr->sh_size)); var++)
/* True if a string starts with a given prefix. */
static inline bool starts_with(const char *str, const char *prefix)
{
	return memcmp(str, prefix, strlen(prefix)) == 0;
}

/* Returns a string containing the name of a given section. */
static inline const char *section_name(Elf64_Shdr *shdr)
{
	return elf.sh_string + elf32toh(shdr->sh_name);
}

/* Returns a pointer to the first byte of section data. */
static inline const char *section_begin(Elf64_Shdr *shdr)
{
	return elf_ptr(char, elf64toh(shdr->sh_offset));
}

/* Find a section by its offset from the beginning of the file. */
static inline Elf64_Shdr *section_by_off(Elf64_Off off)
{
	assert_ne(off, 0UL, "%lu");
	return elf_ptr(Elf64_Shdr, off);
}

/* Find a section by its index. */
static inline Elf64_Shdr *section_by_idx(uint16_t idx)
{
	assert_ne(idx, SHN_UNDEF, "%u");
	return &elf.sh_table[idx];
}
/*
 * Memory-map the given ELF file, perform sanity checks, and
 * populate global state.
 */
static void init_elf(const char *path)
{
	int fd, ret;
	struct stat stat;

	/* Store path in the global struct for error printing. */
	elf.path = path;

	/* Open the ELF file. */
	fd = open(path, O_RDONLY);
	if (fd < 0)
		fatal_perror("Could not open ELF file");

	/* Get status of ELF file to obtain its size. */
	ret = fstat(fd, &stat);
	if (ret < 0) {
		close(fd);
		fatal_perror("Could not get status of ELF file");
	}

	/* mmap() the entire ELF file read-only at an arbitrary address. */
	elf.begin = mmap(0, stat.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (elf.begin == MAP_FAILED) {
		close(fd);
		fatal_perror("Could not mmap ELF file");
	}

	/* mmap() was successful, close the FD. */
	close(fd);

	/* Get pointer to the ELF header. */
	assert_ge(stat.st_size, sizeof(*elf.ehdr), "%lu");
	elf.ehdr = elf_ptr(Elf64_Ehdr, 0);

	/* Check the ELF magic. */
	assert_eq(elf.ehdr->e_ident[EI_MAG0], ELFMAG0, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG1], ELFMAG1, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG2], ELFMAG2, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG3], ELFMAG3, "0x%x");

	/* Sanity check that this is an ELF64 relocatable object for AArch64. */
	assert_eq(elf.ehdr->e_ident[EI_CLASS], ELFCLASS64, "%u");
	assert_eq(elf.ehdr->e_ident[EI_DATA], ELFENDIAN, "%u");
	assert_eq(elf16toh(elf.ehdr->e_type), ET_REL, "%u");
	assert_eq(elf16toh(elf.ehdr->e_machine), EM_AARCH64, "%u");

	/* Populate fields of the global struct. */
	elf.sh_table = section_by_off(elf64toh(elf.ehdr->e_shoff));
	elf.sh_string = section_begin(section_by_idx(elf16toh(elf.ehdr->e_shstrndx)));
}
/* Print the prologue of the output ASM file. */
static void emit_prologue(void)
{
	printf(".data\n"
	       ".pushsection " HYP_RELOC_SECTION ", \"a\"\n");
}

/* Print ASM statements needed as a prologue to a processed hyp section. */
static void emit_section_prologue(const char *sh_orig_name)
{
	/* Declare the hyp section symbol. */
	printf(".global %s%s\n", HYP_SECTION_SYMBOL_PREFIX, sh_orig_name);
}
/*
 * Print ASM statements to create a hyp relocation entry for a given
 * R_AARCH64_ABS64 relocation.
 *
 * The linker of vmlinux will populate the position given by `rela` with
 * an absolute 64-bit kernel VA. If the kernel is relocatable, it will
 * also generate a dynamic relocation entry so that the kernel can shift
 * the address at runtime for KASLR.
 *
 * Emit a 32-bit offset from the current address to the position given
 * by `rela`. This way the kernel can iterate over all kernel VAs used
 * by hyp at runtime and convert them to hyp VAs. However, that offset
 * will not be known until linking of `vmlinux`, so emit a PREL32
 * relocation referencing a symbol that the hyp linker script put at
 * the beginning of the relocated section + the offset from `rela`.
 */
static void emit_rela_abs64(Elf64_Rela *rela, const char *sh_orig_name)
{
	/* Offset of this reloc from the beginning of HYP_RELOC_SECTION. */
	static size_t reloc_offset;

	/* Create storage for the 32-bit offset. */
	printf(".word 0\n");

	/*
	 * Create a PREL32 relocation which instructs the linker of `vmlinux`
	 * to insert offset to position <base> + <offset>, where <base> is
	 * a symbol at the beginning of the relocated section, and <offset>
	 * is `rela->r_offset`.
	 */
	printf(".reloc %lu, R_AARCH64_PREL32, %s%s + 0x%lx\n",
	       reloc_offset, HYP_SECTION_SYMBOL_PREFIX, sh_orig_name,
	       elf64toh(rela->r_offset));

	/* Advance past the 32-bit entry just emitted. */
	reloc_offset += 4;
}
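/*
 * Illustrative note, not part of the emitted output: at runtime the kernel
 * walks the resulting array of 32-bit entries in HYP_RELOC_SECTION, adds each
 * entry's value to the address of the entry itself to recover the position
 * holding a kernel VA used by hyp, and rewrites that position with the
 * corresponding hyp VA. The kernel-side code performing this fixup lives
 * outside this file.
 */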
/* Print the epilogue of the output ASM file. */
static void emit_epilogue(void)
{
	printf(".popsection\n");
}

/*
 * Iterate over all RELA relocations in a given section and emit
 * hyp relocation data for all absolute addresses in hyp code/data.
 *
 * Static relocations that generate PC-relative addressing are ignored.
 * Failure is reported for unexpected relocation types.
 */
static void emit_rela_section(Elf64_Shdr *sh_rela)
{
	Elf64_Shdr *sh_orig = &elf.sh_table[elf32toh(sh_rela->sh_info)];
	const char *sh_orig_name = section_name(sh_orig);
	Elf64_Rela *rela;

	/* Skip all non-hyp sections. */
	if (!starts_with(sh_orig_name, HYP_SECTION_PREFIX))
		return;

	emit_section_prologue(sh_orig_name);

	for_each_rela(sh_rela, rela) {
		uint32_t type = (uint32_t)elf64toh(rela->r_info);

		/* Check that rela points inside the relocated section. */
		assert_lt(elf64toh(rela->r_offset), elf64toh(sh_orig->sh_size), "0x%lx");

		switch (type) {
		/*
		 * Data relocations to generate absolute addressing.
		 * Emit a hyp relocation.
		 */
		case R_AARCH64_ABS64:
			emit_rela_abs64(rela, sh_orig_name);
			break;
		/* Allow position-relative data relocations. */
		case R_AARCH64_PREL64:
		case R_AARCH64_PREL32:
		case R_AARCH64_PREL16:
		case R_AARCH64_PLT32:
			break;
		/* Allow relocations to generate PC-relative addressing. */
		case R_AARCH64_LD_PREL_LO19:
		case R_AARCH64_ADR_PREL_LO21:
		case R_AARCH64_ADR_PREL_PG_HI21:
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
		case R_AARCH64_LDST16_ABS_LO12_NC:
		case R_AARCH64_LDST32_ABS_LO12_NC:
		case R_AARCH64_LDST64_ABS_LO12_NC:
		case R_AARCH64_LDST128_ABS_LO12_NC:
			break;
		/* Allow relative relocations for control-flow instructions. */
		case R_AARCH64_TSTBR14:
		case R_AARCH64_CONDBR19:
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			break;
		/* Allow group relocations to create PC-relative offset inline. */
		case R_AARCH64_MOVW_PREL_G0:
		case R_AARCH64_MOVW_PREL_G0_NC:
		case R_AARCH64_MOVW_PREL_G1:
		case R_AARCH64_MOVW_PREL_G1_NC:
		case R_AARCH64_MOVW_PREL_G2:
		case R_AARCH64_MOVW_PREL_G2_NC:
		case R_AARCH64_MOVW_PREL_G3:
			break;
		default:
			fatal_error("Unexpected RELA type %u", type);
		}
	}
}

/* Iterate over all sections and emit hyp relocation data for RELA sections. */
static void emit_all_relocs(void)
{
	Elf64_Shdr *shdr;

	for_each_section(shdr) {
		switch (elf32toh(shdr->sh_type)) {
		case SHT_REL:
			fatal_error("Unexpected SHT_REL section \"%s\"",
				    section_name(shdr));
		case SHT_RELA:
			emit_rela_section(shdr);
			break;
		}
	}
}

int main(int argc, const char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "Usage: %s <elf_input>\n", argv[0]);
		return EXIT_FAILURE;
	}

	init_elf(argv[1]);

	emit_prologue();
	emit_all_relocs();
	emit_epilogue();

	return EXIT_SUCCESS;
}
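/*
 * Build-time usage sketch (illustrative; the exact object and output file
 * names are determined by the kbuild rules, e.g. something like):
 *
 *   gen-hyprel kvm_nvhe.o > hyp-reloc.S
 *
 * The generated assembly is then assembled and linked into vmlinux
 * alongside the hyp object it describes.
 */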