// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/set_memory.h>

#include "bpf_jit.h"

#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)

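/*
 * TMP_REG_* and TCALL_CNT extend the BPF register space with JIT-internal
 * pseudo registers: the temporaries map to the caller-clobbered x10-x12,
 * while the tail-call counter lives in the callee-saved x26 so that it
 * survives calls into kernel helper functions.
 */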
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(10),
	[TMP_REG_2] = A64_R(11),
	[TMP_REG_3] = A64_R(12),
	/* tail_call_cnt */
	[TCALL_CNT] = A64_R(26),
	/* temporary register for blinding constants */
	[BPF_REG_AX] = A64_R(9),
};

struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int epilogue_offset;
	int *offset;
	int exentry_idx;
	__le32 *image;
	u32 stack_size;
};

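/*
 * Code generation runs in two passes over the program: a sizing pass with
 * ctx->image == NULL that only advances ctx->idx and records per-insn
 * offsets, and a second pass that writes the instructions into the
 * allocated image. emit() below supports both.
 */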
static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if ((hi & 0xffff) == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			if (lo != 0xffff)
				emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}

static int i64_i16_blocks(const u64 val, bool inverse)
{
	return (((val >> 0) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
	       (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000));
}

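/*
 * emit_a64_mov_i64() below picks between a MOVZ-based and a MOVN-based
 * sequence by comparing how many 16-bit chunks differ from 0x0000 vs 0xffff.
 * Illustrative example: for val = 0xffff0000ffff1234 only two chunks differ
 * from 0xffff, so the inverted form wins and the JIT emits MOVN (yielding
 * 0xffff0000ffffffff) followed by a single MOVK #0x1234 - two instructions
 * instead of the three a MOVZ/MOVK/MOVK sequence would need.
 */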
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 nrm_tmp = val, rev_tmp = ~val;
	bool inverse;
	int shift;

	if (!(nrm_tmp >> 32))
		return emit_a64_mov_i(0, reg, (u32)val, ctx);

	inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false);
	shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) :
				(fls64(nrm_tmp) - 1)), 16), 0);
	if (inverse)
		emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx);
	else
		emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
	shift -= 16;
	while (shift >= 0) {
		if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000))
			emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
		shift -= 16;
	}
}

/*
 * Kernel addresses in the vmalloc space use at most 48 bits, and the
 * remaining bits are guaranteed to be 0x1. So we can compose the address
 * with a fixed length movn/movk/movk sequence.
 */
static inline void emit_addr_mov_i64(const int reg, const u64 val,
				     struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx);
	while (shift < 32) {
		tmp >>= 16;
		shift += 16;
		emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
	}
}

static inline int bpf2a64_offset(int bpf_insn, int off,
				 const struct jit_ctx *ctx)
{
	/* BPF JMP offset is relative to the next instruction */
	bpf_insn++;
	/*
	 * Whereas arm64 branch instructions encode the offset
	 * from the branch itself, so we must subtract 1 from the
	 * instruction offset.
	 */
	return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
}

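/*
 * Rough example: if BPF insn i ends with its A64 branch in slot 12
 * (so ctx->offset[i + 1] == 13) and the BPF target starts at A64 slot 20,
 * bpf2a64_offset() returns 20 - 12 = 8, i.e. branch forward by eight
 * A64 instructions relative to the branch itself.
 */
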
static void jit_fill_hole(void *area, unsigned int size)
{
	__le32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

static bool is_addsub_imm(u32 imm)
{
	/* Either imm12 or shifted imm12. */
	return !(imm & ~0xfff) || !(imm & ~0xfff000);
}

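/*
 * For example, 0xfff and 0xfff000 both pass the is_addsub_imm() check,
 * while 0x1001 does not and has to be materialized in a temporary
 * register before the ADD/SUB.
 */
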
/* Tail call offset to jump into */
#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
#define PROLOGUE_OFFSET 8
#else
#define PROLOGUE_OFFSET 7
#endif
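/*
 * PROLOGUE_OFFSET counts the prologue instructions up to and including the
 * tail_call_cnt initialization (one more with the BTI_C landing pad).
 * A tail call branches to prog->bpf_func + PROLOGUE_OFFSET * 4, skipping the
 * register saves and the counter reset so tail_call_cnt keeps accumulating
 * across chained tail calls; build_prologue() checks the emitted count
 * against this constant.
 */
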
static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
{
	const struct bpf_prog *prog = ctx->prog;
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
	int cur_offset;

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 * BPF fp register => -64:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - prog->aux->stack_depth)
	 *                        |RSVD | padding
	 * current A64_SP =>      +-----+ <= (BPF_FP - ctx->stack_size)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 */

	/* BTI landing pad */
	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
		emit(A64_BTI_C, ctx);

	/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	emit(A64_PUSH(fp, tcc, A64_SP), ctx);

	/* Set up BPF prog stack base register */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	if (!ebpf_from_cbpf) {
		/* Initialize tail_call_cnt */
		emit(A64_MOVZ(1, tcc, 0, 0), ctx);

		cur_offset = ctx->idx - idx0;
		if (cur_offset != PROLOGUE_OFFSET) {
			pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
				    cur_offset, PROLOGUE_OFFSET);
			return -1;
		}

		/* BTI landing pad for the tail call, done with a BR */
		if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
			emit(A64_BTI_J, ctx);
	}

	/* Stack must be multiples of 16B */
	ctx->stack_size = round_up(prog->aux->stack_depth, 16);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	return 0;
}

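/*
 * emit_bpf_tail_call() open-codes the tail call: bounds-check the index
 * against array->map.max_entries, bail out once the tail-call counter
 * exceeds MAX_TAIL_CALL_CNT, load the target prog pointer from
 * array->ptrs[index], then undo our stack reservation and branch to the
 * target's code just past its prologue (see PROLOGUE_OFFSET), reusing the
 * current frame instead of going through a regular call/return.
 */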
static int out_offset = -1; /* initialized on the first pass of build_body() */
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	/* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
	const u8 r2 = bpf2a64[BPF_REG_2];
	const u8 r3 = bpf2a64[BPF_REG_3];

	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 prg = bpf2a64[TMP_REG_2];
	const u8 tcc = bpf2a64[TCALL_CNT];
	const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
	size_t off;

	/* if (index >= array->map.max_entries)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR32(tmp, r2, tmp), ctx);
	emit(A64_MOV(0, r3, r3), ctx);
	emit(A64_CMP(0, r3, tmp), ctx);
	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *     goto out;
	 * tail_call_cnt++;
	 */
	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit(A64_CMP(1, tcc, tmp), ctx);
	emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);

	/* prog = array->ptrs[index];
	 * if (prog == NULL)
	 *     goto out;
	 */
	off = offsetof(struct bpf_array, ptrs);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_ADD(1, tmp, r2, tmp), ctx);
	emit(A64_LSL(1, prg, r3, 3), ctx);
	emit(A64_LDR64(prg, tmp, prg), ctx);
	emit(A64_CBZ(1, prg, jmp_offset), ctx);

	/* goto *(prog->bpf_func + prologue_offset); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR64(tmp, prg, tmp), ctx);
	emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
	emit(A64_BR(tmp), ctx);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}
	return 0;
#undef cur_offset
#undef jmp_offset
}

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers */
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)

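/*
 * ex->fixup packs two things: bits [26:0] hold the (positive) distance from
 * the faulting load back to the fixup field itself, and bits [31:27] hold
 * the A64 destination register. On a fault the handler below zeroes that
 * register and resumes at the instruction following the load, mirroring the
 * bpf_probe_read() behaviour for bad kernel pointers.
 */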
int arm64_bpf_fixup_exception(const struct exception_table_entry *ex,
			      struct pt_regs *regs)
{
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);

	regs->regs[dst_reg] = 0;
	regs->pc = (unsigned long)&ex->fixup - offset;

	return 1;
}

/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	off_t offset;
	unsigned long pc;
	struct exception_table_entry *ex;

	if (!ctx->image)
		/* First pass */
		return 0;

	if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
		return 0;

	if (!ctx->prog->aux->extable ||
	    WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->exentry_idx];
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;
	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
		    FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ctx->exentry_idx++;
	return 0;
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
		      bool extra_pass)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const u8 tmp3 = bpf2a64[TMP_REG_3];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
			  BPF_CLASS(code) == BPF_JMP;
	const bool isdw = BPF_SIZE(code) == BPF_DW;
	u8 jmp_cond, reg;
	s32 jmp_offset;
	u32 a64_insn;
	int ret;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)

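/*
 * check_imm19()/check_imm26() guard the signed immediate ranges of A64
 * branches: conditional branches and CBZ/CBNZ encode a 19-bit offset,
 * unconditional B a 26-bit one, so out-of-range jumps fail the JIT instead
 * of silently producing a truncated encoding.
 */
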
	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit(A64_UDIV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit(A64_UDIV(is64, tmp, dst, src), ctx);
		emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_ADD_I(is64, dst, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_ADD(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_SUB_I(is64, dst, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		a64_insn = A64_AND_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_AND(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		a64_insn = A64_ORR_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_ORR(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		a64_insn = A64_EOR_I(is64, dst, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_EOR(is64, dst, dst, tmp), ctx);
		}
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		emit(A64_CMP(is64, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i, off, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JLT:
			jmp_cond = A64_COND_CC;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JLE:
			jmp_cond = A64_COND_LS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSLT:
			jmp_cond = A64_COND_LT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		case BPF_JSLE:
			jmp_cond = A64_COND_LE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		emit(A64_TST(is64, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		if (is_addsub_imm(imm)) {
			emit(A64_CMP_I(is64, dst, imm), ctx);
		} else if (is_addsub_imm(-imm)) {
			emit(A64_CMN_I(is64, dst, -imm), ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_CMP(is64, dst, tmp), ctx);
		}
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		a64_insn = A64_TST_I(is64, dst, imm);
		if (a64_insn != AARCH64_BREAK_FAULT) {
			emit(a64_insn, ctx);
		} else {
			emit_a64_mov_i(is64, tmp, imm, ctx);
			emit(A64_TST(is64, dst, tmp), ctx);
		}
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		bool func_addr_fixed;
		u64 func_addr;

		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;
		emit_addr_mov_i64(tmp, func_addr, ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		break;
	}
	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		if (emit_bpf_tail_call(ctx))
			return -EFAULT;
		break;
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when last instruction is EXIT,
		   simply fallthrough to epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}
	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}

		ret = add_exception_handler(insn, ctx, dst);
		if (ret)
			return ret;
		break;
	/* speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		/*
		 * Nothing required here.
		 *
		 * In case of arm64, we rely on the firmware mitigation of
		 * Speculative Store Bypass as controlled via the ssbd kernel
		 * parameter. Whenever the mitigation is enabled, it works
		 * for all of the kernel code with no need to provide any
		 * additional instructions.
		 */
		break;
	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;
	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		if (insn->imm != BPF_ADD) {
			pr_err_once("unknown atomic op code %02x\n", insn->imm);
			return -EINVAL;
		}

		/* STX XADD: lock *(u32 *)(dst + off) += src
		 * and
		 * STX XADD: lock *(u64 *)(dst + off) += src
		 */

		if (!off) {
			reg = dst;
		} else {
			emit_a64_mov_i(1, tmp, off, ctx);
			emit(A64_ADD(1, tmp, tmp, dst), ctx);
			reg = tmp;
		}
		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
			emit(A64_STADD(isdw, reg, src), ctx);
		} else {
			emit(A64_LDXR(isdw, tmp2, reg), ctx);
			emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
			emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
			jmp_offset = -3;
			check_imm19(jmp_offset);
			/* LL/SC: retry from the LDXR if the exclusive store failed */
			emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
		}
		break;
	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	/*
	 * - offset[0] - offset of the end of prologue,
	 *   start of the 1st instruction.
	 * - offset[1] - offset of the end of 1st instruction,
	 *   start of the 2nd instruction
	 * [....]
	 * - offset[3] - offset of the end of 3rd instruction,
	 *   start of 4th instruction
	 */
	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;
		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}
	/*
	 * offset is allocated with prog->len + 1 so fill in
	 * the last element with the offset after the last
	 * instruction (end of program)
	 */
	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries))
		return -1;

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

struct arm64_jit_data {
	struct bpf_binary_header *header;
	u8 *image;
	struct jit_ctx ctx;
};

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	int image_size, prog_size, extable_size;
	struct bpf_prog *tmp, *orig_prog = prog;
	struct bpf_binary_header *header;
	struct arm64_jit_data *jit_data;
	bool was_classic = bpf_prog_was_classic(prog);
	bool tmp_blinded = false;
	bool extra_pass = false;
	struct jit_ctx ctx;
	u8 *image_ptr;

	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/* If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter.
	 */
	if (IS_ERR(tmp))
		return orig_prog;
	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		prog_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}
	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 1. Initial fake pass to compute ctx->idx. */

	/* Fake pass to fill in ctx->offset. */
	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_off;
	}

	if (build_prologue(&ctx, was_classic)) {
		prog = orig_prog;
		goto out_off;
	}

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	extable_size = prog->aux->num_exentries *
		sizeof(struct exception_table_entry);

	/* Now we know the actual image size. */
	prog_size = sizeof(u32) * ctx.idx;
	image_size = prog_size + extable_size;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_off;
	}

	/* 2. Now, the actual pass. */

	ctx.image = (__le32 *)image_ptr;
	if (extable_size)
		prog->aux->extable = (void *)image_ptr + prog_size;
skip_init_ctx:
	ctx.idx = 0;
	ctx.exentry_idx = 0;

	build_prologue(&ctx, was_classic);

	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_off;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	if (!prog->is_func || extra_pass) {
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			bpf_jit_binary_free(header);
			prog->bpf_func = NULL;
			prog->jited = 0;
			goto out_off;
		}
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
	prog->jited_len = prog_size;

	if (!prog->is_func || extra_pass) {
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
		kfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}
out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ?
					   tmp : orig_prog);
	return prog;
}

void *bpf_jit_alloc_exec(unsigned long size)
{
	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
				    BPF_JIT_REGION_END, GFP_KERNEL,
				    PAGE_KERNEL, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

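/*
 * JIT images live in the dedicated BPF_JIT_REGION window of the vmalloc
 * space; keeping every image (program plus its extable) inside that bounded
 * region is what lets add_exception_handler() assume the fixup offsets fit
 * in the 27 bits reserved for them.
 */
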
void bpf_jit_free_exec(void *addr)
{
	return vfree(addr);
}