1 // SPDX-License-Identifier: GPL-2.0-only
3 * bpf_jit_comp.c: BPF JIT compiler
5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <asm/extable.h>
14 #include <asm/set_memory.h>
15 #include <asm/nospec-branch.h>
16 #include <asm/text-patching.h>
18 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
31 #define EMIT(bytes, len) \
32 do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
34 #define EMIT1(b1) EMIT(b1, 1)
35 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
36 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
37 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
39 #define EMIT1_off32(b1, off) \
40 do { EMIT1(b1); EMIT(off, 4); } while (0)
41 #define EMIT2_off32(b1, b2, off) \
42 do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
43 #define EMIT3_off32(b1, b2, b3, off) \
44 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
45 #define EMIT4_off32(b1, b2, b3, b4, off) \
46 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
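/* Illustrative note (editor's addition, not from the original source): the
 * EMITn macros pack up to four opcode bytes little-endian into a u32 and let
 * emit_code() copy them into the image while 'cnt' tracks the emitted length.
 * For example, assuming the usual x86-64 encodings:
 *
 *	EMIT1(0x55);                        emits 55        (push rbp)
 *	EMIT3(0x48, 0x89, 0xE5);            emits 48 89 E5  (mov rbp, rsp)
 *	EMIT3_off32(0x48, 0x81, 0xEC, n);   emits 48 81 EC  plus a 4-byte imm32 n
 *	                                    (sub rsp, n)
 */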
48 static bool is_imm8(int value)
50 return value <= 127 && value >= -128;
53 static bool is_simm32(s64 value)
55 return value == (s64)(s32)value;
58 static bool is_uimm32(u64 value)
60 return value == (u64)(u32)value;
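/* Editor's sketch: these range checks drive instruction selection below.
 * A displacement or immediate that passes is_imm8() can use the short
 * 1-byte disp8/imm8 encoding, while a value that only passes is_simm32()
 * needs the 4-byte disp32/imm32 form, e.g.:
 *
 *	is_imm8(16)   -> true,  so a load at [reg + 16] uses a 1-byte offset
 *	is_imm8(1000) -> false, is_simm32(1000) -> true, so a 4-byte offset
 */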
64 #define EMIT_mov(DST, SRC) \
67 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
70 static int bpf_size_to_x86_bytes(int bpf_size)
72 if (bpf_size == BPF_W)
74 else if (bpf_size == BPF_H)
76 else if (bpf_size == BPF_B)
78 else if (bpf_size == BPF_DW)
85 * List of x86 cond jump opcodes (. + s8)
86 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
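 * Example (editor's illustration): the short 'je rel8' opcode is 0x74;
 * 0x74 + 0x10 = 0x84, and prefixing 0x0F gives 0F 84, the near 'je rel32'
 * form emitted below via EMIT2_off32(0x0F, jmp_cond + 0x10, ...).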
99 /* Pick a register outside of BPF range for JIT internal work */
100 #define AUX_REG (MAX_BPF_JIT_REG + 1)
101 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
104 * The following table maps BPF registers to x86-64 registers.
106 * x86-64 register R12 is unused, since if used as base address
107 * register in load/store instructions, it always needs an
108 * extra byte of encoding and is callee saved.
110 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
111 * trampoline. x86-64 register R10 is used for blinding (if enabled).
113 static const int reg2hex[] = {
114 [BPF_REG_0] = 0, /* RAX */
115 [BPF_REG_1] = 7, /* RDI */
116 [BPF_REG_2] = 6, /* RSI */
117 [BPF_REG_3] = 2, /* RDX */
118 [BPF_REG_4] = 1, /* RCX */
119 [BPF_REG_5] = 0, /* R8 */
120 [BPF_REG_6] = 3, /* RBX callee saved */
121 [BPF_REG_7] = 5, /* R13 callee saved */
122 [BPF_REG_8] = 6, /* R14 callee saved */
123 [BPF_REG_9] = 7, /* R15 callee saved */
124 [BPF_REG_FP] = 5, /* RBP readonly */
125 [BPF_REG_AX] = 2, /* R10 temp register */
126 [AUX_REG] = 3, /* R11 temp register */
127 [X86_REG_R9] = 1, /* R9 register, 6th function argument */
130 static const int reg2pt_regs[] = {
131 [BPF_REG_0] = offsetof(struct pt_regs, ax),
132 [BPF_REG_1] = offsetof(struct pt_regs, di),
133 [BPF_REG_2] = offsetof(struct pt_regs, si),
134 [BPF_REG_3] = offsetof(struct pt_regs, dx),
135 [BPF_REG_4] = offsetof(struct pt_regs, cx),
136 [BPF_REG_5] = offsetof(struct pt_regs, r8),
137 [BPF_REG_6] = offsetof(struct pt_regs, bx),
138 [BPF_REG_7] = offsetof(struct pt_regs, r13),
139 [BPF_REG_8] = offsetof(struct pt_regs, r14),
140 [BPF_REG_9] = offsetof(struct pt_regs, r15),
144 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15
145 * which need extra byte of encoding.
146 * rax,rcx,...,rbp have simpler encoding
148 static bool is_ereg(u32 reg)
150 return (1 << reg) & (BIT(BPF_REG_5) |
159 static bool is_axreg(u32 reg)
161 return reg == BPF_REG_0;
164 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
165 static u8 add_1mod(u8 byte, u32 reg)
172 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
181 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
182 static u8 add_1reg(u8 byte, u32 dst_reg)
184 return byte + reg2hex[dst_reg];
187 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
188 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
190 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
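/* Editor's worked example (assuming the elided add_2mod() body ORs in the
 * REX.B/REX.R bits for extended registers, as its callers imply):
 * EMIT_mov(BPF_REG_6, BPF_REG_7) moves R13 into RBX. reg2hex[BPF_REG_6] = 3
 * (rbx) and reg2hex[BPF_REG_7] = 5 (r13, an extended register), so
 *
 *	add_2mod(0x48, dst, src) -> 0x4C  (REX.W + REX.R for r13)
 *	add_2reg(0xC0, dst, src) -> 0xC0 + 3 + (5 << 3) = 0xEB
 *
 * giving the bytes 4C 89 EB, i.e. 'mov rbx, r13'.
 */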
193 static void jit_fill_hole(void *area, unsigned int size)
195 /* Fill whole space with INT3 instructions */
196 memset(area, 0xcc, size);
200 int cleanup_addr; /* Epilogue code offset */
203 /* Maximum number of bytes emitted while JITing one eBPF insn */
204 #define BPF_MAX_INSN_SIZE 128
205 #define BPF_INSN_SAFETY 64
207 /* Number of bytes emit_patch() needs to generate instructions */
208 #define X86_PATCH_SIZE 5
210 #define PROLOGUE_SIZE 25
213 * Emit x86-64 prologue code for BPF program and check its size.
214 * bpf_tail_call helper will skip it while jumping into another program
216 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
219 int cnt = X86_PATCH_SIZE;
221 /* BPF trampoline can be made to work without these nops,
222 * but let's waste 5 bytes for now and optimize later
224 memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
226 EMIT1(0x55); /* push rbp */
227 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
228 /* sub rsp, rounded_stack_depth */
229 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
230 EMIT1(0x53); /* push rbx */
231 EMIT2(0x41, 0x55); /* push r13 */
232 EMIT2(0x41, 0x56); /* push r14 */
233 EMIT2(0x41, 0x57); /* push r15 */
234 if (!ebpf_from_cbpf) {
235 /* zero init tail_call_cnt */
237 BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
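/* Editor's accounting sketch for PROLOGUE_SIZE (25): 5 nop bytes +
 * 1 (push rbp) + 3 (mov rbp, rsp) + 7 (sub rsp, imm32) + 1 (push rbx) +
 * 2 + 2 + 2 (push r13/r14/r15) = 23 bytes; the elided tail_call_cnt
 * zero-init presumably accounts for the remaining 2 bytes.
 */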
242 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
248 offset = func - (ip + X86_PATCH_SIZE);
249 if (!is_simm32(offset)) {
250 pr_err("Target call %p is out of range\n", func);
253 EMIT1_off32(opcode, offset);
258 static int emit_call(u8 **pprog, void *func, void *ip)
260 return emit_patch(pprog, func, ip, 0xE8);
263 static int emit_jump(u8 **pprog, void *func, void *ip)
265 return emit_patch(pprog, func, ip, 0xE9);
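/* Editor's note: emit_patch() above produces the standard 5-byte x86-64 near
 * call/jmp, one opcode byte (0xE8 call / 0xE9 jmp) plus a rel32 displacement
 * computed relative to the end of the instruction: a call at ip targeting
 * func encodes rel32 = func - (ip + 5), which is why X86_PATCH_SIZE is 5 and
 * the offset must pass is_simm32().
 */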
268 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
269 void *old_addr, void *new_addr,
270 const bool text_live)
272 const u8 *nop_insn = ideal_nops[NOP_ATOMIC5];
273 u8 old_insn[X86_PATCH_SIZE];
274 u8 new_insn[X86_PATCH_SIZE];
278 memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
281 ret = t == BPF_MOD_CALL ?
282 emit_call(&prog, old_addr, ip) :
283 emit_jump(&prog, old_addr, ip);
288 memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
291 ret = t == BPF_MOD_CALL ?
292 emit_call(&prog, new_addr, ip) :
293 emit_jump(&prog, new_addr, ip);
299 mutex_lock(&text_mutex);
300 if (memcmp(ip, old_insn, X86_PATCH_SIZE))
302 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
304 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
306 memcpy(ip, new_insn, X86_PATCH_SIZE);
310 mutex_unlock(&text_mutex);
314 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
315 void *old_addr, void *new_addr)
317 if (!is_kernel_text((long)ip) &&
318 !is_bpf_text_address((long)ip))
319 /* BPF poking in modules is not supported */
322 return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
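/* Editor's usage sketch (hypothetical call sites, for illustration only):
 * the poke API flips a 5-byte slot between a nop and a direct call/jump,
 * e.g. attaching and later detaching a trampoline at a patched site:
 *
 *	// nop5 -> call tramp   (old_addr == NULL is treated as "was a nop")
 *	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, tramp);
 *	// call tramp -> nop5
 *	err = bpf_arch_text_poke(ip, BPF_MOD_CALL, tramp, NULL);
 *
 * __bpf_arch_text_poke() verifies the old bytes match before patching and
 * appears to use text_poke_bp() only once the image is live (text_live).
 */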
326 * Generate the following code:
328 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
329 * if (index >= array->map.max_entries)
331 * if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
333 * prog = array->ptrs[index];
336 * goto *(prog->bpf_func + prologue_size);
339 static void emit_bpf_tail_call_indirect(u8 **pprog)
342 int label1, label2, label3;
346 * rdi - pointer to ctx
347 * rsi - pointer to bpf_array
348 * rdx - index in bpf_array
352 * if (index >= array->map.max_entries)
355 EMIT2(0x89, 0xD2); /* mov edx, edx */
356 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
357 offsetof(struct bpf_array, map.max_entries));
358 #define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
359 EMIT2(X86_JBE, OFFSET1); /* jbe out */
363 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
366 EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
367 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
368 #define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
369 EMIT2(X86_JA, OFFSET2); /* ja out */
371 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
372 EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
374 /* prog = array->ptrs[index]; */
375 EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
376 offsetof(struct bpf_array, ptrs));
382 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
383 #define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
384 EMIT2(X86_JE, OFFSET3); /* je out */
387 /* goto *(prog->bpf_func + prologue_size); */
388 EMIT4(0x48, 0x8B, 0x40, /* mov rax, qword ptr [rax + 32] */
389 offsetof(struct bpf_prog, bpf_func));
390 EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE); /* add rax, prologue_size */
393 * Wow we're ready to jump into next BPF program
394 * rdi == ctx (1st arg)
395 * rax == prog->bpf_func + prologue_size
397 RETPOLINE_RAX_BPF_JIT();
400 BUILD_BUG_ON(cnt - label1 != OFFSET1);
401 BUILD_BUG_ON(cnt - label2 != OFFSET2);
402 BUILD_BUG_ON(cnt - label3 != OFFSET3);
406 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
407 u8 **pprog, int addr, u8 *image)
413 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
416 EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
417 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
418 EMIT2(X86_JA, 14); /* ja out */
419 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
420 EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
422 poke->ip = image + (addr - X86_PATCH_SIZE);
423 poke->adj_off = PROLOGUE_SIZE;
425 memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
426 prog += X86_PATCH_SIZE;
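/* Editor's note: the 5-byte nop written above is only a placeholder. Once the
 * image address is final, bpf_tail_call_direct_fixup() below rewrites it via
 * __bpf_arch_text_poke(..., BPF_MOD_JUMP, NULL, target->bpf_func +
 * poke->adj_off, false), i.e. a direct jmp into the target program past its
 * prologue (adj_off == PROLOGUE_SIZE).
 */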
432 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
434 struct bpf_jit_poke_descriptor *poke;
435 struct bpf_array *array;
436 struct bpf_prog *target;
439 for (i = 0; i < prog->aux->size_poke_tab; i++) {
440 poke = &prog->aux->poke_tab[i];
441 WARN_ON_ONCE(READ_ONCE(poke->ip_stable));
443 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
446 array = container_of(poke->tail_call.map, struct bpf_array, map);
447 mutex_lock(&array->aux->poke_mutex);
448 target = array->ptrs[poke->tail_call.key];
450 /* Plain memcpy is used when the image is not live yet
451 * and still not locked as read-only. Once the poke
452 * location is active (poke->ip_stable), any parallel
453 * bpf_arch_text_poke() might still occur on the
454 * read-write image until we finally lock it as
455 * read-only. Both modifications on the given image
456 * are under text_mutex to avoid interference.
458 ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
459 (u8 *)target->bpf_func +
460 poke->adj_off, false);
463 WRITE_ONCE(poke->ip_stable, true);
464 mutex_unlock(&array->aux->poke_mutex);
468 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
469 u32 dst_reg, const u32 imm32)
476 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
477 * (which zero-extends imm32) to save 2 bytes.
479 if (sign_propagate && (s32)imm32 < 0) {
480 /* 'mov %rax, imm32' sign extends imm32 */
481 b1 = add_1mod(0x48, dst_reg);
484 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
489 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
493 if (is_ereg(dst_reg))
494 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
497 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
501 /* mov %eax, imm32 */
502 if (is_ereg(dst_reg))
503 EMIT1(add_1mod(0x40, dst_reg));
504 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
509 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
510 const u32 imm32_hi, const u32 imm32_lo)
515 if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
517 * For emitting a plain u32, where the sign bit must not be
518 * propagated, LLVM tends to load imm64 over mov32
519 * directly, so save a couple of bytes by just doing
520 * 'mov %eax, imm32' instead.
522 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
524 /* movabsq %rax, imm64 */
525 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
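/* Editor's examples of the encodings chosen above (illustrative only; the
 * exact opcode bytes for the sign-propagating case come from the elided
 * b2/b3 assignments):
 *	imm32 == 0                    -> 'xor %eax, %eax' (plus a REX prefix
 *	                                 for r8..r15)
 *	imm32 == 5, !sign_propagate   -> 'mov %eax, 5' via the B8+reg imm32 form
 *	imm32 == -4, sign_propagate   -> 'mov %rax, -4', a REX.W form that
 *	                                 sign-extends a 32-bit immediate
 *	imm64 == 0x100000000          -> movabs via the 48 B8+reg imm64 path
 */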
533 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
540 EMIT_mov(dst_reg, src_reg);
543 if (is_ereg(dst_reg) || is_ereg(src_reg))
544 EMIT1(add_2mod(0x40, dst_reg, src_reg));
545 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
551 /* LDX: dst_reg = *(u8*)(src_reg + off) */
552 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
559 /* Emit 'movzx rax, byte ptr [rax + off]' */
560 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
563 /* Emit 'movzx rax, word ptr [rax + off]' */
564 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
567 /* Emit 'mov eax, dword ptr [rax+0x14]' */
568 if (is_ereg(dst_reg) || is_ereg(src_reg))
569 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
574 /* Emit 'mov rax, qword ptr [rax+0x14]' */
575 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
579 * If insn->off == 0 we can save one extra byte, but
580 * the special case of x86 R13, which always needs an offset,
581 * is not worth the hassle
584 EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
586 EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
590 /* STX: *(u8*)(dst_reg + off) = src_reg */
591 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
598 /* Emit 'mov byte ptr [rax + off], al' */
599 if (is_ereg(dst_reg) || is_ereg(src_reg) ||
600 /* We have to add extra byte for x86 SIL, DIL regs */
601 src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
602 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
607 if (is_ereg(dst_reg) || is_ereg(src_reg))
608 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
613 if (is_ereg(dst_reg) || is_ereg(src_reg))
614 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
619 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
623 EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
625 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
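/* Editor's example of the disp8 vs disp32 choice made by emit_ldx()/emit_stx()
 * above: with off = -8 (fits is_imm8) a store of rsi becomes
 * 'mov qword ptr [rbp - 8], rsi' with a 1-byte displacement, while off = -512
 * falls back to the 4-byte displacement form (the 0x80-based ModRM).
 */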
629 static bool ex_handler_bpf(const struct exception_table_entry *x,
630 struct pt_regs *regs, int trapnr,
631 unsigned long error_code, unsigned long fault_addr)
633 u32 reg = x->fixup >> 8;
635 /* jump over faulting load and clear dest register */
636 *(unsigned long *)((void *)regs + reg) = 0;
637 regs->ip += x->fixup & 0xff;
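/* Editor's sketch of the fixup packing consumed above (see also the comment
 * at the BPF_PROBE_MEM case below): the JIT stores
 * fixup = insn_len | (reg2pt_regs[dst_reg] << 8), so e.g. a 4-byte load into
 * rbx yields fixup = 4 | (offsetof(struct pt_regs, bx) << 8); the low byte
 * advances regs->ip past the faulting load and the high bits select which
 * saved register to clear.
 */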
641 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
642 int oldproglen, struct jit_context *ctx)
644 struct bpf_insn *insn = bpf_prog->insnsi;
645 int insn_cnt = bpf_prog->len;
646 bool seen_exit = false;
647 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
648 int i, cnt = 0, excnt = 0;
652 emit_prologue(&prog, bpf_prog->aux->stack_depth,
653 bpf_prog_was_classic(bpf_prog));
654 addrs[0] = prog - temp;
656 for (i = 1; i <= insn_cnt; i++, insn++) {
657 const s32 imm32 = insn->imm;
658 u32 dst_reg = insn->dst_reg;
659 u32 src_reg = insn->src_reg;
666 switch (insn->code) {
668 case BPF_ALU | BPF_ADD | BPF_X:
669 case BPF_ALU | BPF_SUB | BPF_X:
670 case BPF_ALU | BPF_AND | BPF_X:
671 case BPF_ALU | BPF_OR | BPF_X:
672 case BPF_ALU | BPF_XOR | BPF_X:
673 case BPF_ALU64 | BPF_ADD | BPF_X:
674 case BPF_ALU64 | BPF_SUB | BPF_X:
675 case BPF_ALU64 | BPF_AND | BPF_X:
676 case BPF_ALU64 | BPF_OR | BPF_X:
677 case BPF_ALU64 | BPF_XOR | BPF_X:
678 switch (BPF_OP(insn->code)) {
679 case BPF_ADD: b2 = 0x01; break;
680 case BPF_SUB: b2 = 0x29; break;
681 case BPF_AND: b2 = 0x21; break;
682 case BPF_OR: b2 = 0x09; break;
683 case BPF_XOR: b2 = 0x31; break;
685 if (BPF_CLASS(insn->code) == BPF_ALU64)
686 EMIT1(add_2mod(0x48, dst_reg, src_reg));
687 else if (is_ereg(dst_reg) || is_ereg(src_reg))
688 EMIT1(add_2mod(0x40, dst_reg, src_reg));
689 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
692 case BPF_ALU64 | BPF_MOV | BPF_X:
693 case BPF_ALU | BPF_MOV | BPF_X:
695 BPF_CLASS(insn->code) == BPF_ALU64,
700 case BPF_ALU | BPF_NEG:
701 case BPF_ALU64 | BPF_NEG:
702 if (BPF_CLASS(insn->code) == BPF_ALU64)
703 EMIT1(add_1mod(0x48, dst_reg));
704 else if (is_ereg(dst_reg))
705 EMIT1(add_1mod(0x40, dst_reg));
706 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
709 case BPF_ALU | BPF_ADD | BPF_K:
710 case BPF_ALU | BPF_SUB | BPF_K:
711 case BPF_ALU | BPF_AND | BPF_K:
712 case BPF_ALU | BPF_OR | BPF_K:
713 case BPF_ALU | BPF_XOR | BPF_K:
714 case BPF_ALU64 | BPF_ADD | BPF_K:
715 case BPF_ALU64 | BPF_SUB | BPF_K:
716 case BPF_ALU64 | BPF_AND | BPF_K:
717 case BPF_ALU64 | BPF_OR | BPF_K:
718 case BPF_ALU64 | BPF_XOR | BPF_K:
719 if (BPF_CLASS(insn->code) == BPF_ALU64)
720 EMIT1(add_1mod(0x48, dst_reg));
721 else if (is_ereg(dst_reg))
722 EMIT1(add_1mod(0x40, dst_reg));
725 * b3 holds the 'normal' opcode; the b2 short form is only
726 * valid when dst is eax/rax.
728 switch (BPF_OP(insn->code)) {
752 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
753 else if (is_axreg(dst_reg))
754 EMIT1_off32(b2, imm32);
756 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
759 case BPF_ALU64 | BPF_MOV | BPF_K:
760 case BPF_ALU | BPF_MOV | BPF_K:
761 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
765 case BPF_LD | BPF_IMM | BPF_DW:
766 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
771 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
772 case BPF_ALU | BPF_MOD | BPF_X:
773 case BPF_ALU | BPF_DIV | BPF_X:
774 case BPF_ALU | BPF_MOD | BPF_K:
775 case BPF_ALU | BPF_DIV | BPF_K:
776 case BPF_ALU64 | BPF_MOD | BPF_X:
777 case BPF_ALU64 | BPF_DIV | BPF_X:
778 case BPF_ALU64 | BPF_MOD | BPF_K:
779 case BPF_ALU64 | BPF_DIV | BPF_K:
780 EMIT1(0x50); /* push rax */
781 EMIT1(0x52); /* push rdx */
783 if (BPF_SRC(insn->code) == BPF_X)
784 /* mov r11, src_reg */
785 EMIT_mov(AUX_REG, src_reg);
788 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
790 /* mov rax, dst_reg */
791 EMIT_mov(BPF_REG_0, dst_reg);
795 * equivalent to 'xor rdx, rdx', but one byte less
799 if (BPF_CLASS(insn->code) == BPF_ALU64)
801 EMIT3(0x49, 0xF7, 0xF3);
804 EMIT3(0x41, 0xF7, 0xF3);
806 if (BPF_OP(insn->code) == BPF_MOD)
808 EMIT3(0x49, 0x89, 0xD3);
811 EMIT3(0x49, 0x89, 0xC3);
813 EMIT1(0x5A); /* pop rdx */
814 EMIT1(0x58); /* pop rax */
816 /* mov dst_reg, r11 */
817 EMIT_mov(dst_reg, AUX_REG);
820 case BPF_ALU | BPF_MUL | BPF_K:
821 case BPF_ALU | BPF_MUL | BPF_X:
822 case BPF_ALU64 | BPF_MUL | BPF_K:
823 case BPF_ALU64 | BPF_MUL | BPF_X:
825 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
827 if (dst_reg != BPF_REG_0)
828 EMIT1(0x50); /* push rax */
829 if (dst_reg != BPF_REG_3)
830 EMIT1(0x52); /* push rdx */
832 /* mov r11, dst_reg */
833 EMIT_mov(AUX_REG, dst_reg);
835 if (BPF_SRC(insn->code) == BPF_X)
836 emit_mov_reg(&prog, is64, BPF_REG_0, src_reg);
838 emit_mov_imm32(&prog, is64, BPF_REG_0, imm32);
841 EMIT1(add_1mod(0x48, AUX_REG));
842 else if (is_ereg(AUX_REG))
843 EMIT1(add_1mod(0x40, AUX_REG));
845 EMIT2(0xF7, add_1reg(0xE0, AUX_REG));
847 if (dst_reg != BPF_REG_3)
848 EMIT1(0x5A); /* pop rdx */
849 if (dst_reg != BPF_REG_0) {
850 /* mov dst_reg, rax */
851 EMIT_mov(dst_reg, BPF_REG_0);
852 EMIT1(0x58); /* pop rax */
857 case BPF_ALU | BPF_LSH | BPF_K:
858 case BPF_ALU | BPF_RSH | BPF_K:
859 case BPF_ALU | BPF_ARSH | BPF_K:
860 case BPF_ALU64 | BPF_LSH | BPF_K:
861 case BPF_ALU64 | BPF_RSH | BPF_K:
862 case BPF_ALU64 | BPF_ARSH | BPF_K:
863 if (BPF_CLASS(insn->code) == BPF_ALU64)
864 EMIT1(add_1mod(0x48, dst_reg));
865 else if (is_ereg(dst_reg))
866 EMIT1(add_1mod(0x40, dst_reg));
868 switch (BPF_OP(insn->code)) {
869 case BPF_LSH: b3 = 0xE0; break;
870 case BPF_RSH: b3 = 0xE8; break;
871 case BPF_ARSH: b3 = 0xF8; break;
875 EMIT2(0xD1, add_1reg(b3, dst_reg));
877 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
880 case BPF_ALU | BPF_LSH | BPF_X:
881 case BPF_ALU | BPF_RSH | BPF_X:
882 case BPF_ALU | BPF_ARSH | BPF_X:
883 case BPF_ALU64 | BPF_LSH | BPF_X:
884 case BPF_ALU64 | BPF_RSH | BPF_X:
885 case BPF_ALU64 | BPF_ARSH | BPF_X:
887 /* Check for bad case when dst_reg == rcx */
888 if (dst_reg == BPF_REG_4) {
889 /* mov r11, dst_reg */
890 EMIT_mov(AUX_REG, dst_reg);
894 if (src_reg != BPF_REG_4) { /* common case */
895 EMIT1(0x51); /* push rcx */
897 /* mov rcx, src_reg */
898 EMIT_mov(BPF_REG_4, src_reg);
901 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
902 if (BPF_CLASS(insn->code) == BPF_ALU64)
903 EMIT1(add_1mod(0x48, dst_reg));
904 else if (is_ereg(dst_reg))
905 EMIT1(add_1mod(0x40, dst_reg));
907 switch (BPF_OP(insn->code)) {
908 case BPF_LSH: b3 = 0xE0; break;
909 case BPF_RSH: b3 = 0xE8; break;
910 case BPF_ARSH: b3 = 0xF8; break;
912 EMIT2(0xD3, add_1reg(b3, dst_reg));
914 if (src_reg != BPF_REG_4)
915 EMIT1(0x59); /* pop rcx */
917 if (insn->dst_reg == BPF_REG_4)
918 /* mov dst_reg, r11 */
919 EMIT_mov(insn->dst_reg, AUX_REG);
922 case BPF_ALU | BPF_END | BPF_FROM_BE:
925 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
927 if (is_ereg(dst_reg))
929 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
931 /* Emit 'movzwl eax, ax' */
932 if (is_ereg(dst_reg))
933 EMIT3(0x45, 0x0F, 0xB7);
936 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
939 /* Emit 'bswap eax' to swap lower 4 bytes */
940 if (is_ereg(dst_reg))
944 EMIT1(add_1reg(0xC8, dst_reg));
947 /* Emit 'bswap rax' to swap 8 bytes */
948 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
949 add_1reg(0xC8, dst_reg));
954 case BPF_ALU | BPF_END | BPF_FROM_LE:
958 * Emit 'movzwl eax, ax' to zero extend 16-bit
961 if (is_ereg(dst_reg))
962 EMIT3(0x45, 0x0F, 0xB7);
965 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
968 /* Emit 'mov eax, eax' to clear upper 32-bits */
969 if (is_ereg(dst_reg))
971 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
979 /* ST: *(u8*)(dst_reg + off) = imm */
980 case BPF_ST | BPF_MEM | BPF_B:
981 if (is_ereg(dst_reg))
986 case BPF_ST | BPF_MEM | BPF_H:
987 if (is_ereg(dst_reg))
988 EMIT3(0x66, 0x41, 0xC7);
992 case BPF_ST | BPF_MEM | BPF_W:
993 if (is_ereg(dst_reg))
998 case BPF_ST | BPF_MEM | BPF_DW:
999 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1001 st: if (is_imm8(insn->off))
1002 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1004 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1006 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1009 /* STX: *(u8*)(dst_reg + off) = src_reg */
1010 case BPF_STX | BPF_MEM | BPF_B:
1011 case BPF_STX | BPF_MEM | BPF_H:
1012 case BPF_STX | BPF_MEM | BPF_W:
1013 case BPF_STX | BPF_MEM | BPF_DW:
1014 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1017 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1018 case BPF_LDX | BPF_MEM | BPF_B:
1019 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1020 case BPF_LDX | BPF_MEM | BPF_H:
1021 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1022 case BPF_LDX | BPF_MEM | BPF_W:
1023 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1024 case BPF_LDX | BPF_MEM | BPF_DW:
1025 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1026 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1027 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1028 struct exception_table_entry *ex;
1029 u8 *_insn = image + proglen;
1032 if (!bpf_prog->aux->extable)
1035 if (excnt >= bpf_prog->aux->num_exentries) {
1036 pr_err("ex gen bug\n");
1039 ex = &bpf_prog->aux->extable[excnt++];
1041 delta = _insn - (u8 *)&ex->insn;
1042 if (!is_simm32(delta)) {
1043 pr_err("extable->insn doesn't fit into 32-bit\n");
1048 delta = (u8 *)ex_handler_bpf - (u8 *)&ex->handler;
1049 if (!is_simm32(delta)) {
1050 pr_err("extable->handler doesn't fit into 32-bit\n");
1053 ex->handler = delta;
1055 if (dst_reg > BPF_REG_9) {
1056 pr_err("verifier error\n");
1060 * Compute size of x86 insn and its target dest x86 register.
1061 * ex_handler_bpf() will use lower 8 bits to adjust
1062 * pt_regs->ip to jump over this x86 instruction
1063 * and upper bits to figure out which pt_regs to zero out.
1064 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1065 * of 4 bytes will be ignored and rbx will be zero-initialized.
1067 ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
1071 /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
1072 case BPF_STX | BPF_XADD | BPF_W:
1073 /* Emit 'lock add dword ptr [rax + off], eax' */
1074 if (is_ereg(dst_reg) || is_ereg(src_reg))
1075 EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
1079 case BPF_STX | BPF_XADD | BPF_DW:
1080 EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
1081 xadd: if (is_imm8(insn->off))
1082 EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
1084 EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
1089 case BPF_JMP | BPF_CALL:
1090 func = (u8 *) __bpf_call_base + imm32;
1091 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1095 case BPF_JMP | BPF_TAIL_CALL:
1097 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1098 &prog, addrs[i], image);
1100 emit_bpf_tail_call_indirect(&prog);
1104 case BPF_JMP | BPF_JEQ | BPF_X:
1105 case BPF_JMP | BPF_JNE | BPF_X:
1106 case BPF_JMP | BPF_JGT | BPF_X:
1107 case BPF_JMP | BPF_JLT | BPF_X:
1108 case BPF_JMP | BPF_JGE | BPF_X:
1109 case BPF_JMP | BPF_JLE | BPF_X:
1110 case BPF_JMP | BPF_JSGT | BPF_X:
1111 case BPF_JMP | BPF_JSLT | BPF_X:
1112 case BPF_JMP | BPF_JSGE | BPF_X:
1113 case BPF_JMP | BPF_JSLE | BPF_X:
1114 case BPF_JMP32 | BPF_JEQ | BPF_X:
1115 case BPF_JMP32 | BPF_JNE | BPF_X:
1116 case BPF_JMP32 | BPF_JGT | BPF_X:
1117 case BPF_JMP32 | BPF_JLT | BPF_X:
1118 case BPF_JMP32 | BPF_JGE | BPF_X:
1119 case BPF_JMP32 | BPF_JLE | BPF_X:
1120 case BPF_JMP32 | BPF_JSGT | BPF_X:
1121 case BPF_JMP32 | BPF_JSLT | BPF_X:
1122 case BPF_JMP32 | BPF_JSGE | BPF_X:
1123 case BPF_JMP32 | BPF_JSLE | BPF_X:
1124 /* cmp dst_reg, src_reg */
1125 if (BPF_CLASS(insn->code) == BPF_JMP)
1126 EMIT1(add_2mod(0x48, dst_reg, src_reg));
1127 else if (is_ereg(dst_reg) || is_ereg(src_reg))
1128 EMIT1(add_2mod(0x40, dst_reg, src_reg));
1129 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1132 case BPF_JMP | BPF_JSET | BPF_X:
1133 case BPF_JMP32 | BPF_JSET | BPF_X:
1134 /* test dst_reg, src_reg */
1135 if (BPF_CLASS(insn->code) == BPF_JMP)
1136 EMIT1(add_2mod(0x48, dst_reg, src_reg));
1137 else if (is_ereg(dst_reg) || is_ereg(src_reg))
1138 EMIT1(add_2mod(0x40, dst_reg, src_reg));
1139 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1142 case BPF_JMP | BPF_JSET | BPF_K:
1143 case BPF_JMP32 | BPF_JSET | BPF_K:
1144 /* test dst_reg, imm32 */
1145 if (BPF_CLASS(insn->code) == BPF_JMP)
1146 EMIT1(add_1mod(0x48, dst_reg));
1147 else if (is_ereg(dst_reg))
1148 EMIT1(add_1mod(0x40, dst_reg));
1149 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1152 case BPF_JMP | BPF_JEQ | BPF_K:
1153 case BPF_JMP | BPF_JNE | BPF_K:
1154 case BPF_JMP | BPF_JGT | BPF_K:
1155 case BPF_JMP | BPF_JLT | BPF_K:
1156 case BPF_JMP | BPF_JGE | BPF_K:
1157 case BPF_JMP | BPF_JLE | BPF_K:
1158 case BPF_JMP | BPF_JSGT | BPF_K:
1159 case BPF_JMP | BPF_JSLT | BPF_K:
1160 case BPF_JMP | BPF_JSGE | BPF_K:
1161 case BPF_JMP | BPF_JSLE | BPF_K:
1162 case BPF_JMP32 | BPF_JEQ | BPF_K:
1163 case BPF_JMP32 | BPF_JNE | BPF_K:
1164 case BPF_JMP32 | BPF_JGT | BPF_K:
1165 case BPF_JMP32 | BPF_JLT | BPF_K:
1166 case BPF_JMP32 | BPF_JGE | BPF_K:
1167 case BPF_JMP32 | BPF_JLE | BPF_K:
1168 case BPF_JMP32 | BPF_JSGT | BPF_K:
1169 case BPF_JMP32 | BPF_JSLT | BPF_K:
1170 case BPF_JMP32 | BPF_JSGE | BPF_K:
1171 case BPF_JMP32 | BPF_JSLE | BPF_K:
1172 /* test dst_reg, dst_reg to save one extra byte */
1174 if (BPF_CLASS(insn->code) == BPF_JMP)
1175 EMIT1(add_2mod(0x48, dst_reg, dst_reg));
1176 else if (is_ereg(dst_reg))
1177 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
1178 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1182 /* cmp dst_reg, imm8/32 */
1183 if (BPF_CLASS(insn->code) == BPF_JMP)
1184 EMIT1(add_1mod(0x48, dst_reg));
1185 else if (is_ereg(dst_reg))
1186 EMIT1(add_1mod(0x40, dst_reg));
1189 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1191 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1193 emit_cond_jmp: /* Convert BPF opcode to x86 */
1194 switch (BPF_OP(insn->code)) {
1203 /* GT is unsigned '>', JA in x86 */
1207 /* LT is unsigned '<', JB in x86 */
1211 /* GE is unsigned '>=', JAE in x86 */
1215 /* LE is unsigned '<=', JBE in x86 */
1219 /* Signed '>', GT in x86 */
1223 /* Signed '<', LT in x86 */
1227 /* Signed '>=', GE in x86 */
1231 /* Signed '<=', LE in x86 */
1234 default: /* to silence GCC warning */
1237 jmp_offset = addrs[i + insn->off] - addrs[i];
1238 if (is_imm8(jmp_offset)) {
1239 EMIT2(jmp_cond, jmp_offset);
1240 } else if (is_simm32(jmp_offset)) {
1241 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1243 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1249 case BPF_JMP | BPF_JA:
1250 if (insn->off == -1)
1251 /* -1 jmp instructions will always jump
1252 * backwards two bytes. Explicitly handling
1253 * this case avoids wasting too many passes
1254 * when there are long sequences of replaced
1259 jmp_offset = addrs[i + insn->off] - addrs[i];
1262 /* Optimize out nop jumps */
1265 if (is_imm8(jmp_offset)) {
1266 EMIT2(0xEB, jmp_offset);
1267 } else if (is_simm32(jmp_offset)) {
1268 EMIT1_off32(0xE9, jmp_offset);
1270 pr_err("jmp gen bug %llx\n", jmp_offset);
1275 case BPF_JMP | BPF_EXIT:
1277 jmp_offset = ctx->cleanup_addr - addrs[i];
1281 /* Update cleanup_addr */
1282 ctx->cleanup_addr = proglen;
1283 if (!bpf_prog_was_classic(bpf_prog))
1284 EMIT1(0x5B); /* get rid of tail_call_cnt */
1285 EMIT2(0x41, 0x5F); /* pop r15 */
1286 EMIT2(0x41, 0x5E); /* pop r14 */
1287 EMIT2(0x41, 0x5D); /* pop r13 */
1288 EMIT1(0x5B); /* pop rbx */
1289 EMIT1(0xC9); /* leave */
1290 EMIT1(0xC3); /* ret */
1295 * By design x86-64 JIT should support all BPF instructions.
1296 * This error will be seen if new instruction was added
1297 * to the interpreter, but not to the JIT, or if there is
1300 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1305 if (ilen > BPF_MAX_INSN_SIZE) {
1306 pr_err("bpf_jit: fatal insn size error\n");
1311 if (unlikely(proglen + ilen > oldproglen)) {
1312 pr_err("bpf_jit: fatal error\n");
1315 memcpy(image + proglen, temp, ilen);
1322 if (image && excnt != bpf_prog->aux->num_exentries) {
1323 pr_err("extable is not populated\n");
1329 static void save_regs(struct btf_func_model *m, u8 **prog, int nr_args,
1333 /* Store function arguments to stack.
1334 * For a function that accepts two pointers the sequence will be:
1335 * mov QWORD PTR [rbp-0x10],rdi
1336 * mov QWORD PTR [rbp-0x8],rsi
1338 for (i = 0; i < min(nr_args, 6); i++)
1339 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1341 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1342 -(stack_size - i * 8));
1345 static void restore_regs(struct btf_func_model *m, u8 **prog, int nr_args,
1350 /* Restore function arguments from stack.
1351 * For a function that accepts two pointers the sequence will be:
1352 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1353 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1355 for (i = 0; i < min(nr_args, 6); i++)
1356 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1357 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1359 -(stack_size - i * 8));
1362 static int invoke_bpf(struct btf_func_model *m, u8 **pprog,
1363 struct bpf_prog **progs, int prog_cnt, int stack_size)
1368 for (i = 0; i < prog_cnt; i++) {
1369 if (emit_call(&prog, __bpf_prog_enter, prog))
1371 /* remember prog start time returned by __bpf_prog_enter */
1372 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1374 /* arg1: lea rdi, [rbp - stack_size] */
1375 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1376 /* arg2: progs[i]->insnsi for interpreter */
1377 if (!progs[i]->jited)
1378 emit_mov_imm64(&prog, BPF_REG_2,
1379 (long) progs[i]->insnsi >> 32,
1380 (u32) (long) progs[i]->insnsi);
1381 /* call JITed bpf program or interpreter */
1382 if (emit_call(&prog, progs[i]->bpf_func, prog))
1385 /* arg1: mov rdi, progs[i] */
1386 emit_mov_imm64(&prog, BPF_REG_1, (long) progs[i] >> 32,
1387 (u32) (long) progs[i]);
1388 /* arg2: mov rsi, rbx <- start time in nsec */
1389 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1390 if (emit_call(&prog, __bpf_prog_exit, prog))
1398 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1399 * its 'struct btf_func_model' will be nr_args=2
1400 * The assembly code when eth_type_trans is executing after trampoline:
1404 * sub rsp, 16 // space for skb and dev
1405 * push rbx // temp regs to pass start time
1406 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
1407 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
1408 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1409 * mov rbx, rax // remember start time if bpf stats are enabled
1410 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
1411 * call addr_of_jited_FENTRY_prog
1412 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1413 * mov rsi, rbx // prog start time
1414 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1415 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
1416 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
1421 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
1422 * replaced with 'call generated_bpf_trampoline'. When it returns,
1423 * eth_type_trans will continue executing with the original skb and dev pointers.
1425 * The assembly code when eth_type_trans is called from trampoline:
1429 * sub rsp, 24 // space for skb, dev, return value
1430 * push rbx // temp regs to pass start time
1431 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
1432 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
1433 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1434 * mov rbx, rax // remember start time if bpf stats are enabled
1435 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1436 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
1437 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1438 * mov rsi, rbx // prog start time
1439 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1440 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
1441 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
1442 * call eth_type_trans+5 // execute body of eth_type_trans
1443 * mov qword ptr [rbp - 8], rax // save return value
1444 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1445 * mov rbx, rax // remember start time if bpf stats are enabled
1446 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1447 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
1448 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1449 * mov rsi, rbx // prog start time
1450 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1451 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
1454 * add rsp, 8 // skip eth_type_trans's frame
1455 * ret // return to its caller
1457 int arch_prepare_bpf_trampoline(void *image, struct btf_func_model *m, u32 flags,
1458 struct bpf_prog **fentry_progs, int fentry_cnt,
1459 struct bpf_prog **fexit_progs, int fexit_cnt,
1462 int cnt = 0, nr_args = m->nr_args;
1463 int stack_size = nr_args * 8;
1466 /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
1470 if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1471 (flags & BPF_TRAMP_F_SKIP_FRAME))
1474 if (flags & BPF_TRAMP_F_CALL_ORIG)
1475 stack_size += 8; /* room for return value of orig_call */
1477 if (flags & BPF_TRAMP_F_SKIP_FRAME)
1478 /* skip patched call instruction and point orig_call to actual
1479 * body of the kernel function.
1481 orig_call += X86_PATCH_SIZE;
1485 EMIT1(0x55); /* push rbp */
1486 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
1487 EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
1488 EMIT1(0x53); /* push rbx */
1490 save_regs(m, &prog, nr_args, stack_size);
1493 if (invoke_bpf(m, &prog, fentry_progs, fentry_cnt, stack_size))
1496 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1498 restore_regs(m, &prog, nr_args, stack_size);
1500 /* call original function */
1501 if (emit_call(&prog, orig_call, prog))
1503 /* remember the return value on the stack for the bpf prog to access */
1504 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1508 if (invoke_bpf(m, &prog, fexit_progs, fexit_cnt, stack_size))
1511 if (flags & BPF_TRAMP_F_RESTORE_REGS)
1512 restore_regs(m, &prog, nr_args, stack_size);
1514 if (flags & BPF_TRAMP_F_CALL_ORIG)
1515 /* restore original return value back into RAX */
1516 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
1518 EMIT1(0x5B); /* pop rbx */
1519 EMIT1(0xC9); /* leave */
1520 if (flags & BPF_TRAMP_F_SKIP_FRAME)
1521 /* skip our return address and return to parent */
1522 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
1523 EMIT1(0xC3); /* ret */
1524 /* One half of the page holds the currently active trampoline.
1525 * The other half is an area for the next trampoline.
1526 * Make sure the trampoline generation logic doesn't overflow.
1528 if (WARN_ON_ONCE(prog - (u8 *)image > PAGE_SIZE / 2 - BPF_INSN_SAFETY))
1533 struct x64_jit_data {
1534 struct bpf_binary_header *header;
1538 struct jit_context ctx;
1541 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1543 struct bpf_binary_header *header = NULL;
1544 struct bpf_prog *tmp, *orig_prog = prog;
1545 struct x64_jit_data *jit_data;
1546 int proglen, oldproglen = 0;
1547 struct jit_context ctx = {};
1548 bool tmp_blinded = false;
1549 bool extra_pass = false;
1555 if (!prog->jit_requested)
1558 tmp = bpf_jit_blind_constants(prog);
1560 * If blinding was requested and we failed during blinding,
1561 * we must fall back to the interpreter.
1570 jit_data = prog->aux->jit_data;
1572 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1577 prog->aux->jit_data = jit_data;
1579 addrs = jit_data->addrs;
1581 ctx = jit_data->ctx;
1582 oldproglen = jit_data->proglen;
1583 image = jit_data->image;
1584 header = jit_data->header;
1586 goto skip_init_addrs;
1588 addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
1595 * Before the first pass, make a rough estimate for addrs[]:
1596 * each BPF instruction is translated to less than 64 bytes
1598 for (proglen = 0, i = 0; i <= prog->len; i++) {
1602 ctx.cleanup_addr = proglen;
1606 * The JITed image shrinks with every pass and the loop iterates
1607 * until the image stops shrinking. Very large BPF programs
1608 * may converge on the last pass. In such a case, do one more
1609 * pass to emit the final image.
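 * (Editor's note: the image buffer is only allocated once proglen stops
 * changing, i.e. proglen == oldproglen below; earlier passes run with
 * image == NULL to refine the addrs[] offsets and the total proglen.)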
1611 for (pass = 0; pass < 20 || image; pass++) {
1612 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
1617 bpf_jit_binary_free(header);
1622 if (proglen != oldproglen) {
1623 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
1624 proglen, oldproglen);
1629 if (proglen == oldproglen) {
1631 * The number of entries in extable is the number of BPF_LDX
1632 * insns that access kernel memory via "pointer to BTF type".
1633 * The verifier changed their opcode from LDX|MEM|size
1634 * to LDX|PROBE_MEM|size to make JITing easier.
1636 u32 align = __alignof__(struct exception_table_entry);
1637 u32 extable_size = prog->aux->num_exentries *
1638 sizeof(struct exception_table_entry);
1640 /* allocate module memory for x86 insns and extable */
1641 header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
1642 &image, align, jit_fill_hole);
1647 prog->aux->extable = (void *) image + roundup(proglen, align);
1649 oldproglen = proglen;
1653 if (bpf_jit_enable > 1)
1654 bpf_jit_dump(prog->len, proglen, pass + 1, image);
1657 if (!prog->is_func || extra_pass) {
1658 bpf_tail_call_direct_fixup(prog);
1659 bpf_jit_binary_lock_ro(header);
1661 jit_data->addrs = addrs;
1662 jit_data->ctx = ctx;
1663 jit_data->proglen = proglen;
1664 jit_data->image = image;
1665 jit_data->header = header;
1667 prog->bpf_func = (void *)image;
1669 prog->jited_len = proglen;
1674 if (!image || !prog->is_func || extra_pass) {
1676 bpf_prog_fill_jited_linfo(prog, addrs + 1);
1680 prog->aux->jit_data = NULL;
1684 bpf_jit_prog_release_other(prog, prog == orig_prog ?