1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <asm/extable.h>
15 #include <asm/set_memory.h>
16 #include <asm/nospec-branch.h>
17 #include <asm/text-patching.h>
19 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
32 #define EMIT(bytes, len) \
33 do { prog = emit_code(prog, bytes, len); } while (0)
35 #define EMIT1(b1) EMIT(b1, 1)
36 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
37 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
38 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
40 #define EMIT1_off32(b1, off) \
41 do { EMIT1(b1); EMIT(off, 4); } while (0)
42 #define EMIT2_off32(b1, b2, off) \
43 do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
44 #define EMIT3_off32(b1, b2, b3, off) \
45 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
46 #define EMIT4_off32(b1, b2, b3, b4, off) \
47 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
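/*
 * Illustrative note (not part of the original file): the EMIT* macros
 * append raw opcode bytes in order (b1 first) and advance the 'prog'
 * cursor, e.g.
 *
 *	EMIT3(0x48, 0x89, 0xE5);	emits 48 89 e5   = mov rbp, rsp
 *	EMIT1_off32(0xE8, offset);	emits e8 <rel32> = call rel32
 */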
49 static bool is_imm8(int value)
51 return value <= 127 && value >= -128;
54 static bool is_simm32(s64 value)
56 return value == (s64)(s32)value;
59 static bool is_uimm32(u64 value)
61 return value == (u64)(u32)value;
65 #define EMIT_mov(DST, SRC) \
68 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
71 static int bpf_size_to_x86_bytes(int bpf_size)
73 if (bpf_size == BPF_W)
75 else if (bpf_size == BPF_H)
77 else if (bpf_size == BPF_B)
79 else if (bpf_size == BPF_DW)
86 * List of x86 conditional jump opcodes (. + s8)
87 * Add 0x10 (and an extra 0x0f prefix) to generate far jumps (. + s32)
100 /* Pick a register outside of BPF range for JIT internal work */
101 #define AUX_REG (MAX_BPF_JIT_REG + 1)
102 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
105 * The following table maps BPF registers to x86-64 registers.
107 * x86-64 register R12 is unused, since if used as base address
108 * register in load/store instructions, it always needs an
109 * extra byte of encoding and is callee saved.
111 * x86-64 register R9 is not used by BPF programs, but can be used by BPF
112 * trampoline. x86-64 register R10 is used for blinding (if enabled).
114 static const int reg2hex[] = {
115 [BPF_REG_0] = 0, /* RAX */
116 [BPF_REG_1] = 7, /* RDI */
117 [BPF_REG_2] = 6, /* RSI */
118 [BPF_REG_3] = 2, /* RDX */
119 [BPF_REG_4] = 1, /* RCX */
120 [BPF_REG_5] = 0, /* R8 */
121 [BPF_REG_6] = 3, /* RBX callee saved */
122 [BPF_REG_7] = 5, /* R13 callee saved */
123 [BPF_REG_8] = 6, /* R14 callee saved */
124 [BPF_REG_9] = 7, /* R15 callee saved */
125 [BPF_REG_FP] = 5, /* RBP readonly */
126 [BPF_REG_AX] = 2, /* R10 temp register */
127 [AUX_REG] = 3, /* R11 temp register */
128 [X86_REG_R9] = 1, /* R9 register, 6th function argument */
131 static const int reg2pt_regs[] = {
132 [BPF_REG_0] = offsetof(struct pt_regs, ax),
133 [BPF_REG_1] = offsetof(struct pt_regs, di),
134 [BPF_REG_2] = offsetof(struct pt_regs, si),
135 [BPF_REG_3] = offsetof(struct pt_regs, dx),
136 [BPF_REG_4] = offsetof(struct pt_regs, cx),
137 [BPF_REG_5] = offsetof(struct pt_regs, r8),
138 [BPF_REG_6] = offsetof(struct pt_regs, bx),
139 [BPF_REG_7] = offsetof(struct pt_regs, r13),
140 [BPF_REG_8] = offsetof(struct pt_regs, r14),
141 [BPF_REG_9] = offsetof(struct pt_regs, r15),
145 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15,
146 * which need an extra byte of encoding.
147 * rax,rcx,...,rbp have a simpler encoding.
149 static bool is_ereg(u32 reg)
151 return (1 << reg) & (BIT(BPF_REG_5) |
161 * is_ereg_8l() == true if BPF register 'reg' is mapped to access the x86-64
162 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need an extra byte
163 * of encoding. al,cl,dl,bl have a simpler encoding.
165 static bool is_ereg_8l(u32 reg)
167 return is_ereg(reg) ||
168 (1 << reg) & (BIT(BPF_REG_1) |
173 static bool is_axreg(u32 reg)
175 return reg == BPF_REG_0;
178 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
179 static u8 add_1mod(u8 byte, u32 reg)
186 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
195 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
196 static u8 add_1reg(u8 byte, u32 dst_reg)
198 return byte + reg2hex[dst_reg];
201 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
202 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
204 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
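/*
 * Worked example (illustrative, assuming add_2mod() only ORs in REX.R/REX.B
 * for the extended registers, which do not apply here):
 *
 *	EMIT3(add_2mod(0x48, BPF_REG_1, BPF_REG_2), 0x89,
 *	      add_2reg(0xC0, BPF_REG_1, BPF_REG_2));
 *
 * emits 48 89 f7, i.e. "mov rdi, rsi": reg2hex[BPF_REG_1] = 7 puts rdi in
 * the ModRM r/m field and reg2hex[BPF_REG_2] = 6 puts rsi in the reg field
 * (0xC0 + 7 + (6 << 3) = 0xF7).
 */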
207 /* Some 1-byte opcodes for binary ALU operations */
208 static u8 simple_alu_opcodes[] = {
219 static void jit_fill_hole(void *area, unsigned int size)
221 /* Fill whole space with INT3 instructions */
222 memset(area, 0xcc, size);
226 int cleanup_addr; /* Epilogue code offset */
229 * Program specific offsets of labels in the code; these rely on the
230 * JIT doing at least 2 passes, recording the position on the first
231 * pass, so that the correct offset can be generated on the second pass.
233 int tail_call_direct_label;
234 int tail_call_indirect_label;
237 /* Maximum number of bytes emitted while JITing one eBPF insn */
238 #define BPF_MAX_INSN_SIZE 128
239 #define BPF_INSN_SAFETY 64
241 /* Number of bytes emit_patch() needs to generate a call/jmp instruction */
242 #define X86_PATCH_SIZE 5
243 /* Number of prologue bytes that will be skipped on a tail call */
244 #define X86_TAIL_CALL_OFFSET 11
246 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
250 if (callee_regs_used[0])
251 EMIT1(0x53); /* push rbx */
252 if (callee_regs_used[1])
253 EMIT2(0x41, 0x55); /* push r13 */
254 if (callee_regs_used[2])
255 EMIT2(0x41, 0x56); /* push r14 */
256 if (callee_regs_used[3])
257 EMIT2(0x41, 0x57); /* push r15 */
261 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
265 if (callee_regs_used[3])
266 EMIT2(0x41, 0x5F); /* pop r15 */
267 if (callee_regs_used[2])
268 EMIT2(0x41, 0x5E); /* pop r14 */
269 if (callee_regs_used[1])
270 EMIT2(0x41, 0x5D); /* pop r13 */
271 if (callee_regs_used[0])
272 EMIT1(0x5B); /* pop rbx */
277 * Emit x86-64 prologue code for the BPF program.
278 * The bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
279 * while jumping to another program.
281 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
282 bool tail_call_reachable, bool is_subprog)
286 /* BPF trampoline can be made to work without these nops,
287 * but let's waste 5 bytes for now and optimize later
289 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
290 prog += X86_PATCH_SIZE;
291 if (!ebpf_from_cbpf) {
292 if (tail_call_reachable && !is_subprog)
293 EMIT2(0x31, 0xC0); /* xor eax, eax */
295 EMIT2(0x66, 0x90); /* nop2 */
297 EMIT1(0x55); /* push rbp */
298 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
299 /* sub rsp, rounded_stack_depth */
301 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
302 if (tail_call_reachable)
303 EMIT1(0x50); /* push rax */
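/*
 * Illustrative summary (not part of the original file): the prologue
 * emitted above is
 *
 *	5 bytes	nop5 (patch site used by the BPF trampoline)
 *	2 bytes	xor eax, eax or nop2 (tail_call_cnt initialization)
 *	1 byte	push rbp
 *	3 bytes	mov rbp, rsp
 *
 * i.e. 11 bytes, matching X86_TAIL_CALL_OFFSET: a tail call that jumps to
 * prog->bpf_func + X86_TAIL_CALL_OFFSET resumes at the "sub rsp"/"push rax"
 * part, reusing the frame pointer and the tail call counter already set up
 * by the entry program.
 */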
307 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
312 offset = func - (ip + X86_PATCH_SIZE);
313 if (!is_simm32(offset)) {
314 pr_err("Target call %p is out of range\n", func);
317 EMIT1_off32(opcode, offset);
322 static int emit_call(u8 **pprog, void *func, void *ip)
324 return emit_patch(pprog, func, ip, 0xE8);
327 static int emit_jump(u8 **pprog, void *func, void *ip)
329 return emit_patch(pprog, func, ip, 0xE9);
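/*
 * Note (added for clarity): 0xE8 and 0xE9 are the 5-byte "call rel32" and
 * "jmp rel32" opcodes; the rel32 displacement is relative to the end of
 * the instruction, hence "func - (ip + X86_PATCH_SIZE)" in emit_patch().
 */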
332 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
333 void *old_addr, void *new_addr,
334 const bool text_live)
336 const u8 *nop_insn = x86_nops[5];
337 u8 old_insn[X86_PATCH_SIZE];
338 u8 new_insn[X86_PATCH_SIZE];
342 memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
345 ret = t == BPF_MOD_CALL ?
346 emit_call(&prog, old_addr, ip) :
347 emit_jump(&prog, old_addr, ip);
352 memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
355 ret = t == BPF_MOD_CALL ?
356 emit_call(&prog, new_addr, ip) :
357 emit_jump(&prog, new_addr, ip);
363 mutex_lock(&text_mutex);
364 if (memcmp(ip, old_insn, X86_PATCH_SIZE))
367 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
369 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
371 memcpy(ip, new_insn, X86_PATCH_SIZE);
375 mutex_unlock(&text_mutex);
379 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
380 void *old_addr, void *new_addr)
382 if (!is_kernel_text((long)ip) &&
383 !is_bpf_text_address((long)ip))
384 /* BPF poking in modules is not supported */
387 return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
390 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
392 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
396 #ifdef CONFIG_RETPOLINE
397 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_AMD)) {
399 EMIT2(0xFF, 0xE0 + reg);
400 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
401 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
404 EMIT2(0xFF, 0xE0 + reg);
410 * Generate the following code:
412 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
413 * if (index >= array->map.max_entries)
415 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
417 * prog = array->ptrs[index];
420 * goto *(prog->bpf_func + prologue_size);
423 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
424 u32 stack_depth, u8 *ip,
425 struct jit_context *ctx)
427 int tcc_off = -4 - round_up(stack_depth, 8);
428 u8 *prog = *pprog, *start = *pprog;
432 * rdi - pointer to ctx
433 * rsi - pointer to bpf_array
434 * rdx - index in bpf_array
438 * if (index >= array->map.max_entries)
441 EMIT2(0x89, 0xD2); /* mov edx, edx */
442 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
443 offsetof(struct bpf_array, map.max_entries));
445 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
446 EMIT2(X86_JBE, offset); /* jbe out */
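/*
 * Note (added for clarity): a jcc rel8 instruction is 2 bytes long and its
 * displacement is relative to the next instruction, hence the
 * "(prog + 2 - start)" term.  On the first JIT pass the label is still 0
 * and the offset is meaningless; it becomes correct on a later pass once
 * ctx->tail_call_indirect_label has been recorded (see struct jit_context).
 */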
449 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
452 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp + tcc_off] */
453 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
455 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
456 EMIT2(X86_JAE, offset); /* jae out */
457 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
458 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp + tcc_off], eax */
460 /* prog = array->ptrs[index]; */
461 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
462 offsetof(struct bpf_array, ptrs));
468 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
470 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
471 EMIT2(X86_JE, offset); /* je out */
473 pop_callee_regs(&prog, callee_regs_used);
475 EMIT1(0x58); /* pop rax */
477 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
478 round_up(stack_depth, 8));
480 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
481 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
482 offsetof(struct bpf_prog, bpf_func));
483 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
484 X86_TAIL_CALL_OFFSET);
486 * Now we're ready to jump into next BPF program
487 * rdi == ctx (1st arg)
488 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
490 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
493 ctx->tail_call_indirect_label = prog - start;
497 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
499 bool *callee_regs_used, u32 stack_depth,
500 struct jit_context *ctx)
502 int tcc_off = -4 - round_up(stack_depth, 8);
503 u8 *prog = *pprog, *start = *pprog;
507 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
510 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp + tcc_off] */
511 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
513 offset = ctx->tail_call_direct_label - (prog + 2 - start);
514 EMIT2(X86_JAE, offset); /* jae out */
515 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
516 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp + tcc_off], eax */
518 poke->tailcall_bypass = ip + (prog - start);
519 poke->adj_off = X86_TAIL_CALL_OFFSET;
520 poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
521 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
523 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
524 poke->tailcall_bypass);
526 pop_callee_regs(&prog, callee_regs_used);
527 EMIT1(0x58); /* pop rax */
529 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
531 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
532 prog += X86_PATCH_SIZE;
535 ctx->tail_call_direct_label = prog - start;
540 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
542 struct bpf_jit_poke_descriptor *poke;
543 struct bpf_array *array;
544 struct bpf_prog *target;
547 for (i = 0; i < prog->aux->size_poke_tab; i++) {
548 poke = &prog->aux->poke_tab[i];
549 if (poke->aux && poke->aux != prog->aux)
552 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
554 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
557 array = container_of(poke->tail_call.map, struct bpf_array, map);
558 mutex_lock(&array->aux->poke_mutex);
559 target = array->ptrs[poke->tail_call.key];
561 /* Plain memcpy is used when the image is not live yet
562 * and not yet locked as read-only. Once the poke
563 * location is active (poke->tailcall_target_stable),
564 * a parallel bpf_arch_text_poke() might still operate
565 * on the read-write image until we have finally
566 * locked it as read-only. Both modifications on
567 * the given image are under text_mutex to avoid
570 ret = __bpf_arch_text_poke(poke->tailcall_target,
572 (u8 *)target->bpf_func +
573 poke->adj_off, false);
575 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
577 (u8 *)poke->tailcall_target +
578 X86_PATCH_SIZE, NULL, false);
581 WRITE_ONCE(poke->tailcall_target_stable, true);
582 mutex_unlock(&array->aux->poke_mutex);
586 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
587 u32 dst_reg, const u32 imm32)
593 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
594 * (which zero-extends imm32) to save 2 bytes.
596 if (sign_propagate && (s32)imm32 < 0) {
597 /* 'mov %rax, imm32' sign extends imm32 */
598 b1 = add_1mod(0x48, dst_reg);
601 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
606 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
610 if (is_ereg(dst_reg))
611 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
614 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
618 /* mov %eax, imm32 */
619 if (is_ereg(dst_reg))
620 EMIT1(add_1mod(0x40, dst_reg));
621 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
626 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
627 const u32 imm32_hi, const u32 imm32_lo)
631 if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
633 * For emitting a plain u32, where the sign bit must not be
634 * propagated, LLVM tends to load imm64 over mov32
635 * directly, so save a couple of bytes by just doing
636 * 'mov %eax, imm32' instead.
638 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
640 /* movabsq %rax, imm64 */
641 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
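/*
 * Illustrative note (not part of the original file) on the size trade-offs
 * above: "mov r32, imm32" (B8+rd) is 5 bytes and zero-extends into the full
 * 64-bit register, "mov r/m64, imm32" (REX.W C7 /0) is 7 bytes and
 * sign-extends, and "movabs r64, imm64" (REX.W B8+rd) is 10 bytes.
 * "xor r32, r32" is only 2 bytes (3 with a REX prefix), which is why zero
 * is special-cased in emit_mov_imm32().
 */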
649 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
655 EMIT_mov(dst_reg, src_reg);
658 if (is_ereg(dst_reg) || is_ereg(src_reg))
659 EMIT1(add_2mod(0x40, dst_reg, src_reg));
660 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
666 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
667 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
672 /* 1-byte signed displacement.
674 * If off == 0 we could skip this and save one extra byte, but the
675 * special case of x86 R13, which always needs an offset, is not worth the hassle.
678 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
680 /* 4-byte signed displacement */
681 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
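/*
 * Note (added for clarity): the 0x40 and 0x80 bases above select the ModRM
 * "mod" field: mod=01 means a 1-byte displacement follows, mod=10 means a
 * 4-byte displacement; the reg2hex[] numbers fill the reg and r/m fields.
 */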
687 * Emit a REX byte if it will be necessary to address these registers
689 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
694 EMIT1(add_2mod(0x48, dst_reg, src_reg));
695 else if (is_ereg(dst_reg) || is_ereg(src_reg))
696 EMIT1(add_2mod(0x40, dst_reg, src_reg));
701 * Similar version of maybe_emit_mod() for a single register
703 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
708 EMIT1(add_1mod(0x48, reg));
709 else if (is_ereg(reg))
710 EMIT1(add_1mod(0x40, reg));
714 /* LDX: dst_reg = *(u8*)(src_reg + off) */
715 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
721 /* Emit 'movzx rax, byte ptr [rax + off]' */
722 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
725 /* Emit 'movzx rax, word ptr [rax + off]' */
726 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
729 /* Emit 'mov eax, dword ptr [rax+0x14]' */
730 if (is_ereg(dst_reg) || is_ereg(src_reg))
731 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
736 /* Emit 'mov rax, qword ptr [rax+0x14]' */
737 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
740 emit_insn_suffix(&prog, src_reg, dst_reg, off);
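/*
 * Worked example (illustrative, not part of the original file):
 * emit_ldx(&prog, BPF_W, BPF_REG_0, BPF_REG_1, 20) emits 8b 47 14, i.e.
 * "mov eax, dword ptr [rdi + 0x14]": opcode 0x8B, ModRM 0x47 (mod=01,
 * reg=rax, r/m=rdi), and the 1-byte displacement 0x14 chosen by
 * emit_insn_suffix() because is_imm8(20) is true.
 */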
744 /* STX: *(u8*)(dst_reg + off) = src_reg */
745 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
751 /* Emit 'mov byte ptr [rax + off], al' */
752 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
753 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
754 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
759 if (is_ereg(dst_reg) || is_ereg(src_reg))
760 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
765 if (is_ereg(dst_reg) || is_ereg(src_reg))
766 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
771 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
774 emit_insn_suffix(&prog, dst_reg, src_reg, off);
778 static int emit_atomic(u8 **pprog, u8 atomic_op,
779 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
783 EMIT1(0xF0); /* lock prefix */
785 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
794 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
795 EMIT1(simple_alu_opcodes[atomic_op]);
797 case BPF_ADD | BPF_FETCH:
798 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
802 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
806 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
810 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
814 emit_insn_suffix(&prog, dst_reg, src_reg, off);
820 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
822 u32 reg = x->fixup >> 8;
824 /* jump over faulting load and clear dest register */
825 *(unsigned long *)((void *)regs + reg) = 0;
826 regs->ip += x->fixup & 0xff;
830 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
831 bool *regs_used, bool *tail_call_seen)
835 for (i = 1; i <= insn_cnt; i++, insn++) {
836 if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
837 *tail_call_seen = true;
838 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
840 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
842 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
844 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
849 static void emit_nops(u8 **pprog, int len)
857 if (noplen > ASM_NOP_MAX)
858 noplen = ASM_NOP_MAX;
860 for (i = 0; i < noplen; i++)
861 EMIT1(x86_nops[noplen][i]);
868 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
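/*
 * Note (added for clarity): INSN_SZ_DIFF is the size this BPF insn had in
 * the previous JIT pass (addrs[i] - addrs[i - 1]) minus the bytes emitted
 * for it so far in the current pass (prog - temp).  With jmp padding
 * enabled the difference is filled with nops, so that branch offsets
 * computed from addrs[] stay valid even if an instruction would otherwise
 * shrink between passes.
 */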
870 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
871 int oldproglen, struct jit_context *ctx, bool jmp_padding)
873 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
874 struct bpf_insn *insn = bpf_prog->insnsi;
875 bool callee_regs_used[4] = {};
876 int insn_cnt = bpf_prog->len;
877 bool tail_call_seen = false;
878 bool seen_exit = false;
879 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
881 int ilen, proglen = 0;
885 detect_reg_usage(insn, insn_cnt, callee_regs_used,
888 /* tail call's presence in current prog implies it is reachable */
889 tail_call_reachable |= tail_call_seen;
891 emit_prologue(&prog, bpf_prog->aux->stack_depth,
892 bpf_prog_was_classic(bpf_prog), tail_call_reachable,
893 bpf_prog->aux->func_idx != 0);
894 push_callee_regs(&prog, callee_regs_used);
898 memcpy(image + proglen, temp, ilen);
903 for (i = 1; i <= insn_cnt; i++, insn++) {
904 const s32 imm32 = insn->imm;
905 u32 dst_reg = insn->dst_reg;
906 u32 src_reg = insn->src_reg;
914 switch (insn->code) {
916 case BPF_ALU | BPF_ADD | BPF_X:
917 case BPF_ALU | BPF_SUB | BPF_X:
918 case BPF_ALU | BPF_AND | BPF_X:
919 case BPF_ALU | BPF_OR | BPF_X:
920 case BPF_ALU | BPF_XOR | BPF_X:
921 case BPF_ALU64 | BPF_ADD | BPF_X:
922 case BPF_ALU64 | BPF_SUB | BPF_X:
923 case BPF_ALU64 | BPF_AND | BPF_X:
924 case BPF_ALU64 | BPF_OR | BPF_X:
925 case BPF_ALU64 | BPF_XOR | BPF_X:
926 maybe_emit_mod(&prog, dst_reg, src_reg,
927 BPF_CLASS(insn->code) == BPF_ALU64);
928 b2 = simple_alu_opcodes[BPF_OP(insn->code)];
929 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
932 case BPF_ALU64 | BPF_MOV | BPF_X:
933 case BPF_ALU | BPF_MOV | BPF_X:
935 BPF_CLASS(insn->code) == BPF_ALU64,
940 case BPF_ALU | BPF_NEG:
941 case BPF_ALU64 | BPF_NEG:
942 maybe_emit_1mod(&prog, dst_reg,
943 BPF_CLASS(insn->code) == BPF_ALU64);
944 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
947 case BPF_ALU | BPF_ADD | BPF_K:
948 case BPF_ALU | BPF_SUB | BPF_K:
949 case BPF_ALU | BPF_AND | BPF_K:
950 case BPF_ALU | BPF_OR | BPF_K:
951 case BPF_ALU | BPF_XOR | BPF_K:
952 case BPF_ALU64 | BPF_ADD | BPF_K:
953 case BPF_ALU64 | BPF_SUB | BPF_K:
954 case BPF_ALU64 | BPF_AND | BPF_K:
955 case BPF_ALU64 | BPF_OR | BPF_K:
956 case BPF_ALU64 | BPF_XOR | BPF_K:
957 maybe_emit_1mod(&prog, dst_reg,
958 BPF_CLASS(insn->code) == BPF_ALU64);
961 * b3 holds the 'normal' opcode; the b2 short form is only valid
962 * when dst is eax/rax.
964 switch (BPF_OP(insn->code)) {
988 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
989 else if (is_axreg(dst_reg))
990 EMIT1_off32(b2, imm32);
992 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
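/*
 * Worked example (illustrative, assuming the elided switch above sets
 * b3 = 0xC0 for BPF_ADD as in mainline): BPF_ALU64 | BPF_ADD | BPF_K with
 * dst_reg = BPF_REG_0 and imm32 = 1 takes the is_imm8() path and emits
 * 48 83 c0 01, i.e. "add rax, 0x1" (REX.W, opcode 0x83, ModRM 0xC0
 * selecting the ADD /0 extension and rax, imm8 0x01).
 */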
995 case BPF_ALU64 | BPF_MOV | BPF_K:
996 case BPF_ALU | BPF_MOV | BPF_K:
997 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1001 case BPF_LD | BPF_IMM | BPF_DW:
1002 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1007 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1008 case BPF_ALU | BPF_MOD | BPF_X:
1009 case BPF_ALU | BPF_DIV | BPF_X:
1010 case BPF_ALU | BPF_MOD | BPF_K:
1011 case BPF_ALU | BPF_DIV | BPF_K:
1012 case BPF_ALU64 | BPF_MOD | BPF_X:
1013 case BPF_ALU64 | BPF_DIV | BPF_X:
1014 case BPF_ALU64 | BPF_MOD | BPF_K:
1015 case BPF_ALU64 | BPF_DIV | BPF_K: {
1016 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1018 if (dst_reg != BPF_REG_0)
1019 EMIT1(0x50); /* push rax */
1020 if (dst_reg != BPF_REG_3)
1021 EMIT1(0x52); /* push rdx */
1023 if (BPF_SRC(insn->code) == BPF_X) {
1024 if (src_reg == BPF_REG_0 ||
1025 src_reg == BPF_REG_3) {
1026 /* mov r11, src_reg */
1027 EMIT_mov(AUX_REG, src_reg);
1031 /* mov r11, imm32 */
1032 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1036 if (dst_reg != BPF_REG_0)
1037 /* mov rax, dst_reg */
1038 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1042 * equivalent to 'xor rdx, rdx', but one byte less
1047 maybe_emit_1mod(&prog, src_reg, is64);
1048 EMIT2(0xF7, add_1reg(0xF0, src_reg));
1050 if (BPF_OP(insn->code) == BPF_MOD &&
1051 dst_reg != BPF_REG_3)
1052 /* mov dst_reg, rdx */
1053 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1054 else if (BPF_OP(insn->code) == BPF_DIV &&
1055 dst_reg != BPF_REG_0)
1056 /* mov dst_reg, rax */
1057 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1059 if (dst_reg != BPF_REG_3)
1060 EMIT1(0x5A); /* pop rdx */
1061 if (dst_reg != BPF_REG_0)
1062 EMIT1(0x58); /* pop rax */
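/*
 * Note (added for clarity): x86 div divides the double-width value in
 * rdx:rax by its operand and leaves the quotient in rax and the remainder
 * in rdx.  That is why rdx is zeroed, why the dividend is moved into rax,
 * and why BPF_DIV takes its result from rax while BPF_MOD takes it from
 * rdx; rax and rdx are pushed/popped around the operation above when they
 * hold live BPF registers.
 */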
1066 case BPF_ALU | BPF_MUL | BPF_K:
1067 case BPF_ALU64 | BPF_MUL | BPF_K:
1068 maybe_emit_mod(&prog, dst_reg, dst_reg,
1069 BPF_CLASS(insn->code) == BPF_ALU64);
1072 /* imul dst_reg, dst_reg, imm8 */
1073 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1076 /* imul dst_reg, dst_reg, imm32 */
1078 add_2reg(0xC0, dst_reg, dst_reg),
1082 case BPF_ALU | BPF_MUL | BPF_X:
1083 case BPF_ALU64 | BPF_MUL | BPF_X:
1084 maybe_emit_mod(&prog, src_reg, dst_reg,
1085 BPF_CLASS(insn->code) == BPF_ALU64);
1087 /* imul dst_reg, src_reg */
1088 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1092 case BPF_ALU | BPF_LSH | BPF_K:
1093 case BPF_ALU | BPF_RSH | BPF_K:
1094 case BPF_ALU | BPF_ARSH | BPF_K:
1095 case BPF_ALU64 | BPF_LSH | BPF_K:
1096 case BPF_ALU64 | BPF_RSH | BPF_K:
1097 case BPF_ALU64 | BPF_ARSH | BPF_K:
1098 maybe_emit_1mod(&prog, dst_reg,
1099 BPF_CLASS(insn->code) == BPF_ALU64);
1101 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1103 EMIT2(0xD1, add_1reg(b3, dst_reg));
1105 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1108 case BPF_ALU | BPF_LSH | BPF_X:
1109 case BPF_ALU | BPF_RSH | BPF_X:
1110 case BPF_ALU | BPF_ARSH | BPF_X:
1111 case BPF_ALU64 | BPF_LSH | BPF_X:
1112 case BPF_ALU64 | BPF_RSH | BPF_X:
1113 case BPF_ALU64 | BPF_ARSH | BPF_X:
1115 /* Check for bad case when dst_reg == rcx */
1116 if (dst_reg == BPF_REG_4) {
1117 /* mov r11, dst_reg */
1118 EMIT_mov(AUX_REG, dst_reg);
1122 if (src_reg != BPF_REG_4) { /* common case */
1123 EMIT1(0x51); /* push rcx */
1125 /* mov rcx, src_reg */
1126 EMIT_mov(BPF_REG_4, src_reg);
1129 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1130 maybe_emit_1mod(&prog, dst_reg,
1131 BPF_CLASS(insn->code) == BPF_ALU64);
1133 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1134 EMIT2(0xD3, add_1reg(b3, dst_reg));
1136 if (src_reg != BPF_REG_4)
1137 EMIT1(0x59); /* pop rcx */
1139 if (insn->dst_reg == BPF_REG_4)
1140 /* mov dst_reg, r11 */
1141 EMIT_mov(insn->dst_reg, AUX_REG);
1144 case BPF_ALU | BPF_END | BPF_FROM_BE:
1147 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1149 if (is_ereg(dst_reg))
1151 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1153 /* Emit 'movzwl eax, ax' */
1154 if (is_ereg(dst_reg))
1155 EMIT3(0x45, 0x0F, 0xB7);
1158 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1161 /* Emit 'bswap eax' to swap lower 4 bytes */
1162 if (is_ereg(dst_reg))
1166 EMIT1(add_1reg(0xC8, dst_reg));
1169 /* Emit 'bswap rax' to swap 8 bytes */
1170 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1171 add_1reg(0xC8, dst_reg));
1176 case BPF_ALU | BPF_END | BPF_FROM_LE:
1180 * Emit 'movzwl eax, ax' to zero extend 16-bit
1183 if (is_ereg(dst_reg))
1184 EMIT3(0x45, 0x0F, 0xB7);
1187 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1190 /* Emit 'mov eax, eax' to clear upper 32-bits */
1191 if (is_ereg(dst_reg))
1193 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1201 /* speculation barrier */
1202 case BPF_ST | BPF_NOSPEC:
1203 if (boot_cpu_has(X86_FEATURE_XMM2))
1207 /* ST: *(u8*)(dst_reg + off) = imm */
1208 case BPF_ST | BPF_MEM | BPF_B:
1209 if (is_ereg(dst_reg))
1214 case BPF_ST | BPF_MEM | BPF_H:
1215 if (is_ereg(dst_reg))
1216 EMIT3(0x66, 0x41, 0xC7);
1220 case BPF_ST | BPF_MEM | BPF_W:
1221 if (is_ereg(dst_reg))
1226 case BPF_ST | BPF_MEM | BPF_DW:
1227 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1229 st: if (is_imm8(insn->off))
1230 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1232 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1234 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1237 /* STX: *(u8*)(dst_reg + off) = src_reg */
1238 case BPF_STX | BPF_MEM | BPF_B:
1239 case BPF_STX | BPF_MEM | BPF_H:
1240 case BPF_STX | BPF_MEM | BPF_W:
1241 case BPF_STX | BPF_MEM | BPF_DW:
1242 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1245 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1246 case BPF_LDX | BPF_MEM | BPF_B:
1247 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1248 case BPF_LDX | BPF_MEM | BPF_H:
1249 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1250 case BPF_LDX | BPF_MEM | BPF_W:
1251 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1252 case BPF_LDX | BPF_MEM | BPF_DW:
1253 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1254 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1255 /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM,
1256 * add abs(insn->off) to the limit to make sure that a negative
1257 * offset won't be an issue.
1258 * insn->off is s16, so it won't affect valid pointers.
1260 u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
1261 u8 *end_of_jmp1, *end_of_jmp2;
1263 /* Conservatively check that src_reg + insn->off is a kernel address:
1264 * 1. src_reg + insn->off >= limit
1265 * 2. src_reg + insn->off doesn't become small positive.
1266 * Cannot do src_reg + insn->off >= limit in one branch,
1267 * since it needs two spare registers, but JIT has only one.
1270 /* movabsq r11, limit */
1271 EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
1272 EMIT((u32)limit, 4);
1273 EMIT(limit >> 32, 4);
1274 /* cmp src_reg, r11 */
1275 maybe_emit_mod(&prog, src_reg, AUX_REG, true);
1276 EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
1277 /* if unsigned '<' goto end_of_jmp2 */
1281 /* mov r11, src_reg */
1282 emit_mov_reg(&prog, true, AUX_REG, src_reg);
1283 /* add r11, insn->off */
1284 maybe_emit_1mod(&prog, AUX_REG, true);
1285 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
1286 /* jmp if not carry to start_of_ldx
1287 * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
1288 * that has to be rejected.
1290 EMIT2(0x73 /* JNC */, 0);
1293 /* xor dst_reg, dst_reg */
1294 emit_mov_imm32(&prog, false, dst_reg, 0);
1295 /* jmp byte_after_ldx */
1298 /* populate jmp_offset for JB above to jump to xor dst_reg */
1299 end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
1300 /* populate jmp_offset for JNC above to jump to start_of_ldx */
1301 start_of_ldx = prog;
1302 end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
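/*
 * Rough shape of the guard emitted above (illustrative):
 *
 *	movabs r11, limit
 *	cmp    src_reg, r11
 *	jb     1f			; below the kernel limit -> zero dst_reg
 *	mov    r11, src_reg
 *	add    r11, insn->off
 *	jnc    2f			; no wrap-around -> do the load
 * 1:	xor    dst_reg, dst_reg
 *	jmp    3f
 * 2:	<the load emitted by emit_ldx() below>
 * 3:
 */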
1304 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1305 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1306 struct exception_table_entry *ex;
1307 u8 *_insn = image + proglen + (start_of_ldx - temp);
1310 /* populate jmp_offset for JMP above */
1311 start_of_ldx[-1] = prog - start_of_ldx;
1313 if (!bpf_prog->aux->extable)
1316 if (excnt >= bpf_prog->aux->num_exentries) {
1317 pr_err("ex gen bug\n");
1320 ex = &bpf_prog->aux->extable[excnt++];
1322 delta = _insn - (u8 *)&ex->insn;
1323 if (!is_simm32(delta)) {
1324 pr_err("extable->insn doesn't fit into 32-bit\n");
1329 ex->data = EX_TYPE_BPF;
1331 if (dst_reg > BPF_REG_9) {
1332 pr_err("verifier error\n");
1336 * Compute size of x86 insn and its target dest x86 register.
1337 * ex_handler_bpf() will use lower 8 bits to adjust
1338 * pt_regs->ip to jump over this x86 instruction
1339 * and upper bits to figure out which pt_regs to zero out.
1340 * End result: the 4-byte x86 insn "mov rbx, qword ptr [rax+0x14]"
1341 * will be skipped and rbx will be zero-initialized.
1343 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
1347 case BPF_STX | BPF_ATOMIC | BPF_W:
1348 case BPF_STX | BPF_ATOMIC | BPF_DW:
1349 if (insn->imm == (BPF_AND | BPF_FETCH) ||
1350 insn->imm == (BPF_OR | BPF_FETCH) ||
1351 insn->imm == (BPF_XOR | BPF_FETCH)) {
1352 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1353 u32 real_src_reg = src_reg;
1354 u32 real_dst_reg = dst_reg;
1358 * Can't be implemented with a single x86 insn.
1359 * Need to do a CMPXCHG loop.
1362 /* Will need RAX as a CMPXCHG operand so save R0 */
1363 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1364 if (src_reg == BPF_REG_0)
1365 real_src_reg = BPF_REG_AX;
1366 if (dst_reg == BPF_REG_0)
1367 real_dst_reg = BPF_REG_AX;
1369 branch_target = prog;
1370 /* Load old value */
1371 emit_ldx(&prog, BPF_SIZE(insn->code),
1372 BPF_REG_0, real_dst_reg, insn->off);
1374 * Perform the (commutative) operation locally,
1375 * put the result in the AUX_REG.
1377 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1378 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1379 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1380 add_2reg(0xC0, AUX_REG, real_src_reg));
1381 /* Attempt to swap in new value */
1382 err = emit_atomic(&prog, BPF_CMPXCHG,
1383 real_dst_reg, AUX_REG,
1385 BPF_SIZE(insn->code));
1389 * ZF tells us whether we won the race. If it's
1390 * cleared we need to try again.
1392 EMIT2(X86_JNE, -(prog - branch_target) - 2);
1393 /* Return the pre-modification value */
1394 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1395 /* Restore R0 after clobbering RAX */
1396 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
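/*
 * Rough shape of the sequence emitted above (illustrative):
 *
 *	mov  r10, rax			; save R0, rax is needed by cmpxchg
 * restart:
 *	mov  rax, [dst_reg + off]	; load the old value
 *	mov  r11, rax
 *	and/or/xor r11, src_reg		; compute the new value locally
 *	lock cmpxchg [dst_reg + off], r11 ; store it iff memory still == rax
 *	jne  restart			; lost the race, try again
 *	mov  src_reg, rax		; BPF_FETCH result: the old value
 *	mov  rax, r10			; restore R0
 */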
1400 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1401 insn->off, BPF_SIZE(insn->code));
1407 case BPF_JMP | BPF_CALL:
1408 func = (u8 *) __bpf_call_base + imm32;
1409 if (tail_call_reachable) {
1410 EMIT3_off32(0x48, 0x8B, 0x85,
1411 -(bpf_prog->aux->stack_depth + 8));
1412 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1415 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1420 case BPF_JMP | BPF_TAIL_CALL:
1422 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1423 &prog, image + addrs[i - 1],
1425 bpf_prog->aux->stack_depth,
1428 emit_bpf_tail_call_indirect(&prog,
1430 bpf_prog->aux->stack_depth,
1431 image + addrs[i - 1],
1436 case BPF_JMP | BPF_JEQ | BPF_X:
1437 case BPF_JMP | BPF_JNE | BPF_X:
1438 case BPF_JMP | BPF_JGT | BPF_X:
1439 case BPF_JMP | BPF_JLT | BPF_X:
1440 case BPF_JMP | BPF_JGE | BPF_X:
1441 case BPF_JMP | BPF_JLE | BPF_X:
1442 case BPF_JMP | BPF_JSGT | BPF_X:
1443 case BPF_JMP | BPF_JSLT | BPF_X:
1444 case BPF_JMP | BPF_JSGE | BPF_X:
1445 case BPF_JMP | BPF_JSLE | BPF_X:
1446 case BPF_JMP32 | BPF_JEQ | BPF_X:
1447 case BPF_JMP32 | BPF_JNE | BPF_X:
1448 case BPF_JMP32 | BPF_JGT | BPF_X:
1449 case BPF_JMP32 | BPF_JLT | BPF_X:
1450 case BPF_JMP32 | BPF_JGE | BPF_X:
1451 case BPF_JMP32 | BPF_JLE | BPF_X:
1452 case BPF_JMP32 | BPF_JSGT | BPF_X:
1453 case BPF_JMP32 | BPF_JSLT | BPF_X:
1454 case BPF_JMP32 | BPF_JSGE | BPF_X:
1455 case BPF_JMP32 | BPF_JSLE | BPF_X:
1456 /* cmp dst_reg, src_reg */
1457 maybe_emit_mod(&prog, dst_reg, src_reg,
1458 BPF_CLASS(insn->code) == BPF_JMP);
1459 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1462 case BPF_JMP | BPF_JSET | BPF_X:
1463 case BPF_JMP32 | BPF_JSET | BPF_X:
1464 /* test dst_reg, src_reg */
1465 maybe_emit_mod(&prog, dst_reg, src_reg,
1466 BPF_CLASS(insn->code) == BPF_JMP);
1467 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1470 case BPF_JMP | BPF_JSET | BPF_K:
1471 case BPF_JMP32 | BPF_JSET | BPF_K:
1472 /* test dst_reg, imm32 */
1473 maybe_emit_1mod(&prog, dst_reg,
1474 BPF_CLASS(insn->code) == BPF_JMP);
1475 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1478 case BPF_JMP | BPF_JEQ | BPF_K:
1479 case BPF_JMP | BPF_JNE | BPF_K:
1480 case BPF_JMP | BPF_JGT | BPF_K:
1481 case BPF_JMP | BPF_JLT | BPF_K:
1482 case BPF_JMP | BPF_JGE | BPF_K:
1483 case BPF_JMP | BPF_JLE | BPF_K:
1484 case BPF_JMP | BPF_JSGT | BPF_K:
1485 case BPF_JMP | BPF_JSLT | BPF_K:
1486 case BPF_JMP | BPF_JSGE | BPF_K:
1487 case BPF_JMP | BPF_JSLE | BPF_K:
1488 case BPF_JMP32 | BPF_JEQ | BPF_K:
1489 case BPF_JMP32 | BPF_JNE | BPF_K:
1490 case BPF_JMP32 | BPF_JGT | BPF_K:
1491 case BPF_JMP32 | BPF_JLT | BPF_K:
1492 case BPF_JMP32 | BPF_JGE | BPF_K:
1493 case BPF_JMP32 | BPF_JLE | BPF_K:
1494 case BPF_JMP32 | BPF_JSGT | BPF_K:
1495 case BPF_JMP32 | BPF_JSLT | BPF_K:
1496 case BPF_JMP32 | BPF_JSGE | BPF_K:
1497 case BPF_JMP32 | BPF_JSLE | BPF_K:
1498 /* test dst_reg, dst_reg to save one extra byte */
1500 maybe_emit_mod(&prog, dst_reg, dst_reg,
1501 BPF_CLASS(insn->code) == BPF_JMP);
1502 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1506 /* cmp dst_reg, imm8/32 */
1507 maybe_emit_1mod(&prog, dst_reg,
1508 BPF_CLASS(insn->code) == BPF_JMP);
1511 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1513 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1515 emit_cond_jmp: /* Convert BPF opcode to x86 */
1516 switch (BPF_OP(insn->code)) {
1525 /* GT is unsigned '>', JA in x86 */
1529 /* LT is unsigned '<', JB in x86 */
1533 /* GE is unsigned '>=', JAE in x86 */
1537 /* LE is unsigned '<=', JBE in x86 */
1541 /* Signed '>', GT in x86 */
1545 /* Signed '<', LT in x86 */
1549 /* Signed '>=', GE in x86 */
1553 /* Signed '<=', LE in x86 */
1556 default: /* to silence GCC warning */
1559 jmp_offset = addrs[i + insn->off] - addrs[i];
1560 if (is_imm8(jmp_offset)) {
1562 /* To keep the jmp_offset valid, the extra bytes are
1563 * padded before the jump insn, so we subtract the
1564 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1566 * If the previous pass already emits an imm8
1567 * jmp_cond, then this BPF insn won't shrink, so
1570 * On the other hand, if the previous pass emits an
1571 * imm32 jmp_cond, the extra 4 bytes(*) are padded to
1572 * keep the image from shrinking further.
1574 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1575 * is 2 bytes, so the size difference is 4 bytes.
1577 nops = INSN_SZ_DIFF - 2;
1578 if (nops != 0 && nops != 4) {
1579 pr_err("unexpected jmp_cond padding: %d bytes\n",
1583 emit_nops(&prog, nops);
1585 EMIT2(jmp_cond, jmp_offset);
1586 } else if (is_simm32(jmp_offset)) {
1587 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1589 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1595 case BPF_JMP | BPF_JA:
1596 if (insn->off == -1)
1597 /* -1 jmp instructions will always jump
1598 * backwards two bytes. Explicitly handling
1599 * this case avoids wasting too many passes
1600 * when there are long sequences of replaced
1605 jmp_offset = addrs[i + insn->off] - addrs[i];
1609 * If jmp_padding is enabled, the extra nops will
1610 * be inserted. Otherwise, optimize out nop jumps.
1613 /* There are 3 possible conditions.
1614 * (1) This BPF_JA is already optimized out in
1615 * the previous run, so there is no need
1616 * to pad any extra byte (0 byte).
1617 * (2) The previous pass emits an imm8 jmp,
1618 * so we pad 2 bytes to match the previous
1620 * (3) Similarly, the previous pass emits an
1621 * imm32 jmp, and 5 bytes is padded.
1623 nops = INSN_SZ_DIFF;
1624 if (nops != 0 && nops != 2 && nops != 5) {
1625 pr_err("unexpected nop jump padding: %d bytes\n",
1629 emit_nops(&prog, nops);
1634 if (is_imm8(jmp_offset)) {
1636 /* To avoid breaking jmp_offset, the extra bytes
1637 * are padded before the actual jmp insn, so
1638 * 2 bytes are subtracted from INSN_SZ_DIFF.
1640 * If the previous pass already emits an imm8
1641 * jmp, there is nothing to pad (0 byte).
1643 * If it emits an imm32 jmp (5 bytes) previously
1644 * and now an imm8 jmp (2 bytes), then we pad
1645 * (5 - 2 = 3) bytes to stop the image from
1646 * shrinking further.
1648 nops = INSN_SZ_DIFF - 2;
1649 if (nops != 0 && nops != 3) {
1650 pr_err("unexpected jump padding: %d bytes\n",
1654 emit_nops(&prog, INSN_SZ_DIFF - 2);
1656 EMIT2(0xEB, jmp_offset);
1657 } else if (is_simm32(jmp_offset)) {
1658 EMIT1_off32(0xE9, jmp_offset);
1660 pr_err("jmp gen bug %llx\n", jmp_offset);
1665 case BPF_JMP | BPF_EXIT:
1667 jmp_offset = ctx->cleanup_addr - addrs[i];
1671 /* Update cleanup_addr */
1672 ctx->cleanup_addr = proglen;
1673 pop_callee_regs(&prog, callee_regs_used);
1674 EMIT1(0xC9); /* leave */
1675 EMIT1(0xC3); /* ret */
1680 * By design x86-64 JIT should support all BPF instructions.
1681 * This error will be seen if a new instruction was added
1682 * to the interpreter, but not to the JIT, or if there is
1685 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1690 if (ilen > BPF_MAX_INSN_SIZE) {
1691 pr_err("bpf_jit: fatal insn size error\n");
1697 * When populating the image, assert that:
1699 * i) We do not write beyond the allocated space, and
1700 * ii) addrs[i] did not change from the prior run, in order
1701 * to validate assumptions made for computing branch
1704 if (unlikely(proglen + ilen > oldproglen ||
1705 proglen + ilen != addrs[i])) {
1706 pr_err("bpf_jit: fatal error\n");
1709 memcpy(image + proglen, temp, ilen);
1716 if (image && excnt != bpf_prog->aux->num_exentries) {
1717 pr_err("extable is not populated\n");
1723 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1727 /* Store function arguments to stack.
1728 * For a function that accepts two pointers the sequence will be:
1729 * mov QWORD PTR [rbp-0x10],rdi
1730 * mov QWORD PTR [rbp-0x8],rsi
1732 for (i = 0; i < min(nr_args, 6); i++)
1733 emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
1735 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1736 -(stack_size - i * 8));
1739 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1744 /* Restore function arguments from stack.
1745 * For a function that accepts two pointers the sequence will be:
1746 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1747 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1749 for (i = 0; i < min(nr_args, 6); i++)
1750 emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
1751 i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
1753 -(stack_size - i * 8));
1756 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1757 struct bpf_prog *p, int stack_size, bool save_ret)
1762 /* arg1: mov rdi, progs[i] */
1763 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1764 if (emit_call(&prog,
1765 p->aux->sleepable ? __bpf_prog_enter_sleepable :
1766 __bpf_prog_enter, prog))
1768 /* remember prog start time returned by __bpf_prog_enter */
1769 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1771 /* if (__bpf_prog_enter*(prog) == 0)
1772 * goto skip_exec_of_prog;
1774 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
1775 /* emit 2 nops that will be replaced with JE insn */
1777 emit_nops(&prog, 2);
1779 /* arg1: lea rdi, [rbp - stack_size] */
1780 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1781 /* arg2: progs[i]->insnsi for interpreter */
1783 emit_mov_imm64(&prog, BPF_REG_2,
1784 (long) p->insnsi >> 32,
1785 (u32) (long) p->insnsi);
1786 /* call JITed bpf program or interpreter */
1787 if (emit_call(&prog, p->bpf_func, prog))
1791 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return value
1792 * of the previous call, which is then passed on the stack to
1793 * the next BPF program.
1795 * BPF_TRAMP_FENTRY trampoline may need to return the return
1796 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
1799 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1801 /* replace 2 nops with JE insn, since jmp target is known */
1802 jmp_insn[0] = X86_JE;
1803 jmp_insn[1] = prog - jmp_insn - 2;
1805 /* arg1: mov rdi, progs[i] */
1806 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1807 /* arg2: mov rsi, rbx <- start time in nsec */
1808 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1809 if (emit_call(&prog,
1810 p->aux->sleepable ? __bpf_prog_exit_sleepable :
1811 __bpf_prog_exit, prog))
1818 static void emit_align(u8 **pprog, u32 align)
1820 u8 *target, *prog = *pprog;
1822 target = PTR_ALIGN(prog, align);
1824 emit_nops(&prog, target - prog);
1829 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1834 offset = func - (ip + 2 + 4);
1835 if (!is_simm32(offset)) {
1836 pr_err("Target %p is out of range\n", func);
1839 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
1844 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
1845 struct bpf_tramp_progs *tp, int stack_size,
1851 for (i = 0; i < tp->nr_progs; i++) {
1852 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size,
1860 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
1861 struct bpf_tramp_progs *tp, int stack_size,
1867 /* The first fmod_ret program will receive a garbage return value.
1868 * Set this to 0 to avoid confusing the program.
1870 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
1871 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1872 for (i = 0; i < tp->nr_progs; i++) {
1873 if (invoke_bpf_prog(m, &prog, tp->progs[i], stack_size, true))
1876 /* mod_ret prog stored return value into [rbp - 8]. Emit:
1877 * if (*(u64 *)(rbp - 8) != 0)
1880 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
1881 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
1883 /* Save the location of the branch and generate 6 nops
1884 * (4 bytes for an offset and 2 bytes for the jump). These nops
1885 * are replaced with a conditional jump once do_fexit (i.e. the
1886 * start of the fexit invocation) is finalized.
1889 emit_nops(&prog, 4 + 2);
1896 static bool is_valid_bpf_tramp_flags(unsigned int flags)
1898 if ((flags & BPF_TRAMP_F_RESTORE_REGS) &&
1899 (flags & BPF_TRAMP_F_SKIP_FRAME))
1903 * BPF_TRAMP_F_RET_FENTRY_RET is only used by bpf_struct_ops,
1904 * and it must be used alone.
1906 if ((flags & BPF_TRAMP_F_RET_FENTRY_RET) &&
1907 (flags & ~BPF_TRAMP_F_RET_FENTRY_RET))
1914 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
1915 * its 'struct btf_func_model' will have nr_args=2
1916 * The assembly code when eth_type_trans is executing after trampoline:
1920 * sub rsp, 16 // space for skb and dev
1921 * push rbx // temp regs to pass start time
1922 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
1923 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
1924 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1925 * mov rbx, rax // remember start time if bpf stats are enabled
1926 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
1927 * call addr_of_jited_FENTRY_prog
1928 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1929 * mov rsi, rbx // prog start time
1930 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1931 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
1932 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
1937 * eth_type_trans has a 5-byte nop at the beginning. These 5 bytes will be
1938 * replaced with 'call generated_bpf_trampoline'. When it returns,
1939 * eth_type_trans will continue executing with the original skb and dev pointers.
1941 * The assembly code when eth_type_trans is called from trampoline:
1945 * sub rsp, 24 // space for skb, dev, return value
1946 * push rbx // temp regs to pass start time
1947 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
1948 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
1949 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1950 * mov rbx, rax // remember start time if bpf stats are enabled
1951 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1952 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
1953 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1954 * mov rsi, rbx // prog start time
1955 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1956 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
1957 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
1958 * call eth_type_trans+5 // execute body of eth_type_trans
1959 * mov qword ptr [rbp - 8], rax // save return value
1960 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
1961 * mov rbx, rax // remember start time if bpf stats are enabled
1962 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
1963 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
1964 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
1965 * mov rsi, rbx // prog start time
1966 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
1967 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
1970 * add rsp, 8 // skip eth_type_trans's frame
1971 * ret // return to its caller
1973 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
1974 const struct btf_func_model *m, u32 flags,
1975 struct bpf_tramp_progs *tprogs,
1978 int ret, i, nr_args = m->nr_args;
1979 int regs_off, ip_off, args_off, stack_size = nr_args * 8;
1980 struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
1981 struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
1982 struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
1983 u8 **branches = NULL;
1987 /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
1991 if (!is_valid_bpf_tramp_flags(flags))
1994 /* Generated trampoline stack layout:
1996 * RBP + 8 [ return address ]
1999 * RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or
2000 * BPF_TRAMP_F_RET_FENTRY_RET flags
2002 * [ reg_argN ] always
2004 * RBP - regs_off [ reg_arg1 ] program's ctx pointer
2006 * RBP - args_off [ args count ] always
2008 * RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
2011 /* room for return value of orig_call or fentry prog */
2012 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2016 regs_off = stack_size;
2020 args_off = stack_size;
2022 if (flags & BPF_TRAMP_F_IP_ARG)
2023 stack_size += 8; /* room for IP address argument */
2025 ip_off = stack_size;
2027 if (flags & BPF_TRAMP_F_SKIP_FRAME)
2028 /* skip the patched call instruction and point orig_call to the actual
2029 * body of the kernel function.
2031 orig_call += X86_PATCH_SIZE;
2035 EMIT1(0x55); /* push rbp */
2036 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
2037 EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
2038 EMIT1(0x53); /* push rbx */
2040 /* Store number of arguments of the traced function:
2042 * mov QWORD PTR [rbp - args_off], rax
2044 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args);
2045 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
2047 if (flags & BPF_TRAMP_F_IP_ARG) {
2048 /* Store IP address of the traced function:
2049 * mov rax, QWORD PTR [rbp + 8]
2050 * sub rax, X86_PATCH_SIZE
2051 * mov QWORD PTR [rbp - ip_off], rax
2053 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
2054 EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
2055 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
2058 save_regs(m, &prog, nr_args, regs_off);
2060 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2061 /* arg1: mov rdi, im */
2062 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2063 if (emit_call(&prog, __bpf_tramp_enter, prog)) {
2069 if (fentry->nr_progs)
2070 if (invoke_bpf(m, &prog, fentry, regs_off,
2071 flags & BPF_TRAMP_F_RET_FENTRY_RET))
2074 if (fmod_ret->nr_progs) {
2075 branches = kcalloc(fmod_ret->nr_progs, sizeof(u8 *),
2080 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
2087 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2088 restore_regs(m, &prog, nr_args, regs_off);
2090 /* call original function */
2091 if (emit_call(&prog, orig_call, prog)) {
2095 /* remember the return value on the stack for the bpf prog to access */
2096 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2097 im->ip_after_call = prog;
2098 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
2099 prog += X86_PATCH_SIZE;
2102 if (fmod_ret->nr_progs) {
2103 /* From Intel 64 and IA-32 Architectures Optimization
2104 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2105 * Coding Rule 11: All branch targets should be 16-byte
2108 emit_align(&prog, 16);
2109 /* Update the branches saved in invoke_bpf_mod_ret with the
2110 * aligned address of do_fexit.
2112 for (i = 0; i < fmod_ret->nr_progs; i++)
2113 emit_cond_near_jump(&branches[i], prog, branches[i],
2117 if (fexit->nr_progs)
2118 if (invoke_bpf(m, &prog, fexit, regs_off, false)) {
2123 if (flags & BPF_TRAMP_F_RESTORE_REGS)
2124 restore_regs(m, &prog, nr_args, regs_off);
2126 /* This needs to be done regardless. If there were fmod_ret programs,
2127 * the return value is only updated on the stack and still needs to be
2130 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2131 im->ip_epilogue = prog;
2132 /* arg1: mov rdi, im */
2133 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2134 if (emit_call(&prog, __bpf_tramp_exit, prog)) {
2139 /* restore return value of orig_call or fentry prog back into RAX */
2141 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2143 EMIT1(0x5B); /* pop rbx */
2144 EMIT1(0xC9); /* leave */
2145 if (flags & BPF_TRAMP_F_SKIP_FRAME)
2146 /* skip our return address and return to parent */
2147 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2148 EMIT1(0xC3); /* ret */
2149 /* Make sure the trampoline generation logic doesn't overflow */
2150 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2154 ret = prog - (u8 *)image;
2161 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
2163 u8 *jg_reloc, *prog = *pprog;
2164 int pivot, err, jg_bytes = 1;
2168 /* Leaf node of recursion, i.e. not a range of indices
2171 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
2172 if (!is_simm32(progs[a]))
2174 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2176 err = emit_cond_near_jump(&prog, /* je func */
2177 (void *)progs[a], prog,
2182 emit_indirect_jump(&prog, 2 /* rdx */, prog);
2188 /* Not a leaf node, so we pivot, and recursively descend into
2189 * the lower and upper ranges.
2191 pivot = (b - a) / 2;
2192 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
2193 if (!is_simm32(progs[a + pivot]))
2195 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2197 if (pivot > 2) { /* jg upper_part */
2198 /* Require near jump. */
2200 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2206 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
2211 /* From Intel 64 and IA-32 Architectures Optimization
2212 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2213 * Coding Rule 11: All branch targets should be 16-byte
2216 emit_align(&prog, 16);
2217 jg_offset = prog - jg_reloc;
2218 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2220 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
2229 static int cmp_ips(const void *a, const void *b)
2241 int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
2245 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2246 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
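/*
 * Illustrative note (not part of the original file): the dispatcher does a
 * binary search over the sorted 'funcs' addresses against the target in
 * rdx, turning known destinations into direct jumps.  For two functions
 * the emitted code is roughly (alignment nops omitted):
 *
 *	cmp rdx, funcs[0]
 *	jg  1f
 *	cmp rdx, funcs[0]
 *	je  funcs[0]
 *	jmp rdx			; unknown target, indirect (retpoline-aware)
 * 1:	cmp rdx, funcs[1]
 *	je  funcs[1]
 *	jmp rdx
 */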
2249 struct x64_jit_data {
2250 struct bpf_binary_header *header;
2254 struct jit_context ctx;
2257 #define MAX_PASSES 20
2258 #define PADDING_PASSES (MAX_PASSES - 5)
2260 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2262 struct bpf_binary_header *header = NULL;
2263 struct bpf_prog *tmp, *orig_prog = prog;
2264 struct x64_jit_data *jit_data;
2265 int proglen, oldproglen = 0;
2266 struct jit_context ctx = {};
2267 bool tmp_blinded = false;
2268 bool extra_pass = false;
2269 bool padding = false;
2275 if (!prog->jit_requested)
2278 tmp = bpf_jit_blind_constants(prog);
2280 * If blinding was requested and we failed during blinding,
2281 * we must fall back to the interpreter.
2290 jit_data = prog->aux->jit_data;
2292 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2297 prog->aux->jit_data = jit_data;
2299 addrs = jit_data->addrs;
2301 ctx = jit_data->ctx;
2302 oldproglen = jit_data->proglen;
2303 image = jit_data->image;
2304 header = jit_data->header;
2307 goto skip_init_addrs;
2309 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2316 * Before the first pass, make a rough estimation of addrs[]:
2317 * each BPF instruction is translated to less than 64 bytes.
2319 for (proglen = 0, i = 0; i <= prog->len; i++) {
2323 ctx.cleanup_addr = proglen;
2327 * JITed image shrinks with every pass and the loop iterates
2328 * until the image stops shrinking. Very large BPF programs
2329 * may converge on the last pass. In such a case, do one more
2330 * pass to emit the final image.
2332 for (pass = 0; pass < MAX_PASSES || image; pass++) {
2333 if (!padding && pass >= PADDING_PASSES)
2335 proglen = do_jit(prog, addrs, image, oldproglen, &ctx, padding);
2340 bpf_jit_binary_free(header);
2345 if (proglen != oldproglen) {
2346 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2347 proglen, oldproglen);
2352 if (proglen == oldproglen) {
2354 * The number of entries in extable is the number of BPF_LDX
2355 * insns that access kernel memory via "pointer to BTF type".
2356 * The verifier changed their opcode from LDX|MEM|size
2357 * to LDX|PROBE_MEM|size to make JITing easier.
2359 u32 align = __alignof__(struct exception_table_entry);
2360 u32 extable_size = prog->aux->num_exentries *
2361 sizeof(struct exception_table_entry);
2363 /* allocate module memory for x86 insns and extable */
2364 header = bpf_jit_binary_alloc(roundup(proglen, align) + extable_size,
2365 &image, align, jit_fill_hole);
2370 prog->aux->extable = (void *) image + roundup(proglen, align);
2372 oldproglen = proglen;
2376 if (bpf_jit_enable > 1)
2377 bpf_jit_dump(prog->len, proglen, pass + 1, image);
2380 if (!prog->is_func || extra_pass) {
2381 bpf_tail_call_direct_fixup(prog);
2382 bpf_jit_binary_lock_ro(header);
2384 jit_data->addrs = addrs;
2385 jit_data->ctx = ctx;
2386 jit_data->proglen = proglen;
2387 jit_data->image = image;
2388 jit_data->header = header;
2390 prog->bpf_func = (void *)image;
2392 prog->jited_len = proglen;
2397 if (!image || !prog->is_func || extra_pass) {
2399 bpf_prog_fill_jited_linfo(prog, addrs + 1);
2403 prog->aux->jit_data = NULL;
2407 bpf_jit_prog_release_other(prog, prog == orig_prog ?
2412 bool bpf_jit_supports_kfunc_call(void)