1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Just-In-Time compiler for eBPF filters on IA32 (32bit x86)
4 *
5 * Author: Wang YanQing (udknight@gmail.com)
6 * The code is based on code and ideas from:
7 * Eric Dumazet (eric.dumazet@gmail.com)
8 * and from:
9 * Shubham Bansal <illusionist.neo@gmail.com>
10 */
12 #include <linux/netdevice.h>
13 #include <linux/filter.h>
14 #include <linux/if_vlan.h>
15 #include <asm/cacheflush.h>
16 #include <asm/set_memory.h>
17 #include <asm/nospec-branch.h>
18 #include <linux/bpf.h>
20 /*
21 * eBPF prog stack layout:
22 *
23 *                         high
24 * original ESP =>        +-----+
25 *                        |     | callee saved registers
26 *                        +-----+
27 *                        | ... | eBPF JIT scratch space
28 * BPF_FP,IA32_EBP  =>    +-----+
29 *                        | ... | eBPF prog stack
30 *                        +-----+
31 *                        |RSVD | JIT scratchpad
32 * current ESP =>         +-----+
33 *                        |     |
34 *                        | ... | Function call stack
35 *                        |     |
36 *                        +-----+
37 *                          low
38 *
39 * The callee saved registers:
40 *
41 *                                high
42 * original ESP =>        +------------------+ \
43 *                        |     ebp          | |
44 * current EBP =>         +------------------+ } callee saved registers
45 *                        |    ebx,esi,edi   | |
46 *                        +------------------+ /
47 *                                low
48 */
50 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
51 {
52 if (len == 1)
53 *ptr = bytes;
54 else if (len == 2)
55 *(u16 *)ptr = bytes;
56 else {
57 *(u32 *)ptr = bytes;
58 barrier();
59 }
60 return ptr + len;
61 }
63 #define EMIT(bytes, len) \
64 do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)
66 #define EMIT1(b1) EMIT(b1, 1)
67 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
68 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
69 #define EMIT4(b1, b2, b3, b4) \
70 EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
72 #define EMIT1_off32(b1, off) \
73 do { EMIT1(b1); EMIT(off, 4); } while (0)
74 #define EMIT2_off32(b1, b2, off) \
75 do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
76 #define EMIT3_off32(b1, b2, b3, off) \
77 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
78 #define EMIT4_off32(b1, b2, b3, b4, off) \
79 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
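/*
 * Worked example of the byte packing, using only the macros above:
 * EMIT3(0x89, 0x45, 0x08) packs its arguments little-endian into one
 * u32; emit_code() then copies the low three bytes into the image:
 *
 *	89 45 08	mov dword ptr [ebp+0x8],eax
 *
 * (opcode 0x89 = MOV r/m32,r32; ModRM 0x45 = mod 01, reg EAX,
 * rm EBP with disp8).
 */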
81 #define jmp_label(label, jmp_insn_len) (label - cnt - jmp_insn_len)
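/*
 * jmp_label() converts a byte offset recorded in a jmp_labelN static
 * into the PC-relative displacement x86 expects: targets are taken
 * relative to the end of the branch, so a 2-byte short jump emitted
 * at offset 'cnt' needs label - cnt - 2. Forward references read the
 * value captured on an earlier pass and rely on the multi-pass loop
 * in bpf_int_jit_compile() to converge to the exact offsets.
 */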
83 static bool is_imm8(int value)
85 return value <= 127 && value >= -128;
88 static bool is_simm32(s64 value)
90 return value == (s64) (s32) value;
93 #define STACK_OFFSET(k) (k)
94 #define TCALL_CNT (MAX_BPF_JIT_REG + 0) /* Tail Call Count */
96 #define IA32_EAX (0x0)
97 #define IA32_EBX (0x3)
98 #define IA32_ECX (0x1)
99 #define IA32_EDX (0x2)
100 #define IA32_ESI (0x6)
101 #define IA32_EDI (0x7)
102 #define IA32_EBP (0x5)
103 #define IA32_ESP (0x4)
106  * List of x86 conditional jump opcodes (. + s8)
107 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
110 #define IA32_JAE 0x73
112 #define IA32_JNE 0x75
113 #define IA32_JBE 0x76
116 #define IA32_JGE 0x7D
117 #define IA32_JLE 0x7E
120 #define COND_JMP_OPCODE_INVALID (0xFF)
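/*
 * Example of the short/near rule above: short "jae" is 73 rel8;
 * adding 0x10 and prefixing 0x0F yields the near form 0F 83 rel32,
 * which is what EMIT2_off32(0x0F, IA32_JAE + 0x10, off) emits when a
 * target is out of rel8 range.
 */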
123 * Map eBPF registers to IA32 32bit registers or stack scratch space.
125 * 1. All the registers, R0-R10, are mapped to scratch space on stack.
126 * 2. We need two 64 bit temp registers to do complex operations on eBPF
127 *    registers.
128 * 3. For performance reasons, the BPF_REG_AX for blinding constants is
129 *    mapped to the real hardware register pair, IA32_ESI and IA32_EDI.
130 *
131 * As the eBPF registers are all 64 bit registers and IA32 has only 32 bit
132 * registers, we have to map each eBPF register to two IA32 32 bit registers
133 * or to scratch memory space, and build each 64 bit eBPF register from those.
135 * We use IA32_EAX, IA32_EDX, IA32_ECX, IA32_EBX as temporary registers.
137 static const u8 bpf2ia32[][2] = {
138 /* Return value from in-kernel function, and exit value from eBPF program */
139 [BPF_REG_0] = {STACK_OFFSET(0), STACK_OFFSET(4)},
141 /* The arguments from eBPF program to in-kernel function */
142 /* Stored on stack scratch space */
143 [BPF_REG_1] = {STACK_OFFSET(8), STACK_OFFSET(12)},
144 [BPF_REG_2] = {STACK_OFFSET(16), STACK_OFFSET(20)},
145 [BPF_REG_3] = {STACK_OFFSET(24), STACK_OFFSET(28)},
146 [BPF_REG_4] = {STACK_OFFSET(32), STACK_OFFSET(36)},
147 [BPF_REG_5] = {STACK_OFFSET(40), STACK_OFFSET(44)},
149 /* Callee saved registers that in-kernel function will preserve */
150 /* Stored on stack scratch space */
151 [BPF_REG_6] = {STACK_OFFSET(48), STACK_OFFSET(52)},
152 [BPF_REG_7] = {STACK_OFFSET(56), STACK_OFFSET(60)},
153 [BPF_REG_8] = {STACK_OFFSET(64), STACK_OFFSET(68)},
154 [BPF_REG_9] = {STACK_OFFSET(72), STACK_OFFSET(76)},
156 /* Read only Frame Pointer to access Stack */
157 [BPF_REG_FP] = {STACK_OFFSET(80), STACK_OFFSET(84)},
159 /* Temporary register for blinding constants. */
160 [BPF_REG_AX] = {IA32_ESI, IA32_EDI},
162 /* Tail call count. Stored on stack scratch space. */
163 [TCALL_CNT] = {STACK_OFFSET(88), STACK_OFFSET(92)},
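/*
 * Usage sketch for the mapping above: apart from BPF_REG_AX, every
 * eBPF register access becomes a pair of [ebp+disp8] operands. E.g.
 * loading BPF_REG_1 into the EAX:EDX pair is emitted as
 *
 *	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(8));
 *	EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(12));
 *
 * i.e. "mov eax,[ebp+8]; mov edx,[ebp+12]" (low word first).
 */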
166 #define dst_lo dst[0]
167 #define dst_hi dst[1]
168 #define src_lo src[0]
169 #define src_hi src[1]
171 #define STACK_ALIGNMENT 8
173 * Stack space for BPF_REG_0 through BPF_REG_9, BPF_REG_FP and the
174 * tail call count: twelve 8-byte slots. (BPF_REG_AX lives in the
175 * ESI/EDI pair, not in scratch space.)
177 #define SCRATCH_SIZE 96
179 /* Total stack size used in JITed code */
180 #define _STACK_SIZE (stack_depth + SCRATCH_SIZE)
182 #define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
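/*
 * Worked sizing example: for a program with stack_depth == 20,
 * _STACK_SIZE = 20 + 96 = 116 and STACK_SIZE = ALIGN(116, 8) = 120,
 * keeping the frame a multiple of STACK_ALIGNMENT.
 */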
184 /* Get the offset of eBPF REGISTERs stored on scratch space. */
185 #define STACK_VAR(off) (off)
187 /* Encode 'dst_reg' register into IA32 opcode 'byte' */
188 static u8 add_1reg(u8 byte, u32 dst_reg)
190 return byte + dst_reg;
193 /* Encode 'dst_reg' and 'src_reg' registers into IA32 opcode 'byte' */
194 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
196 return byte + dst_reg + (src_reg << 3);
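/*
 * These helpers build the ModRM byte: the mod bits (and any opcode
 * extension) arrive in 'byte', bits 5-3 take the /reg operand and
 * bits 2-0 the r/m operand. Example: add_2reg(0xC0, IA32_EAX,
 * IA32_EBX) = 0xD8, so EMIT2(0x89, 0xD8) encodes "mov eax,ebx"
 * (register-direct form, EBX stored into EAX).
 */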
199 static void jit_fill_hole(void *area, unsigned int size)
201 /* Fill whole space with int3 instructions */
202 memset(area, 0xcc, size);
205 static inline void emit_ia32_mov_i(const u8 dst, const u32 val, bool dstk,
214 EMIT2(0x33, add_2reg(0xC0, IA32_EAX, IA32_EAX));
215 /* mov dword ptr [ebp+off],eax */
216 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
219 EMIT3_off32(0xC7, add_1reg(0x40, IA32_EBP),
220 STACK_VAR(dst), val);
224 EMIT2(0x33, add_2reg(0xC0, dst, dst));
226 EMIT2_off32(0xC7, add_1reg(0xC0, dst),
232 /* dst = src (4 bytes) */
233 static inline void emit_ia32_mov_r(const u8 dst, const u8 src, bool dstk,
234 bool sstk, u8 **pprog)
238 u8 sreg = sstk ? IA32_EAX : src;
241 /* mov eax,dword ptr [ebp+off] */
242 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(src));
244 /* mov dword ptr [ebp+off],eax */
245 EMIT3(0x89, add_2reg(0x40, IA32_EBP, sreg), STACK_VAR(dst));
248 EMIT2(0x89, add_2reg(0xC0, dst, sreg));
254 static inline void emit_ia32_mov_r64(const bool is64, const u8 dst[],
255 const u8 src[], bool dstk,
256 bool sstk, u8 **pprog,
257 const struct bpf_prog_aux *aux)
259 emit_ia32_mov_r(dst_lo, src_lo, dstk, sstk, pprog);
261 /* complete 8 byte move */
262 emit_ia32_mov_r(dst_hi, src_hi, dstk, sstk, pprog);
263 else if (!aux->verifier_zext)
264 /* zero out high 4 bytes */
265 emit_ia32_mov_i(dst_hi, 0, dstk, pprog);
268 /* Sign-extended move */
269 static inline void emit_ia32_mov_i64(const bool is64, const u8 dst[],
270 const u32 val, bool dstk, u8 **pprog)
274 if (is64 && (val & (1<<31)))
276 emit_ia32_mov_i(dst_lo, val, dstk, pprog);
277 emit_ia32_mov_i(dst_hi, hi, dstk, pprog);
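/*
 * Example: for a BPF_ALU64 "dst = -2", val = 0xFFFFFFFE has bit 31
 * set, so hi becomes 0xFFFFFFFF and the pair holds the sign-extended
 * 0xFFFFFFFFFFFFFFFE; a 32-bit move instead leaves hi at zero.
 */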
281 * ALU operation (32 bit)
282 * dst = dst * src
284 static inline void emit_ia32_mul_r(const u8 dst, const u8 src, bool dstk,
285 bool sstk, u8 **pprog)
289 u8 sreg = sstk ? IA32_ECX : src;
292 /* mov ecx,dword ptr [ebp+off] */
293 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src));
296 /* mov eax,dword ptr [ebp+off] */
297 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst));
300 EMIT2(0x8B, add_2reg(0xC0, dst, IA32_EAX));
303 EMIT2(0xF7, add_1reg(0xE0, sreg));
306 /* mov dword ptr [ebp+off],eax */
307 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
311 EMIT2(0x89, add_2reg(0xC0, dst, IA32_EAX));
316 static inline void emit_ia32_to_le_r64(const u8 dst[], s32 val,
317 bool dstk, u8 **pprog,
318 const struct bpf_prog_aux *aux)
322 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
323 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
325 if (dstk && val != 64) {
326 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
328 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
334 * Emit 'movzwl eax,ax' to zero extend 16-bit
335 * into 64 bit
336 */
337 EMIT2(0x0F, 0xB7);
338 EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo));
339 if (!aux->verifier_zext)
340 /* xor dreg_hi,dreg_hi */
341 EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
344 if (!aux->verifier_zext)
345 /* xor dreg_hi,dreg_hi */
346 EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
353 if (dstk && val != 64) {
354 /* mov dword ptr [ebp+off],dreg_lo */
355 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
357 /* mov dword ptr [ebp+off],dreg_hi */
358 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
364 static inline void emit_ia32_to_be_r64(const u8 dst[], s32 val,
365 bool dstk, u8 **pprog,
366 const struct bpf_prog_aux *aux)
370 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
371 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
374 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
376 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
381 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
382 EMIT1(0x66);
383 EMIT3(0xC1, add_1reg(0xC8, dreg_lo), 8);
386 EMIT1(add_2reg(0xC0, dreg_lo, dreg_lo));
388 if (!aux->verifier_zext)
389 /* xor dreg_hi,dreg_hi */
390 EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
393 /* Emit 'bswap eax' to swap lower 4 bytes */
394 EMIT1(0x0F);
395 EMIT1(add_1reg(0xC8, dreg_lo));
397 if (!aux->verifier_zext)
398 /* xor dreg_hi,dreg_hi */
399 EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
402 /* Emit 'bswap eax' to swap lower 4 bytes */
403 EMIT1(0x0F);
404 EMIT1(add_1reg(0xC8, dreg_lo));
406 /* Emit 'bswap edx' to swap lower 4 bytes */
407 EMIT1(0x0F);
408 EMIT1(add_1reg(0xC8, dreg_hi));
410 /* mov ecx,dreg_hi */
411 EMIT2(0x89, add_2reg(0xC0, IA32_ECX, dreg_hi));
412 /* mov dreg_hi,dreg_lo */
413 EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
414 /* mov dreg_lo,ecx */
415 EMIT2(0x89, add_2reg(0xC0, dreg_lo, IA32_ECX));
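/*
 * The 64-bit byte swap above is two 32-bit bswaps plus an exchange of
 * the halves via ECX. E.g. 0x1122334455667788: bswap turns the low
 * word 0x55667788 into 0x88776655 and the high word 0x11223344 into
 * 0x44332211; after the exchange the pair reads 0x8877665544332211.
 */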
420 /* mov dword ptr [ebp+off],dreg_lo */
421 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
423 /* mov dword ptr [ebp+off],dreg_hi */
424 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
431 * ALU operation (32 bit)
432 * dst = dst (div|mod) src
434 static inline void emit_ia32_div_mod_r(const u8 op, const u8 dst, const u8 src,
435 bool dstk, bool sstk, u8 **pprog)
441 /* mov ecx,dword ptr [ebp+off] */
442 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
444 else if (src != IA32_ECX)
446 EMIT2(0x8B, add_2reg(0xC0, src, IA32_ECX));
449 /* mov eax,dword ptr [ebp+off] */
450 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
454 EMIT2(0x8B, add_2reg(0xC0, dst, IA32_EAX));
457 EMIT2(0x31, add_2reg(0xC0, IA32_EDX, IA32_EDX));
459 EMIT2(0xF7, add_1reg(0xF0, IA32_ECX));
463 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
466 EMIT2(0x89, add_2reg(0xC0, dst, IA32_EDX));
469 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
472 EMIT2(0x89, add_2reg(0xC0, dst, IA32_EAX));
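/*
 * The division sequence above follows the IA32 "div" convention: the
 * dividend is EDX:EAX (EDX is zeroed first, making it an unsigned
 * 32-bit divide), the quotient lands in EAX and the remainder in EDX.
 * That is why BPF_MOD stores EDX back to dst while BPF_DIV stores EAX.
 */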
478 * ALU operation (32 bit)
479 * dst = dst (shift) src
481 static inline void emit_ia32_shift_r(const u8 op, const u8 dst, const u8 src,
482 bool dstk, bool sstk, u8 **pprog)
486 u8 dreg = dstk ? IA32_EAX : dst;
490 /* mov eax,dword ptr [ebp+off] */
491 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst));
494 /* mov ecx,dword ptr [ebp+off] */
495 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src));
496 else if (src != IA32_ECX)
498 EMIT2(0x8B, add_2reg(0xC0, src, IA32_ECX));
499
500 switch (op) {
501 case BPF_LSH:
502 b2 = 0xE0; break;
503 case BPF_RSH:
504 b2 = 0xE8; break;
505 case BPF_ARSH:
506 b2 = 0xF8; break;
507 default:
508 return;
509 }
510 EMIT2(0xD3, add_1reg(b2, dreg));
513 /* mov dword ptr [ebp+off],dreg */
514 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg), STACK_VAR(dst));
519 * ALU operation (32 bit)
520 * dst = dst (add|sub|or|and|xor) src
522 static inline void emit_ia32_alu_r(const bool is64, const bool hi, const u8 op,
523 const u8 dst, const u8 src, bool dstk,
524 bool sstk, u8 **pprog)
528 u8 sreg = sstk ? IA32_EAX : src;
529 u8 dreg = dstk ? IA32_EDX : dst;
532 /* mov eax,dword ptr [ebp+off] */
533 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(src));
536 /* mov eax,dword ptr [ebp+off] */
537 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(dst));
539 switch (BPF_OP(op)) {
540 /* dst = dst + src */
543 EMIT2(0x11, add_2reg(0xC0, dreg, sreg));
545 EMIT2(0x01, add_2reg(0xC0, dreg, sreg));
547 /* dst = dst - src */
550 EMIT2(0x19, add_2reg(0xC0, dreg, sreg));
552 EMIT2(0x29, add_2reg(0xC0, dreg, sreg));
554 /* dst = dst | src */
556 EMIT2(0x09, add_2reg(0xC0, dreg, sreg));
558 /* dst = dst & src */
560 EMIT2(0x21, add_2reg(0xC0, dreg, sreg));
562 /* dst = dst ^ src */
564 EMIT2(0x31, add_2reg(0xC0, dreg, sreg));
569 /* mov dword ptr [ebp+off],dreg */
570 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg),
575 /* ALU operation (64 bit) */
576 static inline void emit_ia32_alu_r64(const bool is64, const u8 op,
577 const u8 dst[], const u8 src[],
578 bool dstk, bool sstk,
579 u8 **pprog, const struct bpf_prog_aux *aux)
583 emit_ia32_alu_r(is64, false, op, dst_lo, src_lo, dstk, sstk, &prog);
585 emit_ia32_alu_r(is64, true, op, dst_hi, src_hi, dstk, sstk,
587 else if (!aux->verifier_zext)
588 emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
593 * ALU operation (32 bit)
594 * dst = dst (add|sub|or|and|xor) val
596 static inline void emit_ia32_alu_i(const bool is64, const bool hi, const u8 op,
597 const u8 dst, const s32 val, bool dstk,
602 u8 dreg = dstk ? IA32_EAX : dst;
606 /* mov eax,dword ptr [ebp+off] */
607 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(dst));
611 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EDX), val);
614 /* dst = dst + val */
618 EMIT3(0x83, add_1reg(0xD0, dreg), val);
620 EMIT2(0x11, add_2reg(0xC0, dreg, sreg));
623 EMIT3(0x83, add_1reg(0xC0, dreg), val);
625 EMIT2(0x01, add_2reg(0xC0, dreg, sreg));
628 /* dst = dst - val */
632 EMIT3(0x83, add_1reg(0xD8, dreg), val);
634 EMIT2(0x19, add_2reg(0xC0, dreg, sreg));
637 EMIT3(0x83, add_1reg(0xE8, dreg), val);
639 EMIT2(0x29, add_2reg(0xC0, dreg, sreg));
642 /* dst = dst | val */
645 EMIT3(0x83, add_1reg(0xC8, dreg), val);
647 EMIT2(0x09, add_2reg(0xC0, dreg, sreg));
649 /* dst = dst & val */
652 EMIT3(0x83, add_1reg(0xE0, dreg), val);
654 EMIT2(0x21, add_2reg(0xC0, dreg, sreg));
656 /* dst = dst ^ val */
659 EMIT3(0x83, add_1reg(0xF0, dreg), val);
661 EMIT2(0x31, add_2reg(0xC0, dreg, sreg));
664 EMIT2(0xF7, add_1reg(0xD8, dreg));
669 /* mov dword ptr [ebp+off],dreg */
670 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg),
675 /* ALU operation (64 bit) */
676 static inline void emit_ia32_alu_i64(const bool is64, const u8 op,
677 const u8 dst[], const u32 val,
678 bool dstk, u8 **pprog,
679 const struct bpf_prog_aux *aux)
684 if (is64 && (val & (1<<31)))
687 emit_ia32_alu_i(is64, false, op, dst_lo, val, dstk, &prog);
689 emit_ia32_alu_i(is64, true, op, dst_hi, hi, dstk, &prog);
690 else if (!aux->verifier_zext)
691 emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
696 /* dst = -dst (64 bit); arithmetic negate of the register pair */
697 static inline void emit_ia32_neg64(const u8 dst[], bool dstk, u8 **pprog)
701 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
702 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
705 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
707 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
712 EMIT2(0xF7, add_1reg(0xD8, dreg_lo));
713 /* adc dreg_hi,0x0 */
714 EMIT3(0x83, add_1reg(0xD0, dreg_hi), 0x00);
716 EMIT2(0xF7, add_1reg(0xD8, dreg_hi));
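/*
 * Worked example of the neg/adc/neg sequence: for dst == 1, negating
 * the low word gives 0xFFFFFFFF and sets CF; "adc" folds that borrow
 * into the high word (0 -> 1); negating the high word then yields
 * 0xFFFFFFFF, so the pair reads -1 as expected.
 */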
719 /* mov dword ptr [ebp+off],dreg_lo */
720 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
722 /* mov dword ptr [ebp+off],dreg_hi */
723 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
729 /* dst = dst << src */
730 static inline void emit_ia32_lsh_r64(const u8 dst[], const u8 src[],
731 bool dstk, bool sstk, u8 **pprog)
735 static int jmp_label1 = -1;
736 static int jmp_label2 = -1;
737 static int jmp_label3 = -1;
738 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
739 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
742 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
744 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
749 /* mov ecx,dword ptr [ebp+off] */
750 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
754 EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
757 EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
758 /* Jumps when >= 32 */
759 if (is_imm8(jmp_label(jmp_label1, 2)))
760 EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
762 EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
766 EMIT2(0xD3, add_1reg(0xE0, dreg_hi));
767 /* mov ebx,dreg_lo */
768 EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
770 EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
772 /* IA32_ECX = -IA32_ECX + 32 */
774 EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
776 EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
779 EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
781 EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
784 if (is_imm8(jmp_label(jmp_label3, 2)))
785 EMIT2(0xEB, jmp_label(jmp_label3, 2));
787 EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
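/*
 * The "< 32" path above computes, for a shift count n in ECX:
 *
 *	hi = (hi << n) | (lo >> (32 - n));
 *	lo = lo << n;
 *
 * where the copy of lo saved in EBX supplies the bits that cross the
 * 32-bit boundary.
 */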
790 if (jmp_label1 == -1)
791 jmp_label1 = cnt;
794 EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
795 /* Jumps when >= 64 */
796 if (is_imm8(jmp_label(jmp_label2, 2)))
797 EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
799 EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
803 EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
805 EMIT2(0xD3, add_1reg(0xE0, dreg_lo));
806 /* mov dreg_hi,dreg_lo */
807 EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
809 /* xor dreg_lo,dreg_lo */
810 EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
813 if (is_imm8(jmp_label(jmp_label3, 2)))
814 EMIT2(0xEB, jmp_label(jmp_label3, 2));
816 EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
819 if (jmp_label2 == -1)
820 jmp_label2 = cnt;
821 /* xor dreg_lo,dreg_lo */
822 EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
823 /* xor dreg_hi,dreg_hi */
824 EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
826 if (jmp_label3 == -1)
827 jmp_label3 = cnt;
830 /* mov dword ptr [ebp+off],dreg_lo */
831 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
833 /* mov dword ptr [ebp+off],dreg_hi */
834 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
841 /* dst = dst >> src (signed) */
842 static inline void emit_ia32_arsh_r64(const u8 dst[], const u8 src[],
843 bool dstk, bool sstk, u8 **pprog)
847 static int jmp_label1 = -1;
848 static int jmp_label2 = -1;
849 static int jmp_label3 = -1;
850 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
851 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
854 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
856 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
861 /* mov ecx,dword ptr [ebp+off] */
862 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
866 EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
869 EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
870 /* Jumps when >= 32 */
871 if (is_imm8(jmp_label(jmp_label1, 2)))
872 EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
874 EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
877 /* lshr dreg_lo,cl */
878 EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
879 /* mov ebx,dreg_hi */
880 EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
881 /* ashr dreg_hi,cl */
882 EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
884 /* IA32_ECX = -IA32_ECX + 32 */
886 EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
888 EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
891 EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
893 EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
896 if (is_imm8(jmp_label(jmp_label3, 2)))
897 EMIT2(0xEB, jmp_label(jmp_label3, 2));
899 EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
902 if (jmp_label1 == -1)
903 jmp_label1 = cnt;
906 EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
907 /* Jumps when >= 64 */
908 if (is_imm8(jmp_label(jmp_label2, 2)))
909 EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
911 EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
915 EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
916 /* ashr dreg_hi,cl */
917 EMIT2(0xD3, add_1reg(0xF8, dreg_hi));
918 /* mov dreg_lo,dreg_hi */
919 EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
921 /* ashr dreg_hi,imm8 */
922 EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
925 if (is_imm8(jmp_label(jmp_label3, 2)))
926 EMIT2(0xEB, jmp_label(jmp_label3, 2));
928 EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
931 if (jmp_label2 == -1)
932 jmp_label2 = cnt;
933 /* ashr dreg_hi,imm8 */
934 EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
935 /* mov dreg_lo,dreg_hi */
936 EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
938 if (jmp_label3 == -1)
939 jmp_label3 = cnt;
942 /* mov dword ptr [ebp+off],dreg_lo */
943 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
945 /* mov dword ptr [ebp+off],dreg_hi */
946 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
953 /* dst = dst >> src */
954 static inline void emit_ia32_rsh_r64(const u8 dst[], const u8 src[], bool dstk,
955 bool sstk, u8 **pprog)
959 static int jmp_label1 = -1;
960 static int jmp_label2 = -1;
961 static int jmp_label3 = -1;
962 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
963 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
966 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
968 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
973 /* mov ecx,dword ptr [ebp+off] */
974 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
978 EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_ECX));
981 EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 32);
982 /* Jumps when >= 32 */
983 if (is_imm8(jmp_label(jmp_label1, 2)))
984 EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
986 EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label1, 6));
989 /* lshr dreg_lo,cl */
990 EMIT2(0xD3, add_1reg(0xE8, dreg_lo));
991 /* mov ebx,dreg_hi */
992 EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
994 EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
996 /* IA32_ECX = -IA32_ECX + 32 */
998 EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
1000 EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
1003 EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
1004 /* or dreg_lo,ebx */
1005 EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
1008 if (is_imm8(jmp_label(jmp_label3, 2)))
1009 EMIT2(0xEB, jmp_label(jmp_label3, 2));
1011 EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
1014 if (jmp_label1 == -1)
1015 jmp_label1 = cnt;
1017 EMIT3(0x83, add_1reg(0xF8, IA32_ECX), 64);
1018 /* Jumps when >= 64 */
1019 if (is_imm8(jmp_label(jmp_label2, 2)))
1020 EMIT2(IA32_JAE, jmp_label(jmp_label2, 2));
1022 EMIT2_off32(0x0F, IA32_JAE + 0x10, jmp_label(jmp_label2, 6));
1026 EMIT3(0x83, add_1reg(0xE8, IA32_ECX), 32);
1027 /* shr dreg_hi,cl */
1028 EMIT2(0xD3, add_1reg(0xE8, dreg_hi));
1029 /* mov dreg_lo,dreg_hi */
1030 EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
1031 /* xor dreg_hi,dreg_hi */
1032 EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
1035 if (is_imm8(jmp_label(jmp_label3, 2)))
1036 EMIT2(0xEB, jmp_label(jmp_label3, 2));
1038 EMIT1_off32(0xE9, jmp_label(jmp_label3, 5));
1041 if (jmp_label2 == -1)
1042 jmp_label2 = cnt;
1043 /* xor dreg_lo,dreg_lo */
1044 EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
1045 /* xor dreg_hi,dreg_hi */
1046 EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
1048 if (jmp_label3 == -1)
1049 jmp_label3 = cnt;
1052 /* mov dword ptr [ebp+off],dreg_lo */
1053 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
1055 /* mov dword ptr [ebp+off],dreg_hi */
1056 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
1063 /* dst = dst << val */
1064 static inline void emit_ia32_lsh_i64(const u8 dst[], const u32 val,
1065 bool dstk, u8 **pprog)
1069 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
1070 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
1073 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1075 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
1078 /* Do LSH operation */
1080 /* shl dreg_hi,imm8 */
1081 EMIT3(0xC1, add_1reg(0xE0, dreg_hi), val);
1082 /* mov ebx,dreg_lo */
1083 EMIT2(0x8B, add_2reg(0xC0, dreg_lo, IA32_EBX));
1084 /* shl dreg_lo,imm8 */
1085 EMIT3(0xC1, add_1reg(0xE0, dreg_lo), val);
1087 /* IA32_ECX = 32 - val */
1088 /* mov cl,val */
1089 EMIT2(0xB1, val);
1090 /* movzx ecx,cl */
1091 EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
1093 EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
1095 EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
1098 EMIT2(0xD3, add_1reg(0xE8, IA32_EBX));
1099 /* or dreg_hi,ebx */
1100 EMIT2(0x09, add_2reg(0xC0, dreg_hi, IA32_EBX));
1101 } else if (val >= 32 && val < 64) {
1102 u32 value = val - 32;
1104 /* shl dreg_lo,imm8 */
1105 EMIT3(0xC1, add_1reg(0xE0, dreg_lo), value);
1106 /* mov dreg_hi,dreg_lo */
1107 EMIT2(0x89, add_2reg(0xC0, dreg_hi, dreg_lo));
1108 /* xor dreg_lo,dreg_lo */
1109 EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
1111 /* xor dreg_lo,dreg_lo */
1112 EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
1113 /* xor dreg_hi,dreg_hi */
1114 EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
1118 /* mov dword ptr [ebp+off],dreg_lo */
1119 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
1121 /* mov dword ptr [ebp+off],dreg_hi */
1122 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
1128 /* dst = dst >> val */
1129 static inline void emit_ia32_rsh_i64(const u8 dst[], const u32 val,
1130 bool dstk, u8 **pprog)
1134 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
1135 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
1138 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1140 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
1144 /* Do RSH operation */
1146 /* shr dreg_lo,imm8 */
1147 EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
1148 /* mov ebx,dreg_hi */
1149 EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
1150 /* shr dreg_hi,imm8 */
1151 EMIT3(0xC1, add_1reg(0xE8, dreg_hi), val);
1153 /* IA32_ECX = 32 - val */
1154 /* mov cl,val */
1155 EMIT2(0xB1, val);
1156 /* movzx ecx,cl */
1157 EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
1159 EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
1161 EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
1164 EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
1165 /* or dreg_lo,ebx */
1166 EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
1167 } else if (val >= 32 && val < 64) {
1168 u32 value = val - 32;
1170 /* shr dreg_hi,imm8 */
1171 EMIT3(0xC1, add_1reg(0xE8, dreg_hi), value);
1172 /* mov dreg_lo,dreg_hi */
1173 EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
1174 /* xor dreg_hi,dreg_hi */
1175 EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
1177 /* xor dreg_lo,dreg_lo */
1178 EMIT2(0x33, add_2reg(0xC0, dreg_lo, dreg_lo));
1179 /* xor dreg_hi,dreg_hi */
1180 EMIT2(0x33, add_2reg(0xC0, dreg_hi, dreg_hi));
1184 /* mov dword ptr [ebp+off],dreg_lo */
1185 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
1187 /* mov dword ptr [ebp+off],dreg_hi */
1188 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
1194 /* dst = dst >> val (signed) */
1195 static inline void emit_ia32_arsh_i64(const u8 dst[], const u32 val,
1196 bool dstk, u8 **pprog)
1200 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
1201 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
1204 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1206 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
1209 /* Do RSH operation */
1211 /* shr dreg_lo,imm8 */
1212 EMIT3(0xC1, add_1reg(0xE8, dreg_lo), val);
1213 /* mov ebx,dreg_hi */
1214 EMIT2(0x8B, add_2reg(0xC0, dreg_hi, IA32_EBX));
1215 /* ashr dreg_hi,imm8 */
1216 EMIT3(0xC1, add_1reg(0xF8, dreg_hi), val);
1218 /* IA32_ECX = 32 - val */
1219 /* mov cl,val */
1220 EMIT2(0xB1, val);
1221 /* movzx ecx,cl */
1222 EMIT3(0x0F, 0xB6, add_2reg(0xC0, IA32_ECX, IA32_ECX));
1224 EMIT2(0xF7, add_1reg(0xD8, IA32_ECX));
1226 EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 32);
1229 EMIT2(0xD3, add_1reg(0xE0, IA32_EBX));
1230 /* or dreg_lo,ebx */
1231 EMIT2(0x09, add_2reg(0xC0, dreg_lo, IA32_EBX));
1232 } else if (val >= 32 && val < 64) {
1233 u32 value = val - 32;
1235 /* ashr dreg_hi,imm8 */
1236 EMIT3(0xC1, add_1reg(0xF8, dreg_hi), value);
1237 /* mov dreg_lo,dreg_hi */
1238 EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
1240 /* ashr dreg_hi,imm8 */
1241 EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
1243 /* ashr dreg_hi,imm8 */
1244 EMIT3(0xC1, add_1reg(0xF8, dreg_hi), 31);
1245 /* mov dreg_lo,dreg_hi */
1246 EMIT2(0x89, add_2reg(0xC0, dreg_lo, dreg_hi));
1250 /* mov dword ptr [ebp+off],dreg_lo */
1251 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_lo),
1253 /* mov dword ptr [ebp+off],dreg_hi */
1254 EMIT3(0x89, add_2reg(0x40, IA32_EBP, dreg_hi),
1260 static inline void emit_ia32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
1261 bool sstk, u8 **pprog)
1267 /* mov eax,dword ptr [ebp+off] */
1268 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1271 /* mov eax,dst_hi */
1272 EMIT2(0x8B, add_2reg(0xC0, dst_hi, IA32_EAX));
1275 /* mul dword ptr [ebp+off] */
1276 EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_lo));
1279 EMIT2(0xF7, add_1reg(0xE0, src_lo));
1282 EMIT2(0x89, add_2reg(0xC0, IA32_ECX, IA32_EAX));
1285 /* mov eax,dword ptr [ebp+off] */
1286 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1289 /* mov eax,dst_lo */
1290 EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
1293 /* mul dword ptr [ebp+off] */
1294 EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_hi));
1297 EMIT2(0xF7, add_1reg(0xE0, src_hi));
1300 EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EAX));
1303 /* mov eax,dword ptr [ebp+off] */
1304 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1307 /* mov eax,dst_lo */
1308 EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
1311 /* mul dword ptr [ebp+off] */
1312 EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(src_lo));
1315 EMIT2(0xF7, add_1reg(0xE0, src_lo));
1318 EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EDX));
1321 /* mov dword ptr [ebp+off],eax */
1322 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
1324 /* mov dword ptr [ebp+off],ecx */
1325 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX),
1328 /* mov dst_lo,eax */
1329 EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EAX));
1330 /* mov dst_hi,ecx */
1331 EMIT2(0x89, add_2reg(0xC0, dst_hi, IA32_ECX));
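/*
 * The routine above is the 32x32 schoolbook decomposition, keeping
 * only the low 64 bits:
 *
 *	dst * src mod 2^64 = dst_lo * src_lo
 *			     + ((dst_hi * src_lo + dst_lo * src_hi) << 32)
 *
 * ECX accumulates the two cross products; the last "mul" leaves
 * dst_lo * src_lo in EDX:EAX, so the result is EAX (low word) and
 * ECX + EDX (high word).
 */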
1337 static inline void emit_ia32_mul_i64(const u8 dst[], const u32 val,
1338 bool dstk, u8 **pprog)
1344 hi = val & (1<<31) ? (u32)~0 : 0;
1345 /* movl eax,imm32 */
1346 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), val);
1348 /* mul dword ptr [ebp+off] */
1349 EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_hi));
1352 EMIT2(0xF7, add_1reg(0xE0, dst_hi));
1355 EMIT2(0x89, add_2reg(0xC0, IA32_ECX, IA32_EAX));
1357 /* movl eax,imm32 */
1358 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), hi);
1360 /* mul dword ptr [ebp+off] */
1361 EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_lo));
1364 EMIT2(0xF7, add_1reg(0xE0, dst_lo));
1366 EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EAX));
1368 /* movl eax,imm32 */
1369 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EAX), val);
1371 /* mul dword ptr [ebp+off] */
1372 EMIT3(0xF7, add_1reg(0x60, IA32_EBP), STACK_VAR(dst_lo));
1375 EMIT2(0xF7, add_1reg(0xE0, dst_lo));
1378 EMIT2(0x01, add_2reg(0xC0, IA32_ECX, IA32_EDX));
1381 /* mov dword ptr [ebp+off],eax */
1382 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
1384 /* mov dword ptr [ebp+off],ecx */
1385 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX),
1388 /* mov dword ptr [ebp+off],eax */
1389 EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EAX));
1390 /* mov dword ptr [ebp+off],ecx */
1391 EMIT2(0x89, add_2reg(0xC0, dst_hi, IA32_ECX));
1397 static int bpf_size_to_x86_bytes(int bpf_size)
1398 {
1399 if (bpf_size == BPF_W)
1400 return 4;
1401 else if (bpf_size == BPF_H)
1402 return 2;
1403 else if (bpf_size == BPF_B)
1404 return 1;
1405 else if (bpf_size == BPF_DW)
1406 return 4; /* imm32 */
1407 else
1408 return 0;
1409 }
1411 struct jit_context {
1412 int cleanup_addr; /* Epilogue code offset */
1415 /* Maximum number of bytes emitted while JITing one eBPF insn */
1416 #define BPF_MAX_INSN_SIZE 128
1417 #define BPF_INSN_SAFETY 64
1419 #define PROLOGUE_SIZE 35
1422 * Emit prologue code for BPF program and check its size.
1423 * bpf_tail_call helper will skip it while jumping into another program.
1425 static void emit_prologue(u8 **pprog, u32 stack_depth)
1429 const u8 *r1 = bpf2ia32[BPF_REG_1];
1430 const u8 fplo = bpf2ia32[BPF_REG_FP][0];
1431 const u8 fphi = bpf2ia32[BPF_REG_FP][1];
1432 const u8 *tcc = bpf2ia32[TCALL_CNT];
1445 /* sub esp,STACK_SIZE */
1446 EMIT2_off32(0x81, 0xEC, STACK_SIZE);
1447 /* sub ebp,SCRATCH_SIZE+12 */
1448 EMIT3(0x83, add_1reg(0xE8, IA32_EBP), SCRATCH_SIZE + 12);
1450 EMIT2(0x31, add_2reg(0xC0, IA32_EBX, IA32_EBX));
1452 /* Set up BPF prog stack base register */
1453 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBP), STACK_VAR(fplo));
1454 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(fphi));
1456 /* Move BPF_CTX (EAX) to BPF_REG_R1 */
1457 /* mov dword ptr [ebp+off],eax */
1458 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r1[0]));
1459 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(r1[1]));
1461 /* Initialize tail call count */
1462 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[0]));
1463 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
1465 BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
1469 /* Emit epilogue code for BPF program */
1470 static void emit_epilogue(u8 **pprog, u32 stack_depth)
1473 const u8 *r0 = bpf2ia32[BPF_REG_0];
1476 /* mov eax,dword ptr [ebp+off]*/
1477 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r0[0]));
1478 /* mov edx,dword ptr [ebp+off]*/
1479 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r0[1]));
1481 /* add ebp,SCRATCH_SIZE+12 */
1482 EMIT3(0x83, add_1reg(0xC0, IA32_EBP), SCRATCH_SIZE + 12);
1484 /* mov ebx,dword ptr [ebp-12]*/
1485 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), -12);
1486 /* mov esi,dword ptr [ebp-8]*/
1487 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ESI), -8);
1488 /* mov edi,dword ptr [ebp-4]*/
1489 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDI), -4);
1491 EMIT1(0xC9); /* leave */
1492 EMIT1(0xC3); /* ret */
1497 * Generate the following code:
1498 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
1499 * if (index >= array->map.max_entries)
1500 *     goto out;
1501 * if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
1502 *     goto out;
1503 * prog = array->ptrs[index];
1504 * if (prog == NULL)
1505 *     goto out;
1506 * goto *(prog->bpf_func + prologue_size);
1507 * out:
1508 */
1509 static void emit_bpf_tail_call(u8 **pprog)
1513 const u8 *r1 = bpf2ia32[BPF_REG_1];
1514 const u8 *r2 = bpf2ia32[BPF_REG_2];
1515 const u8 *r3 = bpf2ia32[BPF_REG_3];
1516 const u8 *tcc = bpf2ia32[TCALL_CNT];
1518 static int jmp_label1 = -1;
1521 * if (index >= array->map.max_entries)
1524 /* mov eax,dword ptr [ebp+off] */
1525 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r2[0]));
1526 /* mov edx,dword ptr [ebp+off] */
1527 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX), STACK_VAR(r3[0]));
1529 /* cmp dword ptr [eax+off],edx */
1530 EMIT3(0x39, add_2reg(0x40, IA32_EAX, IA32_EDX),
1531 offsetof(struct bpf_array, map.max_entries));
1533 EMIT2(IA32_JBE, jmp_label(jmp_label1, 2));
1536 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
1539 lo = (u32)MAX_TAIL_CALL_CNT;
1540 hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32);
1541 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(tcc[0]));
1542 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
1545 EMIT3(0x83, add_1reg(0xF8, IA32_EBX), hi);
1548 EMIT3(0x83, add_1reg(0xF8, IA32_ECX), lo);
1551 EMIT2(IA32_JAE, jmp_label(jmp_label1, 2));
1554 EMIT3(0x83, add_1reg(0xC0, IA32_ECX), 0x01);
1556 EMIT3(0x83, add_1reg(0xD0, IA32_EBX), 0x00);
1558 /* mov dword ptr [ebp+off],ecx */
1559 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(tcc[0]));
1560 /* mov dword ptr [ebp+off],ebx */
1561 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EBX), STACK_VAR(tcc[1]));
1563 /* prog = array->ptrs[index]; */
1564 /* mov edx, [eax + edx * 4 + offsetof(...)] */
1565 EMIT3_off32(0x8B, 0x94, 0x90, offsetof(struct bpf_array, ptrs));
1572 EMIT2(0x85, add_2reg(0xC0, IA32_EDX, IA32_EDX));
1574 EMIT2(IA32_JE, jmp_label(jmp_label1, 2));
1576 /* goto *(prog->bpf_func + prologue_size); */
1577 /* mov edx, dword ptr [edx + offsetof(struct bpf_prog, bpf_func)] */
1578 EMIT3(0x8B, add_2reg(0x40, IA32_EDX, IA32_EDX),
1579 offsetof(struct bpf_prog, bpf_func));
1580 /* add edx,prologue_size */
1581 EMIT3(0x83, add_1reg(0xC0, IA32_EDX), PROLOGUE_SIZE);
1583 /* mov eax,dword ptr [ebp+off] */
1584 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX), STACK_VAR(r1[0]));
1587 * Now we're ready to jump into next BPF program:
1588 * eax == ctx (1st arg)
1589 * edx == prog->bpf_func + prologue_size
1591 RETPOLINE_EDX_BPF_JIT();
1593 if (jmp_label1 == -1)
1594 jmp_label1 = cnt;
1600 /* Push a 64-bit eBPF register (two 32-bit words in scratch space) onto the stack. */
1601 static inline void emit_push_r64(const u8 src[], u8 **pprog)
1606 /* mov ecx,dword ptr [ebp+off] */
1607 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_hi));
1611 /* mov ecx,dword ptr [ebp+off] */
1612 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX), STACK_VAR(src_lo));
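/*
 * Note the push order above: the high word goes first, then the low
 * word, so the two dwords sit little-endian-contiguous on the stack
 * and form a proper u64 argument for the helper call in do_jit().
 */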
1619 static u8 get_cond_jmp_opcode(const u8 op, bool is_cmp_lo)
1623 /* Convert BPF opcode to x86 */
1630 jmp_cond = IA32_JNE;
1633 /* GT is unsigned '>', JA in x86 */
1637 /* LT is unsigned '<', JB in x86 */
1641 /* GE is unsigned '>=', JAE in x86 */
1642 jmp_cond = IA32_JAE;
1645 /* LE is unsigned '<=', JBE in x86 */
1646 jmp_cond = IA32_JBE;
1650 /* Signed '>', GT in x86 */
1653 /* GT is unsigned '>', JA in x86 */
1658 /* Signed '<', LT in x86 */
1661 /* LT is unsigned '<', JB in x86 */
1666 /* Signed '>=', GE in x86 */
1667 jmp_cond = IA32_JGE;
1669 /* GE is unsigned '>=', JAE in x86 */
1670 jmp_cond = IA32_JAE;
1674 /* Signed '<=', LE in x86 */
1675 jmp_cond = IA32_JLE;
1677 /* LE is unsigned '<=', JBE in x86 */
1678 jmp_cond = IA32_JBE;
1680 default: /* to silence GCC warning */
1681 jmp_cond = COND_JMP_OPCODE_INVALID;
1688 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
1689 int oldproglen, struct jit_context *ctx)
1691 struct bpf_insn *insn = bpf_prog->insnsi;
1692 int insn_cnt = bpf_prog->len;
1693 bool seen_exit = false;
1694 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
1699 emit_prologue(&prog, bpf_prog->aux->stack_depth);
1701 for (i = 0; i < insn_cnt; i++, insn++) {
1702 const s32 imm32 = insn->imm;
1703 const bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1704 const bool dstk = insn->dst_reg == BPF_REG_AX ? false : true;
1705 const bool sstk = insn->src_reg == BPF_REG_AX ? false : true;
1706 const u8 code = insn->code;
1707 const u8 *dst = bpf2ia32[insn->dst_reg];
1708 const u8 *src = bpf2ia32[insn->src_reg];
1709 const u8 *r0 = bpf2ia32[BPF_REG_0];
1716 /* ALU operations */
1718 case BPF_ALU | BPF_MOV | BPF_K:
1719 case BPF_ALU | BPF_MOV | BPF_X:
1720 case BPF_ALU64 | BPF_MOV | BPF_K:
1721 case BPF_ALU64 | BPF_MOV | BPF_X:
1722 switch (BPF_SRC(code)) {
1725 /* Special mov32 for zext. */
1726 emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
1729 emit_ia32_mov_r64(is64, dst, src, dstk, sstk,
1730 &prog, bpf_prog->aux);
1733 /* Sign-extend immediate value to dst reg */
1734 emit_ia32_mov_i64(is64, dst, imm32,
1739 /* dst = dst + src/imm */
1740 /* dst = dst - src/imm */
1741 /* dst = dst | src/imm */
1742 /* dst = dst & src/imm */
1743 /* dst = dst ^ src/imm */
1744 /* dst = dst * src/imm */
1745 /* dst = dst << src */
1746 /* dst = dst >> src */
1747 case BPF_ALU | BPF_ADD | BPF_K:
1748 case BPF_ALU | BPF_ADD | BPF_X:
1749 case BPF_ALU | BPF_SUB | BPF_K:
1750 case BPF_ALU | BPF_SUB | BPF_X:
1751 case BPF_ALU | BPF_OR | BPF_K:
1752 case BPF_ALU | BPF_OR | BPF_X:
1753 case BPF_ALU | BPF_AND | BPF_K:
1754 case BPF_ALU | BPF_AND | BPF_X:
1755 case BPF_ALU | BPF_XOR | BPF_K:
1756 case BPF_ALU | BPF_XOR | BPF_X:
1757 case BPF_ALU64 | BPF_ADD | BPF_K:
1758 case BPF_ALU64 | BPF_ADD | BPF_X:
1759 case BPF_ALU64 | BPF_SUB | BPF_K:
1760 case BPF_ALU64 | BPF_SUB | BPF_X:
1761 case BPF_ALU64 | BPF_OR | BPF_K:
1762 case BPF_ALU64 | BPF_OR | BPF_X:
1763 case BPF_ALU64 | BPF_AND | BPF_K:
1764 case BPF_ALU64 | BPF_AND | BPF_X:
1765 case BPF_ALU64 | BPF_XOR | BPF_K:
1766 case BPF_ALU64 | BPF_XOR | BPF_X:
1767 switch (BPF_SRC(code)) {
1769 emit_ia32_alu_r64(is64, BPF_OP(code), dst,
1770 src, dstk, sstk, &prog,
1774 emit_ia32_alu_i64(is64, BPF_OP(code), dst,
1780 case BPF_ALU | BPF_MUL | BPF_K:
1781 case BPF_ALU | BPF_MUL | BPF_X:
1782 switch (BPF_SRC(code)) {
1784 emit_ia32_mul_r(dst_lo, src_lo, dstk,
1789 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
1791 emit_ia32_mul_r(dst_lo, IA32_ECX, dstk,
1795 if (!bpf_prog->aux->verifier_zext)
1796 emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
1798 case BPF_ALU | BPF_LSH | BPF_X:
1799 case BPF_ALU | BPF_RSH | BPF_X:
1800 case BPF_ALU | BPF_ARSH | BPF_K:
1801 case BPF_ALU | BPF_ARSH | BPF_X:
1802 switch (BPF_SRC(code)) {
1804 emit_ia32_shift_r(BPF_OP(code), dst_lo, src_lo,
1809 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
1811 emit_ia32_shift_r(BPF_OP(code), dst_lo,
1812 IA32_ECX, dstk, false,
1816 if (!bpf_prog->aux->verifier_zext)
1817 emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
1819 /* dst = dst / src(imm) */
1820 /* dst = dst % src(imm) */
1821 case BPF_ALU | BPF_DIV | BPF_K:
1822 case BPF_ALU | BPF_DIV | BPF_X:
1823 case BPF_ALU | BPF_MOD | BPF_K:
1824 case BPF_ALU | BPF_MOD | BPF_X:
1825 switch (BPF_SRC(code)) {
1827 emit_ia32_div_mod_r(BPF_OP(code), dst_lo,
1828 src_lo, dstk, sstk, &prog);
1832 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX),
1834 emit_ia32_div_mod_r(BPF_OP(code), dst_lo,
1835 IA32_ECX, dstk, false,
1839 if (!bpf_prog->aux->verifier_zext)
1840 emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
1842 case BPF_ALU64 | BPF_DIV | BPF_K:
1843 case BPF_ALU64 | BPF_DIV | BPF_X:
1844 case BPF_ALU64 | BPF_MOD | BPF_K:
1845 case BPF_ALU64 | BPF_MOD | BPF_X:
1847 /* dst = dst >> imm */
1848 /* dst = dst << imm */
1849 case BPF_ALU | BPF_RSH | BPF_K:
1850 case BPF_ALU | BPF_LSH | BPF_K:
1851 if (unlikely(imm32 > 31))
1852 return -EINVAL;
1854 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
1855 emit_ia32_shift_r(BPF_OP(code), dst_lo, IA32_ECX, dstk,
1857 if (!bpf_prog->aux->verifier_zext)
1858 emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
1860 /* dst = dst << imm */
1861 case BPF_ALU64 | BPF_LSH | BPF_K:
1862 if (unlikely(imm32 > 63))
1863 return -EINVAL;
1864 emit_ia32_lsh_i64(dst, imm32, dstk, &prog);
1866 /* dst = dst >> imm */
1867 case BPF_ALU64 | BPF_RSH | BPF_K:
1868 if (unlikely(imm32 > 63))
1869 return -EINVAL;
1870 emit_ia32_rsh_i64(dst, imm32, dstk, &prog);
1872 /* dst = dst << src */
1873 case BPF_ALU64 | BPF_LSH | BPF_X:
1874 emit_ia32_lsh_r64(dst, src, dstk, sstk, &prog);
1876 /* dst = dst >> src */
1877 case BPF_ALU64 | BPF_RSH | BPF_X:
1878 emit_ia32_rsh_r64(dst, src, dstk, sstk, &prog);
1880 /* dst = dst >> src (signed) */
1881 case BPF_ALU64 | BPF_ARSH | BPF_X:
1882 emit_ia32_arsh_r64(dst, src, dstk, sstk, &prog);
1884 /* dst = dst >> imm (signed) */
1885 case BPF_ALU64 | BPF_ARSH | BPF_K:
1886 if (unlikely(imm32 > 63))
1887 return -EINVAL;
1888 emit_ia32_arsh_i64(dst, imm32, dstk, &prog);
1891 case BPF_ALU | BPF_NEG:
1892 emit_ia32_alu_i(is64, false, BPF_OP(code),
1893 dst_lo, 0, dstk, &prog);
1894 if (!bpf_prog->aux->verifier_zext)
1895 emit_ia32_mov_i(dst_hi, 0, dstk, &prog);
1897 /* dst = -dst (64 bit) */
1898 case BPF_ALU64 | BPF_NEG:
1899 emit_ia32_neg64(dst, dstk, &prog);
1901 /* dst = dst * src/imm */
1902 case BPF_ALU64 | BPF_MUL | BPF_X:
1903 case BPF_ALU64 | BPF_MUL | BPF_K:
1904 switch (BPF_SRC(code)) {
1906 emit_ia32_mul_r64(dst, src, dstk, sstk, &prog);
1909 emit_ia32_mul_i64(dst, imm32, dstk, &prog);
1913 /* dst = htole(dst) */
1914 case BPF_ALU | BPF_END | BPF_FROM_LE:
1915 emit_ia32_to_le_r64(dst, imm32, dstk, &prog,
1918 /* dst = htobe(dst) */
1919 case BPF_ALU | BPF_END | BPF_FROM_BE:
1920 emit_ia32_to_be_r64(dst, imm32, dstk, &prog,
1924 case BPF_LD | BPF_IMM | BPF_DW: {
1928 emit_ia32_mov_i(dst_lo, lo, dstk, &prog);
1929 emit_ia32_mov_i(dst_hi, hi, dstk, &prog);
1934 /* ST: *(u8*)(dst_reg + off) = imm */
1935 case BPF_ST | BPF_MEM | BPF_H:
1936 case BPF_ST | BPF_MEM | BPF_B:
1937 case BPF_ST | BPF_MEM | BPF_W:
1938 case BPF_ST | BPF_MEM | BPF_DW:
1940 /* mov eax,dword ptr [ebp+off] */
1941 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1944 /* mov eax,dst_lo */
1945 EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
1947 switch (BPF_SIZE(code)) {
1949 EMIT(0xC6, 1); break;
1951 EMIT2(0x66, 0xC7); break;
1954 EMIT(0xC7, 1); break;
1957 if (is_imm8(insn->off))
1958 EMIT2(add_1reg(0x40, IA32_EAX), insn->off);
1960 EMIT1_off32(add_1reg(0x80, IA32_EAX),
1962 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(code)));
1964 if (BPF_SIZE(code) == BPF_DW) {
1967 hi = imm32 & (1<<31) ? (u32)~0 : 0;
1968 EMIT2_off32(0xC7, add_1reg(0x80, IA32_EAX),
1974 /* STX: *(u8*)(dst_reg + off) = src_reg */
1975 case BPF_STX | BPF_MEM | BPF_B:
1976 case BPF_STX | BPF_MEM | BPF_H:
1977 case BPF_STX | BPF_MEM | BPF_W:
1978 case BPF_STX | BPF_MEM | BPF_DW:
1980 /* mov eax,dword ptr [ebp+off] */
1981 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
1984 /* mov eax,dst_lo */
1985 EMIT2(0x8B, add_2reg(0xC0, dst_lo, IA32_EAX));
1988 /* mov edx,dword ptr [ebp+off] */
1989 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
1992 /* mov edx,src_lo */
1993 EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_EDX));
1995 switch (BPF_SIZE(code)) {
1997 EMIT(0x88, 1); break;
1999 EMIT2(0x66, 0x89); break;
2002 EMIT(0x89, 1); break;
2005 if (is_imm8(insn->off))
2006 EMIT2(add_2reg(0x40, IA32_EAX, IA32_EDX),
2009 EMIT1_off32(add_2reg(0x80, IA32_EAX, IA32_EDX),
2012 if (BPF_SIZE(code) == BPF_DW) {
2014 /* mov edi,dword ptr [ebp+off] */
2015 EMIT3(0x8B, add_2reg(0x40, IA32_EBP,
2019 /* mov edi,src_hi */
2020 EMIT2(0x8B, add_2reg(0xC0, src_hi,
2023 if (is_imm8(insn->off + 4)) {
2024 EMIT2(add_2reg(0x40, IA32_EAX,
2028 EMIT1(add_2reg(0x80, IA32_EAX,
2030 EMIT(insn->off + 4, 4);
2035 /* LDX: dst_reg = *(u8*)(src_reg + off) */
2036 case BPF_LDX | BPF_MEM | BPF_B:
2037 case BPF_LDX | BPF_MEM | BPF_H:
2038 case BPF_LDX | BPF_MEM | BPF_W:
2039 case BPF_LDX | BPF_MEM | BPF_DW:
2041 /* mov eax,dword ptr [ebp+off] */
2042 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2045 /* mov eax,src_lo */
2046 EMIT2(0x8B, add_2reg(0xC0, src_lo, IA32_EAX));
2048 switch (BPF_SIZE(code)) {
2050 EMIT2(0x0F, 0xB6); break;
2052 EMIT2(0x0F, 0xB7); break;
2055 EMIT(0x8B, 1); break;
2058 if (is_imm8(insn->off))
2059 EMIT2(add_2reg(0x40, IA32_EAX, IA32_EDX),
2062 EMIT1_off32(add_2reg(0x80, IA32_EAX, IA32_EDX),
2066 /* mov dword ptr [ebp+off],edx */
2067 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
2070 /* mov dst_lo,edx */
2071 EMIT2(0x89, add_2reg(0xC0, dst_lo, IA32_EDX));
2072 switch (BPF_SIZE(code)) {
2076 if (!bpf_prog->aux->verifier_zext)
2079 EMIT3(0xC7, add_1reg(0x40, IA32_EBP),
2083 EMIT3(0xC7, add_1reg(0xC0, dst_hi), 0);
2088 add_2reg(0x80, IA32_EAX, IA32_EDX),
2092 add_2reg(0x40, IA32_EBP,
2097 add_2reg(0xC0, dst_hi, IA32_EDX));
2104 case BPF_JMP | BPF_CALL:
2106 const u8 *r1 = bpf2ia32[BPF_REG_1];
2107 const u8 *r2 = bpf2ia32[BPF_REG_2];
2108 const u8 *r3 = bpf2ia32[BPF_REG_3];
2109 const u8 *r4 = bpf2ia32[BPF_REG_4];
2110 const u8 *r5 = bpf2ia32[BPF_REG_5];
2112 if (insn->src_reg == BPF_PSEUDO_CALL)
2113 goto notyet;
2115 func = (u8 *) __bpf_call_base + imm32;
2116 jmp_offset = func - (image + addrs[i]);
2118 if (!imm32 || !is_simm32(jmp_offset)) {
2119 pr_err("unsupported BPF func %d addr %p image %p\n",
2120 imm32, func, image);
2124 /* mov eax,dword ptr [ebp+off] */
2125 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2127 /* mov edx,dword ptr [ebp+off] */
2128 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EDX),
2131 emit_push_r64(r5, &prog);
2132 emit_push_r64(r4, &prog);
2133 emit_push_r64(r3, &prog);
2134 emit_push_r64(r2, &prog);
2136 EMIT1_off32(0xE8, jmp_offset + 9);
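/*
 * The "+ 9" rebases the displacement: jmp_offset was computed against
 * addrs[i], the end of this instruction's code, while a call is
 * relative to the end of the call itself; the 9 bytes emitted after
 * the call (two 3-byte stores of r0 and the 3-byte "add esp,32")
 * account for the difference.
 */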
2138 /* mov dword ptr [ebp+off],eax */
2139 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EAX),
2141 /* mov dword ptr [ebp+off],edx */
2142 EMIT3(0x89, add_2reg(0x40, IA32_EBP, IA32_EDX),
2146 EMIT3(0x83, add_1reg(0xC0, IA32_ESP), 32);
2149 case BPF_JMP | BPF_TAIL_CALL:
2150 emit_bpf_tail_call(&prog);
2154 case BPF_JMP | BPF_JEQ | BPF_X:
2155 case BPF_JMP | BPF_JNE | BPF_X:
2156 case BPF_JMP | BPF_JGT | BPF_X:
2157 case BPF_JMP | BPF_JLT | BPF_X:
2158 case BPF_JMP | BPF_JGE | BPF_X:
2159 case BPF_JMP | BPF_JLE | BPF_X:
2160 case BPF_JMP32 | BPF_JEQ | BPF_X:
2161 case BPF_JMP32 | BPF_JNE | BPF_X:
2162 case BPF_JMP32 | BPF_JGT | BPF_X:
2163 case BPF_JMP32 | BPF_JLT | BPF_X:
2164 case BPF_JMP32 | BPF_JGE | BPF_X:
2165 case BPF_JMP32 | BPF_JLE | BPF_X:
2166 case BPF_JMP32 | BPF_JSGT | BPF_X:
2167 case BPF_JMP32 | BPF_JSLE | BPF_X:
2168 case BPF_JMP32 | BPF_JSLT | BPF_X:
2169 case BPF_JMP32 | BPF_JSGE | BPF_X: {
2170 bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
2171 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
2172 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
2173 u8 sreg_lo = sstk ? IA32_ECX : src_lo;
2174 u8 sreg_hi = sstk ? IA32_EBX : src_hi;
2177 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2181 add_2reg(0x40, IA32_EBP,
2187 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
2191 add_2reg(0x40, IA32_EBP,
2197 /* cmp dreg_hi,sreg_hi */
2198 EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
2201 /* cmp dreg_lo,sreg_lo */
2202 EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
2205 case BPF_JMP | BPF_JSGT | BPF_X:
2206 case BPF_JMP | BPF_JSLE | BPF_X:
2207 case BPF_JMP | BPF_JSLT | BPF_X:
2208 case BPF_JMP | BPF_JSGE | BPF_X: {
2209 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
2210 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
2211 u8 sreg_lo = sstk ? IA32_ECX : src_lo;
2212 u8 sreg_hi = sstk ? IA32_EBX : src_hi;
2215 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2218 add_2reg(0x40, IA32_EBP,
2224 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
2227 add_2reg(0x40, IA32_EBP,
2232 /* cmp dreg_hi,sreg_hi */
2233 EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
2234 EMIT2(IA32_JNE, 10);
2235 /* cmp dreg_lo,sreg_lo */
2236 EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
2237 goto emit_cond_jmp_signed;
2239 case BPF_JMP | BPF_JSET | BPF_X:
2240 case BPF_JMP32 | BPF_JSET | BPF_X: {
2241 bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
2242 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
2243 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
2244 u8 sreg_lo = sstk ? IA32_ECX : src_lo;
2245 u8 sreg_hi = sstk ? IA32_EBX : src_hi;
2248 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2252 add_2reg(0x40, IA32_EBP,
2258 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_ECX),
2262 add_2reg(0x40, IA32_EBP,
2266 /* and dreg_lo,sreg_lo */
2267 EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
2268 /* and dreg_hi,sreg_hi */
2269 EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
2270 /* or dreg_lo,dreg_hi */
2271 EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
2274 case BPF_JMP | BPF_JSET | BPF_K:
2275 case BPF_JMP32 | BPF_JSET | BPF_K: {
2276 bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
2277 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
2278 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
2279 u8 sreg_lo = IA32_ECX;
2280 u8 sreg_hi = IA32_EBX;
2284 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2288 add_2reg(0x40, IA32_EBP,
2294 EMIT2_off32(0xC7, add_1reg(0xC0, sreg_lo), imm32);
2296 /* and dreg_lo,sreg_lo */
2297 EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
2299 hi = imm32 & (1 << 31) ? (u32)~0 : 0;
2301 EMIT2_off32(0xC7, add_1reg(0xC0, sreg_hi), hi);
2302 /* and dreg_hi,sreg_hi */
2303 EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
2304 /* or dreg_lo,dreg_hi */
2305 EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
2309 case BPF_JMP | BPF_JEQ | BPF_K:
2310 case BPF_JMP | BPF_JNE | BPF_K:
2311 case BPF_JMP | BPF_JGT | BPF_K:
2312 case BPF_JMP | BPF_JLT | BPF_K:
2313 case BPF_JMP | BPF_JGE | BPF_K:
2314 case BPF_JMP | BPF_JLE | BPF_K:
2315 case BPF_JMP32 | BPF_JEQ | BPF_K:
2316 case BPF_JMP32 | BPF_JNE | BPF_K:
2317 case BPF_JMP32 | BPF_JGT | BPF_K:
2318 case BPF_JMP32 | BPF_JLT | BPF_K:
2319 case BPF_JMP32 | BPF_JGE | BPF_K:
2320 case BPF_JMP32 | BPF_JLE | BPF_K:
2321 case BPF_JMP32 | BPF_JSGT | BPF_K:
2322 case BPF_JMP32 | BPF_JSLE | BPF_K:
2323 case BPF_JMP32 | BPF_JSLT | BPF_K:
2324 case BPF_JMP32 | BPF_JSGE | BPF_K: {
2325 bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
2326 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
2327 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
2328 u8 sreg_lo = IA32_ECX;
2329 u8 sreg_hi = IA32_EBX;
2333 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2337 add_2reg(0x40, IA32_EBP,
2343 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
2345 hi = imm32 & (1 << 31) ? (u32)~0 : 0;
2347 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
2348 /* cmp dreg_hi,sreg_hi */
2349 EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
2352 /* cmp dreg_lo,sreg_lo */
2353 EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
2355 emit_cond_jmp: jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
2356 if (jmp_cond == COND_JMP_OPCODE_INVALID)
2358 jmp_offset = addrs[i + insn->off] - addrs[i];
2359 if (is_imm8(jmp_offset)) {
2360 EMIT2(jmp_cond, jmp_offset);
2361 } else if (is_simm32(jmp_offset)) {
2362 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2364 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2369 case BPF_JMP | BPF_JSGT | BPF_K:
2370 case BPF_JMP | BPF_JSLE | BPF_K:
2371 case BPF_JMP | BPF_JSLT | BPF_K:
2372 case BPF_JMP | BPF_JSGE | BPF_K: {
2373 u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
2374 u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
2375 u8 sreg_lo = IA32_ECX;
2376 u8 sreg_hi = IA32_EBX;
2380 EMIT3(0x8B, add_2reg(0x40, IA32_EBP, IA32_EAX),
2383 add_2reg(0x40, IA32_EBP,
2389 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_ECX), imm32);
2390 hi = imm32 & (1 << 31) ? (u32)~0 : 0;
2392 EMIT2_off32(0xC7, add_1reg(0xC0, IA32_EBX), hi);
2393 /* cmp dreg_hi,sreg_hi */
2394 EMIT2(0x39, add_2reg(0xC0, dreg_hi, sreg_hi));
2395 EMIT2(IA32_JNE, 10);
2396 /* cmp dreg_lo,sreg_lo */
2397 EMIT2(0x39, add_2reg(0xC0, dreg_lo, sreg_lo));
2400 * For simplicity of branch offset computation,
2401 * let's use fixed jump coding here.
2403 emit_cond_jmp_signed: /* Check the condition for low 32-bit comparison */
2404 jmp_cond = get_cond_jmp_opcode(BPF_OP(code), true);
2405 if (jmp_cond == COND_JMP_OPCODE_INVALID)
2407 jmp_offset = addrs[i + insn->off] - addrs[i] + 8;
2408 if (is_simm32(jmp_offset)) {
2409 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2411 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2416 /* Check the condition for high 32-bit comparison */
2417 jmp_cond = get_cond_jmp_opcode(BPF_OP(code), false);
2418 if (jmp_cond == COND_JMP_OPCODE_INVALID)
2420 jmp_offset = addrs[i + insn->off] - addrs[i];
2421 if (is_simm32(jmp_offset)) {
2422 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
2424 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
2429 case BPF_JMP | BPF_JA:
2430 if (insn->off == -1)
2431 /* -1 jmp instructions will always jump
2432 * backwards two bytes. Explicitly handling
2433 * this case avoids wasting too many passes
2434 * when there are long sequences of replaced
2435 * dead code.
2436 */
2437 break;
2439 jmp_offset = addrs[i + insn->off] - addrs[i];
2441 if (!jmp_offset)
2442 /* Optimize out nop jumps */
2443 break;
2445 if (is_imm8(jmp_offset)) {
2446 EMIT2(0xEB, jmp_offset);
2447 } else if (is_simm32(jmp_offset)) {
2448 EMIT1_off32(0xE9, jmp_offset);
2450 pr_err("jmp gen bug %llx\n", jmp_offset);
2454 /* STX XADD: lock *(u32 *)(dst + off) += src */
2455 case BPF_STX | BPF_XADD | BPF_W:
2456 /* STX XADD: lock *(u64 *)(dst + off) += src */
2457 case BPF_STX | BPF_XADD | BPF_DW:
2459 case BPF_JMP | BPF_EXIT:
2461 jmp_offset = ctx->cleanup_addr - addrs[i];
2465 /* Update cleanup_addr */
2466 ctx->cleanup_addr = proglen;
2467 emit_epilogue(&prog, bpf_prog->aux->stack_depth);
2469 notyet:
2470 pr_info_once("*** NOT YET: opcode %02x ***\n", code);
2471 return -EFAULT;
2472
2473 default:
2474 * This error will be seen if a new instruction was added
2475 * to the interpreter but not to the JIT, or if there is
2476 * junk in bpf_prog.
2477 */
2478 pr_err("bpf_jit: unknown opcode %02x\n", code);
2479 return -EINVAL;
2483 if (ilen > BPF_MAX_INSN_SIZE) {
2484 pr_err("bpf_jit: fatal insn size error\n");
2489 if (unlikely(proglen + ilen > oldproglen)) {
2490 pr_err("bpf_jit: fatal error\n");
2493 memcpy(image + proglen, temp, ilen);
2502 bool bpf_jit_needs_zext(void)
2507 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2509 struct bpf_binary_header *header = NULL;
2510 struct bpf_prog *tmp, *orig_prog = prog;
2511 int proglen, oldproglen = 0;
2512 struct jit_context ctx = {};
2513 bool tmp_blinded = false;
2519 if (!prog->jit_requested)
2520 return orig_prog;
2522 tmp = bpf_jit_blind_constants(prog);
2524 * If blinding was requested and we failed during blinding,
2525 * we must fall back to the interpreter.
2534 addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
2541 * Before the first pass, make a rough estimate of addrs[]:
2542 * each BPF instruction is translated to less than 64 bytes.
2544 for (proglen = 0, i = 0; i < prog->len; i++) {
2548 ctx.cleanup_addr = proglen;
2551 * JITed image shrinks with every pass and the loop iterates
2552 * until the image stops shrinking. Very large BPF programs
2553 * may converge on the last pass. In such a case, do one more
2554 * pass to emit the final image.
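/*
 * Example of why the image only shrinks: the first pass works from
 * the pessimistic 64-bytes-per-insn estimate of addrs[], so some
 * branches are emitted as rel32; as addrs[] tightens on later passes,
 * such a branch may fit rel8 and drop from 6 bytes to 2, tightening
 * the offsets after it in turn, until no encoding changes.
 */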
2556 for (pass = 0; pass < 20 || image; pass++) {
2557 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
2562 bpf_jit_binary_free(header);
2567 if (proglen != oldproglen) {
2568 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2569 proglen, oldproglen);
2574 if (proglen == oldproglen) {
2575 header = bpf_jit_binary_alloc(proglen, &image,
2582 oldproglen = proglen;
2586 if (bpf_jit_enable > 1)
2587 bpf_jit_dump(prog->len, proglen, pass + 1, image);
2590 bpf_jit_binary_lock_ro(header);
2591 prog->bpf_func = (void *)image;
2593 prog->jited_len = proglen;
2602 bpf_jit_prog_release_other(prog, prog == orig_prog ?