// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler for PPC32
 *
 * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
 *
 * Based on PPC64 eBPF JIT compiler by Naveen N. Rao
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit.h"
/*
 * Stack layout:
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 16 * 4	|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 16		|
 * sp (r1) --->	[    stack pointer	] --------------
 */

/* for gpr non volatile registers r17 to r31 (15) + tail call */
#define BPF_PPC_STACK_SAVE	(15 * 4 + 4)
/* stack frame, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME(ctx)	(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)
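
/*
 * Worked example of the frame size arithmetic, assuming
 * STACK_FRAME_MIN_SIZE is 16 on PPC32 and ctx->stack_size is kept
 * 16-byte aligned by the setup code: a program using the full 512 byte
 * eBPF stack gets 16 + (15 * 4 + 4) + 512 = 592 bytes, a multiple of
 * 16, so the stwu in the prologue keeps r1 quadword aligned.
 */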
#define PPC_EX32(r, i)		EMIT(PPC_RAW_LI((r), (i) < 0 ? -1 : 0))

/* PPC NVR range -- update this if we ever use NVRs below r17 */
#define BPF_PPC_NVR_MIN		_R17
#define BPF_PPC_TC		_R16

/* BPF register usage */
#define TMP_REG	(MAX_BPF_JIT_REG + 0)

/* BPF to ppc register mappings */
void bpf_jit_init_reg_mapping(struct codegen_context *ctx)
{
	/* function return value */
	ctx->b2p[BPF_REG_0] = _R12;
	/* function arguments */
	ctx->b2p[BPF_REG_1] = _R4;
	ctx->b2p[BPF_REG_2] = _R6;
	ctx->b2p[BPF_REG_3] = _R8;
	ctx->b2p[BPF_REG_4] = _R10;
	ctx->b2p[BPF_REG_5] = _R22;
	/* non volatile registers */
	ctx->b2p[BPF_REG_6] = _R24;
	ctx->b2p[BPF_REG_7] = _R26;
	ctx->b2p[BPF_REG_8] = _R28;
	ctx->b2p[BPF_REG_9] = _R30;
	/* frame pointer aka BPF_REG_10 */
	ctx->b2p[BPF_REG_FP] = _R18;
	/* eBPF jit internal registers */
	ctx->b2p[BPF_REG_AX] = _R20;
	ctx->b2p[TMP_REG] = _R31;		/* 32 bits */
}
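
/*
 * Each 64-bit BPF register lives in a pair of GPRs: the table above
 * records the even GPR holding the low 32 bits, and the odd GPR just
 * below it (reg - 1) holds the high 32 bits. This is why the body code
 * derives dst_reg_h/src_reg_h as dst_reg - 1/src_reg - 1. TMP_REG is
 * the one single-width exception, as the comment above flags.
 */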

static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
		return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);

	WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
	/* Use the hole we have left for alignment */
	return BPF_PPC_STACKFRAME(ctx) - 4;
}

#define SEEN_VREG_MASK		0x1ff80000 /* Volatile registers r3-r12 */
#define SEEN_NVREG_FULL_MASK	0x0003ffff /* Non volatile registers r14-r31 */
#define SEEN_NVREG_TEMP_MASK	0x00001e01 /* BPF_REG_5, BPF_REG_AX, TMP_REG */
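
/*
 * The ctx->seen bitmap (see bpf_jit.h) tracks GPR r at bit (31 - r):
 * r3-r12 land on bits 28 down to 19 (0x1ff80000) and r14-r31 on bits
 * 17 down to 0 (0x0003ffff). SEEN_NVREG_TEMP_MASK (0x00001e01) covers
 * the pairs r21/r22 (BPF_REG_5) and r19/r20 (BPF_REG_AX) plus r31
 * (TMP_REG) -- registers whose contents never need to survive a helper
 * call, so they may still be moved into volatile GPRs when SEEN_FUNC
 * is set.
 */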

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - we use non volatile registers, or
	 * - we use the tail call counter, or
	 * - the bpf program uses its stack area.
	 * The latter condition is deduced from the usage of BPF_REG_FP.
	 */
	return ctx->seen & (SEEN_FUNC | SEEN_TAILCALL | SEEN_NVREG_FULL_MASK) ||
	       bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP));
}

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
	unsigned int nvreg_mask;

	if (ctx->seen & SEEN_FUNC)
		nvreg_mask = SEEN_NVREG_TEMP_MASK;
	else
		nvreg_mask = SEEN_NVREG_FULL_MASK;

	while (ctx->seen & nvreg_mask &&
	      (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
		int old = 32 - fls(ctx->seen & (nvreg_mask & 0xaaaaaaab));
		int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
		int i;

		for (i = BPF_REG_0; i <= TMP_REG; i++) {
			if (ctx->b2p[i] != old)
				continue;
			ctx->b2p[i] = new;
			bpf_set_seen_register(ctx, new);
			bpf_clear_seen_register(ctx, old);
			if (i != TMP_REG) {
				bpf_set_seen_register(ctx, new - 1);
				bpf_clear_seen_register(ctx, old - 1);
			}
			break;
		}
	}
}
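
/*
 * Register reallocation sketch: as long as some remappable BPF register
 * still sits in a non-volatile GPR (nvreg_mask) while a volatile GPR
 * pair is unused, the loop above moves it down. 'old' is the
 * lowest-numbered seen NVR candidate -- 0xaaaaaaab selects the even
 * (low-word) GPRs plus r31 for the 32-bit TMP_REG -- and 'new' is the
 * lowest-numbered free volatile pair (0xaaaaaaaa). The b2p[] entry is
 * rewritten and the seen bitmap updated; TMP_REG alone moves without a
 * high-word companion, hence the i != TMP_REG test.
 */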

void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Initialize tail_call_cnt, to be skipped if we do tail calls. */
	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_LI(_R4, 0));
	else
		EMIT(PPC_RAW_NOP());

#define BPF_TAILCALL_PROLOGUE_SIZE	4

	if (bpf_has_stack_frame(ctx))
		EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_STW(_R4, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));

	/* First arg comes in as a 32 bits pointer. */
	EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_1), _R3));
	EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_1) - 1, 0));

	/*
	 * We need a stack frame, but we don't necessarily need to
	 * save/restore LR unless we call other functions
	 */
	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MFLR(_R0));

	/*
	 * Back up non-volatile regs -- registers r17-r31
	 */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_FP))) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(BPF_REG_FP) - 1, 0));
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(BPF_REG_FP), _R1,
				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
	}

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
}
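
/*
 * BPF_TAILCALL_PROLOGUE_SIZE is the size in bytes of the first
 * instruction above (the li/nop that seeds tail_call_cnt). A tail call
 * enters at prologue + 4, skipping that instruction so the callee
 * keeps the caller's count, which bpf_jit_emit_tail_call() below
 * re-materializes in r4.
 */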

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx))
		EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MTLR(_R0));
}

void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_0)));

	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BLR());
}

/* Relative offset needs to be calculated based on final image location */
int bpf_jit_emit_func_call_rel(u32 *image, u32 *fimage, struct codegen_context *ctx, u64 func)
{
	s32 rel = (s32)func - (s32)(fimage + ctx->idx);

	if (image && rel < 0x2000000 && rel >= -0x2000000) {
		EMIT(PPC_RAW_BL(rel));
	} else {
		/* Load function address into r0 */
		EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
		EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
		EMIT(PPC_RAW_MTCTR(_R0));
		EMIT(PPC_RAW_BCTRL());
	}

	return 0;
}
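
/*
 * The +/-0x2000000 window above is the reach of the 'bl' instruction:
 * its LI field is a 24-bit signed word offset, i.e. +/-32 MB from the
 * call site. Anything farther away (or not yet known on the first
 * pass, when image is NULL) goes through the indirect mtctr/bctrl
 * sequence instead.
 */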

static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3-r8:
	 * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r5-r6/BPF_REG_2 - pointer to bpf_array
	 * r7-r8/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(BPF_REG_2);
	int b2p_index = bpf_to_ppc(BPF_REG_3);

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
	EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
	PPC_BCC_SHORT(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
	/* tail_call_cnt++; */
	EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
	PPC_BCC_SHORT(COND_GE, out);

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
	EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R3, 0));
	PPC_BCC_SHORT(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));
	EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));
	EMIT(PPC_RAW_MTCTR(_R3));

	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(BPF_REG_1)));

	/* Put tail_call_cnt in r4 */
	EMIT(PPC_RAW_MR(_R4, _R0));

	/* Tear down the stack frame, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}

/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, u32 *fimage, struct codegen_context *ctx,
		       u32 *addrs, int pass, bool extra_pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 prevcode = i ? insn[i - 1].code : 0;
		u32 dst_reg = bpf_to_ppc(insn[i].dst_reg);
		u32 dst_reg_h = dst_reg - 1;
		u32 src_reg = bpf_to_ppc(insn[i].src_reg);
		u32 src_reg_h = src_reg - 1;
		u32 src2_reg = dst_reg;
		u32 src2_reg_h = dst_reg_h;
		u32 ax_reg = bpf_to_ppc(BPF_REG_AX);
		u32 tmp_reg = bpf_to_ppc(TMP_REG);
		u32 size = BPF_SIZE(code);
		u32 save_reg, ret_reg;
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		if (i && (BPF_CLASS(code) == BPF_ALU64 || BPF_CLASS(code) == BPF_ALU) &&
		    (BPF_CLASS(prevcode) == BPF_ALU64 || BPF_CLASS(prevcode) == BPF_ALU) &&
		    BPF_OP(prevcode) == BPF_MOV && BPF_SRC(prevcode) == BPF_X &&
		    insn[i - 1].dst_reg == insn[i].dst_reg && insn[i - 1].imm != 1) {
			src2_reg = bpf_to_ppc(insn[i - 1].src_reg);
			src2_reg_h = src2_reg - 1;
			ctx->idx = addrs[i - 1] / 4;
		}
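
		/*
		 * The test above fuses a preceding "mov dst, src" with the
		 * current ALU op: the mov's output is discarded (ctx->idx is
		 * rewound to where the mov was emitted) and the ALU op reads
		 * its second operand straight from the mov's source via
		 * src2_reg/src2_reg_h, saving the register copy. The
		 * "imm != 1" check keeps the special mov32-for-zext marker
		 * out of this optimization.
		 */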

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= 3 && dst_reg < 32) {
			bpf_set_seen_register(ctx, dst_reg);
			bpf_set_seen_register(ctx, dst_reg_h);
		}

		if (src_reg >= 3 && src_reg < 32) {
			bpf_set_seen_register(ctx, src_reg);
			bpf_set_seen_register(ctx, src_reg_h);
		}

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
			EMIT(PPC_RAW_ADD(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_ADDE(dst_reg_h, src2_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
			EMIT(PPC_RAW_SUB(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, src2_reg));
			EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, src2_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (IMM_HA(imm) & 0xffff) {
				EMIT(PPC_RAW_ADDIS(dst_reg, src2_reg, IMM_HA(imm)));
				src2_reg = dst_reg;
			}
			if (IMM_L(imm))
				EMIT(PPC_RAW_ADDI(dst_reg, src2_reg, IMM_L(imm)));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
				break;
			}
			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDIC(dst_reg, src2_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_ADDC(dst_reg, src2_reg, _R0));
			}
			if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
				EMIT(PPC_RAW_ADDZE(dst_reg_h, src2_reg_h));
			else
				EMIT(PPC_RAW_ADDME(dst_reg_h, src2_reg_h));
			break;
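		/*
		 * 64-bit multiply, as arithmetic: with dst = 2^32*dh + dl and
		 * src = 2^32*sh + sl,
		 *   dst * src mod 2^64 = 2^32*(dh*sl + dl*sh + mulhwu(dl, sl)) + dl*sl
		 * where mulhwu(dl, sl) is the carry out of the low-word
		 * product. The mulw/mulhwu instructions emitted below compute
		 * exactly those four partial products.
		 */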
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_MULW(_R0, src2_reg, src_reg_h));
			EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_MULHWU(tmp_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
			EMIT(PPC_RAW_MULW(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
			if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (imm == -1) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
			} else if (is_power_of_2((u32)imm)) {
				EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, ilog2(imm)));
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_MULI(dst_reg, src2_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_MULW(dst_reg, src2_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (!imm) {
				PPC_LI32(dst_reg, 0);
				PPC_LI32(dst_reg_h, 0);
			} else if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else if (imm == -1) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
			} else if (imm > 0 && is_power_of_2(imm)) {
				imm = ilog2(imm);
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm));
				EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31));
				EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm));
			} else {
				bpf_set_seen_register(ctx, tmp_reg);
				PPC_LI32(tmp_reg, imm);
				EMIT(PPC_RAW_MULW(dst_reg_h, src2_reg_h, tmp_reg));
				if (imm < 0)
					EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, src2_reg));
				EMIT(PPC_RAW_MULHWU(_R0, src2_reg, tmp_reg));
				EMIT(PPC_RAW_MULW(dst_reg, src2_reg, tmp_reg));
				EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			}
			break;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
			EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			EMIT(PPC_RAW_DIVWU(_R0, src2_reg, src_reg));
			EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
			EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
			return -EOPNOTSUPP;
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			return -EOPNOTSUPP;
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
			if (!imm)
				return -EINVAL;
			if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (is_power_of_2((u32)imm)) {
				EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, ilog2(imm)));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_DIVWU(dst_reg, src2_reg, _R0));
			}
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
			if (!imm)
				return -EINVAL;

			if (!is_power_of_2((u32)imm)) {
				bpf_set_seen_register(ctx, tmp_reg);
				PPC_LI32(tmp_reg, imm);
				EMIT(PPC_RAW_DIVWU(_R0, src2_reg, tmp_reg));
				EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
				EMIT(PPC_RAW_SUB(dst_reg, src2_reg, _R0));
			} else if (imm == 1) {
				EMIT(PPC_RAW_LI(dst_reg, 0));
			} else {
				imm = ilog2((u32)imm);
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - imm, 31));
			}
			break;
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
			if (!imm)
				return -EINVAL;
			if (!is_power_of_2(imm))
				return -EOPNOTSUPP;
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 32 - ilog2(imm), 31));
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (!imm)
				return -EINVAL;
			if (!is_power_of_2(abs(imm)))
				return -EOPNOTSUPP;

			if (imm < 0) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
				imm = -imm;
				src2_reg = dst_reg;
				src2_reg_h = dst_reg_h;
			}
			if (imm == 1) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else {
				imm = ilog2(imm);
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm));
			}
			break;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_SUBFIC(dst_reg, src2_reg, 0));
			EMIT(PPC_RAW_SUBFZE(dst_reg_h, src2_reg_h));
			break;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_AND(dst_reg_h, src2_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (imm >= 0)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			fallthrough;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
			if (!IMM_H(imm)) {
				EMIT(PPC_RAW_ANDI(dst_reg, src2_reg, IMM_L(imm)));
			} else if (!IMM_L(imm)) {
				EMIT(PPC_RAW_ANDIS(dst_reg, src2_reg, IMM_H(imm)));
			} else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(imm) - 1)) - 1))) {
				/* imm is a contiguous bit mask, use one rlwinm */
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0,
						    32 - fls(imm), 32 - ffs(imm)));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_AND(dst_reg, src2_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, src2_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
			EMIT(PPC_RAW_OR(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			/* Sign-extended */
			if (imm < 0)
				EMIT(PPC_RAW_LI(dst_reg_h, -1));
			fallthrough;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
			if (IMM_L(imm)) {
				EMIT(PPC_RAW_ORI(dst_reg, src2_reg, IMM_L(imm)));
				src2_reg = dst_reg;
			}
			if (IMM_H(imm))
				EMIT(PPC_RAW_ORIS(dst_reg, src2_reg, IMM_H(imm)));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			if (dst_reg == src_reg) {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else {
				EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg));
				EMIT(PPC_RAW_XOR(dst_reg_h, src2_reg_h, src_reg_h));
			}
			break;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
			if (dst_reg == src_reg)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_XOR(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0)
				EMIT(PPC_RAW_NOR(dst_reg_h, src2_reg_h, src2_reg_h));
			fallthrough;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
			if (IMM_L(imm)) {
				EMIT(PPC_RAW_XORI(dst_reg, src2_reg, IMM_L(imm)));
				src2_reg = dst_reg;
			}
			if (IMM_H(imm))
				EMIT(PPC_RAW_XORIS(dst_reg, src2_reg, IMM_H(imm)));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg));
			break;
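		/*
		 * 64-bit shifts by a register amount have no single PPC32
		 * instruction, so the cases below stitch the result from up
		 * to three partial shifts. For dst <<= n with n in 0..63 the
		 * high word is
		 *   (src_h << n) | (src_l >> (32 - n)) | (src_l << (n + 32))
		 * where slw/srw take their shift amount from the low 6 bits
		 * of the register and return 0 for amounts 32..63, so
		 * whichever terms do not apply for a given n vanish on their
		 * own. The >> variants below mirror the same construction.
		 */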
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SLW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SRW(_R0, src2_reg, _R0));
			EMIT(PPC_RAW_SLW(tmp_reg, src2_reg, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_SLW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			if (imm)
				EMIT(PPC_RAW_SLWI(dst_reg, src2_reg, imm));
			else
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			} else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, imm, 0, 31 - imm));
				EMIT(PPC_RAW_RLWIMI(dst_reg_h, src2_reg, imm, 32 - imm, 31));
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, imm, 0, 31 - imm));
			} else if (imm < 64) {
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg, imm, 0, 31 - imm));
				EMIT(PPC_RAW_LI(dst_reg, 0));
			} else {
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
				EMIT(PPC_RAW_LI(dst_reg, 0));
			}
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0));
			EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_SRW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			if (imm)
				EMIT(PPC_RAW_SRWI(dst_reg, src2_reg, imm));
			else
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_RLWINM(dst_reg_h, src2_reg_h, 32 - imm, imm, 31));
			} else if (imm < 64) {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg_h, 64 - imm, imm - 32, 31));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			}
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, src2_reg, src_reg));
			break;
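		/*
		 * The 64-bit arithmetic shift below needs one extra trick on
		 * top of the logical-shift construction: sraw with a shift
		 * amount of 32..63 produces pure sign-fill, which is right
		 * for the high word but must be suppressed in the low word
		 * when the amount is below 32. The rlwinm on tmp_reg isolates
		 * bit 5 (value 32) of "amount + 32" into _R0, and the final
		 * slw by _R0 (0 or 32) zeroes the sraw contribution exactly
		 * when it does not apply.
		 */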
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, src2_reg, src_reg));
			EMIT(PPC_RAW_SLW(_R0, src2_reg_h, _R0));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
			EMIT(PPC_RAW_SRAW(tmp_reg, src2_reg_h, tmp_reg));
			EMIT(PPC_RAW_SRAW(dst_reg_h, src2_reg_h, src_reg));
			EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			if (imm)
				EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg, imm));
			else
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			if (!imm) {
				EMIT(PPC_RAW_MR(dst_reg, src2_reg));
				EMIT(PPC_RAW_MR(dst_reg_h, src2_reg_h));
			} else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, imm));
			} else if (imm < 64) {
				EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, imm - 32));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
			} else {
				EMIT(PPC_RAW_SRAWI(dst_reg, src2_reg_h, 31));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, src2_reg_h, 31));
			}
			break;

		/*
		 * MOV
		 */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (dst_reg == src_reg)
				break;
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
			/* special mov32 for zext */
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			else if (dst_reg != src_reg)
				EMIT(PPC_RAW_MR(dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			PPC_EX32(dst_reg_h, imm);
			break;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
			PPC_LI32(dst_reg, imm);
			break;
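
		/*
		 * Endianness conversions. The 32-bit swap below uses the
		 * classic three-instruction PPC rotate idiom: a rotate left
		 * by 8 already puts bytes 2 and 4 in place, then two rlwimi
		 * rotate-and-insert ops deposit bytes 1 and 3. The 64-bit
		 * swap applies the same idiom to both words and exchanges
		 * them with the two trailing mr instructions.
		 */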
		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm) {
			case 16:
				/* Copy 16 bits to upper part */
				EMIT(PPC_RAW_RLWIMI(dst_reg, src2_reg, 16, 0, 15));
				/* Rotate 8 bits right & mask */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(_R0, src2_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				break;
			case 64:
				bpf_set_seen_register(ctx, tmp_reg);
				EMIT(PPC_RAW_RLWINM(tmp_reg, src2_reg, 8, 0, 31));
				EMIT(PPC_RAW_RLWINM(_R0, src2_reg_h, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 0, 7));
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, src2_reg, 24, 16, 23));
				EMIT(PPC_RAW_RLWIMI(_R0, src2_reg_h, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
				break;
			}
			break;
		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 32 bits */
				EMIT(PPC_RAW_RLWINM(dst_reg, src2_reg, 0, 16, 31));
				break;
			case 32:
			case 64:
				/* nop, data is already big endian */
				break;
			}
			break;

		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			break;

		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			EMIT(PPC_RAW_LI(_R0, imm));
			EMIT(PPC_RAW_STB(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			EMIT(PPC_RAW_LI(_R0, imm));
			EMIT(PPC_RAW_STH(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
			break;
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
			PPC_EX32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;
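
		/*
		 * The atomic cases below rely on the lwarx/stwcx. pair:
		 * lwarx loads a word and establishes a reservation on it,
		 * and the matching stwcx. stores only if the reservation is
		 * still intact, recording the outcome in CR0. The
		 * PPC_BCC_SHORT(COND_NE, tmp_idx) emitted after the store
		 * loops back to the lwarx to retry whenever another
		 * processor touched the word in between.
		 */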
		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
			save_reg = _R0;
			ret_reg = src_reg;

			bpf_set_seen_register(ctx, tmp_reg);
			bpf_set_seen_register(ctx, ax_reg);

			/* Get offset into TMP_REG */
			EMIT(PPC_RAW_LI(tmp_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into r0 */
			EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));

			/* Save old value in BPF_REG_AX */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(ax_reg, _R0));

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
				break;
			case BPF_CMPXCHG:
				/*
				 * Return old value in BPF_REG_0 for BPF_CMPXCHG &
				 * in src_reg for other cases.
				 */
				ret_reg = bpf_to_ppc(BPF_REG_0);

				/* Compare with old value in BPF_REG_0 */
				EMIT(PPC_RAW_CMPW(bpf_to_ppc(BPF_REG_0), _R0));
				/* Don't set if different from old value */
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 3) * 4);
				/* Move new value into save_reg */
				EMIT(PPC_RAW_MR(save_reg, src_reg));
				break;
			case BPF_XCHG:
				save_reg = src_reg;
				break;
			default:
				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
						   code, i);
				return -EOPNOTSUPP;
			}

			/* store new value */
			EMIT(PPC_RAW_STWCX(save_reg, tmp_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);

			/* For the BPF_FETCH variant, get old data into src_reg */
			if (imm & BPF_FETCH) {
				EMIT(PPC_RAW_MR(ret_reg, ax_reg));
				if (!fp->aux->verifier_zext)
					EMIT(PPC_RAW_LI(ret_reg - 1, 0)); /* higher 32-bit */
			}
			break;

		case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
			return -EOPNOTSUPP;

		/*
		 * BPF_LDX
		 */
		case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				PPC_LI32(_R0, TASK_SIZE - off);
				EMIT(PPC_RAW_CMPLW(src_reg, _R0));
				PPC_BCC_SHORT(COND_GT, (ctx->idx + 4) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * For BPF_DW case, "li reg_h,0" would be needed when
				 * !fp->aux->verifier_zext. Emit NOP otherwise.
				 *
				 * Note that "li reg_h,0" is emitted for BPF_B/H/W case,
				 * if necessary. So, jump there instead of emitting an
				 * additional "li reg_h,0" instruction.
				 */
				if (size == BPF_DW && !fp->aux->verifier_zext)
					EMIT(PPC_RAW_LI(dst_reg_h, 0));
				else
					EMIT(PPC_RAW_NOP());
				/*
				 * Need to jump two instructions instead of one for BPF_DW case
				 * as there are two load instructions for dst_reg_h & dst_reg
				 * respectively.
				 */
				if (size == BPF_DW)
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
				break;
			}

			if (size != BPF_DW && !fp->aux->verifier_zext)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				int insn_idx = ctx->idx - 1;
				int jmp_off = 4;

				/*
				 * In case of BPF_DW, two lwz instructions are emitted, one
				 * for higher 32-bit and another for lower 32-bit. So, set
				 * ex->insn to the first of the two and jump over both
				 * instructions in fixup.
				 *
				 * Similarly, with !verifier_zext, two instructions are
				 * emitted for BPF_B/H/W case. So, set ex->insn to the
				 * instruction that could fault and skip over both
				 * instructions.
				 */
				if (size == BPF_DW || !fp->aux->verifier_zext) {
					insn_idx -= 1;
					jmp_off += 4;
				}

				ret = bpf_add_extable_entry(fp, image, fimage, pass, ctx, insn_idx,
							    jmp_off, dst_reg);
				if (ret)
					return ret;
			}
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			tmp_idx = ctx->idx;
			PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
			PPC_LI32(dst_reg, (u32)insn[i].imm);
			/* padding to allow full 4 instructions for later patching */
			if (!image)
				for (j = ctx->idx - tmp_idx; j < 4; j++)
					EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1) {
				ret = bpf_jit_emit_exit_insn(image, ctx, _R0, exit_addr);
				if (ret)
					return ret;
			}
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (bpf_is_seen_register(ctx, bpf_to_ppc(BPF_REG_5))) {
				EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5) - 1, _R1, 8));
				EMIT(PPC_RAW_STW(bpf_to_ppc(BPF_REG_5), _R1, 12));
			}

			ret = bpf_jit_emit_func_call_rel(image, fimage, ctx, func_addr);
			if (ret)
				return ret;

			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0) - 1, _R3));
			EMIT(PPC_RAW_MR(bpf_to_ppc(BPF_REG_0), _R4));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;

		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			fallthrough;
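		/*
		 * Two-level dispatch: the cases above only choose the CR
		 * condition under which the branch is taken; the switch at
		 * cond_branch below emits the actual comparison (unsigned
		 * cmplw, signed cmpw, or and. for JSET, with an extra compare
		 * for the high word of 64-bit operands), and the PPC_BCC at
		 * the end emits the single conditional branch.
		 */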
		cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmplwi
				 */
				if (imm >= 0 && imm < 32768) {
					EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load ... but unsigned comparison */
					PPC_EX32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpwi
				 */
				if (imm >= -32768 && imm < 32768) {
					EMIT(PPC_RAW_CMPWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					if (imm < 0) {
						EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
						PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					}
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;

		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i);
			return -EOPNOTSUPP;
		}
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
		    !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}