// SPDX-License-Identifier: GPL-2.0-only
/*
 * eBPF JIT compiler for PPC32
 *
 * Copyright 2020 Christophe Leroy <christophe.leroy@csgroup.eu>
 *
 * Based on PPC64 eBPF JIT compiler by Naveen N. Rao
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit.h"
/*
 * Stack layout:
 *
 *		[	prev sp		] <-------------
 *		[   nv gpr save area	] 16 * 4	|
 * fp (r31) -->	[   ebpf stack space	] up to 512	|
 *		[     frame header	] 16		|
 * sp (r1) --->	[    stack pointer	] --------------
 */
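/*
 * Worked example, assuming STACK_FRAME_MIN_SIZE is 16 on PPC32: a program
 * using the full 512 bytes of eBPF stack gets BPF_PPC_STACKFRAME =
 * 16 (frame header) + 64 (nv gpr save area) + 512 = 592 bytes, which
 * stays quadword (16-byte) aligned.
 */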
/* for gpr non volatile registers r17 to r31 (15) + tail call */
#define BPF_PPC_STACK_SAVE	(15 * 4 + 4)
/* stack frame, ensure this is quadword aligned */
#define BPF_PPC_STACKFRAME(ctx)	(STACK_FRAME_MIN_SIZE + BPF_PPC_STACK_SAVE + (ctx)->stack_size)
/* BPF register usage */
#define TMP_REG	(MAX_BPF_JIT_REG + 0)

/* BPF to ppc register mappings */
const int b2p[MAX_BPF_JIT_REG + 1] = {
	/* function return value */
	[BPF_REG_0] = 12,
	/* function arguments */
	[BPF_REG_1] = 4,
	[BPF_REG_2] = 6,
	[BPF_REG_3] = 8,
	[BPF_REG_4] = 10,
	[BPF_REG_5] = 22,
	/* non volatile registers */
	[BPF_REG_6] = 24,
	[BPF_REG_7] = 26,
	[BPF_REG_8] = 28,
	[BPF_REG_9] = 30,
	/* frame pointer aka BPF_REG_10 */
	[BPF_REG_FP] = 18,
	/* eBPF jit internal registers */
	[BPF_REG_AX] = 20,
	[TMP_REG] = 31,		/* 32 bits */
};
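/*
 * Each 64-bit BPF register lives in a pair of ppc registers: b2p[reg]
 * holds the low 32 bits and b2p[reg] - 1 the high 32 bits, hence the
 * "dst_reg_h = dst_reg - 1" pattern in bpf_jit_build_body() below.
 * TMP_REG is the only single-width mapping.
 */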
static int bpf_to_ppc(struct codegen_context *ctx, int reg)
{
	return ctx->b2p[reg];
}

/* PPC NVR range -- update this if we ever use NVRs below r17 */
#define BPF_PPC_NVR_MIN	17
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if ((reg >= BPF_PPC_NVR_MIN && reg < 32) || reg == BPF_PPC_TC)
		return BPF_PPC_STACKFRAME(ctx) - 4 * (32 - reg);

	WARN(true, "BPF JIT is asking about unknown registers, will crash the stack");
	/* Use the hole we have left for alignment */
	return BPF_PPC_STACKFRAME(ctx) - 4;
}
void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
	if (ctx->seen & SEEN_FUNC)
		return;

	while (ctx->seen & SEEN_NVREG_MASK &&
	      (ctx->seen & SEEN_VREG_MASK) != SEEN_VREG_MASK) {
		int old = 32 - fls(ctx->seen & (SEEN_NVREG_MASK & 0xaaaaaaab));
		int new = 32 - fls(~ctx->seen & (SEEN_VREG_MASK & 0xaaaaaaaa));
		int i;

		for (i = BPF_REG_0; i <= TMP_REG; i++) {
			if (ctx->b2p[i] != old)
				continue;

			ctx->b2p[i] = new;
			bpf_set_seen_register(ctx, new);
			bpf_clear_seen_register(ctx, old);
			if (i != TMP_REG) {
				bpf_set_seen_register(ctx, new - 1);
				bpf_clear_seen_register(ctx, old - 1);
			}
			break;
		}
	}
}
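/*
 * How the remapping works, assuming the seen-bit encoding of bpf_jit.h
 * (register r is recorded as bit 31 - r of ctx->seen): "32 - fls(mask)"
 * returns the lowest-numbered register present in a mask; 0xaaaaaaaa
 * keeps only the even (low-word) half of each register pair, and the
 * trailing ...b also admits r31, the single-width TMP_REG. Each loop
 * iteration therefore moves one used non-volatile pair into a free
 * volatile pair, until no NVR remains in use or no volatile pair is free.
 */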
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* First arg comes in as a 32-bit pointer. */
	EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_1), _R3));
	EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_1) - 1, 0));
	EMIT(PPC_RAW_STWU(_R1, _R1, -BPF_PPC_STACKFRAME(ctx)));

	/*
	 * Initialize tail_call_cnt in stack frame if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL)
		EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_1) - 1, _R1,
				 bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
	else
		EMIT(PPC_RAW_NOP());
#define BPF_TAILCALL_PROLOGUE_SIZE	16
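/*
 * Those 16 bytes are the four instructions emitted above (mr, li, stwu
 * and stw or nop): a tail call branches to bpf_func +
 * BPF_TAILCALL_PROLOGUE_SIZE, so the target program reuses the caller's
 * stack frame and its tail_call_cnt slot.
 */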
	/*
	 * We need a stack frame, but we don't necessarily need to
	 * save/restore LR unless we call other functions
	 */
	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MFLR(_R0));

	/*
	 * Back up non-volatile regs -- registers r17-r31
	 */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_STW(i, _R1, bpf_jit_stack_offsetof(ctx, i)));

	/* If needed retrieve arguments 9 and 10, i.e. the 5th 64-bit arg. */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
		EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, BPF_PPC_STACKFRAME(ctx) + 8));
		EMIT(PPC_RAW_LWZ(bpf_to_ppc(ctx, BPF_REG_5), _R1, BPF_PPC_STACKFRAME(ctx) + 12));
	}
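	/*
	 * Per the 32-bit ELF ABI, arguments that do not fit in r3-r10 are
	 * passed in the caller's frame, starting 8 bytes above its stack
	 * pointer; stwu moved r1 down by BPF_PPC_STACKFRAME(ctx), hence
	 * the + 8 and + 12 offsets for the two halves of argument 5.
	 */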
	/* Set up the frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_FP))) {
		EMIT(PPC_RAW_LI(bpf_to_ppc(ctx, BPF_REG_FP) - 1, 0));
		EMIT(PPC_RAW_ADDI(bpf_to_ppc(ctx, BPF_REG_FP), _R1,
				  STACK_FRAME_MIN_SIZE + ctx->stack_size));
	}

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_STW(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_PPC_NVR_MIN; i <= 31; i++)
		if (bpf_is_seen_register(ctx, i))
			EMIT(PPC_RAW_LWZ(i, _R1, bpf_jit_stack_offsetof(ctx, i)));
}
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_0)));

	bpf_jit_emit_common_epilogue(image, ctx);

	/* Tear down our stack frame */

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));

	EMIT(PPC_RAW_ADDI(_R1, _R1, BPF_PPC_STACKFRAME(ctx)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MTLR(_R0));

	EMIT(PPC_RAW_BLR());
}
/* Relative offset needs to be calculated based on final image location */
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	s32 rel = (s32)func - (s32)(image + ctx->idx);

	if (image && rel < 0x2000000 && rel >= -0x2000000) {
		PPC_BL_ABS(func);
	} else {
		/* Load function address into r0 */
		EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
		EMIT(PPC_RAW_ORI(_R0, _R0, IMM_L(func)));
		EMIT(PPC_RAW_MTCTR(_R0));
		EMIT(PPC_RAW_BCTRL());
	}
}
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3-r8:
	 * r3-r4/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r5-r6/BPF_REG_2 - pointer to bpf_array
	 * r7-r8/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = bpf_to_ppc(ctx, BPF_REG_2);
	int b2p_index = bpf_to_ppc(ctx, BPF_REG_3);

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(_R0, b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_CMPLW(b2p_index, _R0));
	EMIT(PPC_RAW_LWZ(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));
	PPC_BCC(COND_GE, out);

	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R0, MAX_TAIL_CALL_CNT));
	/* tail_call_cnt++; */
	EMIT(PPC_RAW_ADDIC(_R0, _R0, 1));
	PPC_BCC(COND_GE, out);

	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_RLWINM(_R3, b2p_index, 2, 0, 29));
	EMIT(PPC_RAW_ADD(_R3, _R3, b2p_bpf_array));
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_array, ptrs)));
	EMIT(PPC_RAW_STW(_R0, _R1, bpf_jit_stack_offsetof(ctx, BPF_PPC_TC)));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLWI(_R3, 0));
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT(PPC_RAW_LWZ(_R3, _R3, offsetof(struct bpf_prog, bpf_func)));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_LWZ(_R0, _R1, BPF_PPC_STACKFRAME(ctx) + PPC_LR_STKOFF));

	EMIT(PPC_RAW_ADDIC(_R3, _R3, BPF_TAILCALL_PROLOGUE_SIZE));

	if (ctx->seen & SEEN_FUNC)
		EMIT(PPC_RAW_MTLR(_R0));

	EMIT(PPC_RAW_MTCTR(_R3));

	EMIT(PPC_RAW_MR(_R3, bpf_to_ppc(ctx, BPF_REG_1)));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, int pass)
{
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];

	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = bpf_to_ppc(ctx, insn[i].dst_reg);
		u32 dst_reg_h = dst_reg - 1;
		u32 src_reg = bpf_to_ppc(ctx, insn[i].src_reg);
		u32 src_reg_h = src_reg - 1;
		u32 tmp_reg = bpf_to_ppc(ctx, TMP_REG);
		u32 size = BPF_SIZE(code);
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u32 true_cond;
		u32 tmp_idx;
		int j;
		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;

		/*
		 * As an optimization, we note down which registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= 3 && dst_reg < 32) {
			bpf_set_seen_register(ctx, dst_reg);
			bpf_set_seen_register(ctx, dst_reg_h);
		}

		if (src_reg >= 3 && src_reg < 32) {
			bpf_set_seen_register(ctx, src_reg);
			bpf_set_seen_register(ctx, src_reg_h);
		}

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADDE(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUBFC(dst_reg, src_reg, dst_reg));
			EMIT(PPC_RAW_SUBFE(dst_reg_h, src_reg_h, dst_reg_h));
			break;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
			if (IMM_HA(imm) & 0xffff)
				EMIT(PPC_RAW_ADDIS(dst_reg, dst_reg, IMM_HA(imm)));
			if (IMM_L(imm))
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			break;
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			imm = -imm;
			fallthrough;
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm)
				break;

			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDIC(dst_reg, dst_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_ADDC(dst_reg, dst_reg, _R0));
			}
			if (imm >= 0 || (BPF_OP(code) == BPF_SUB && imm == 0x80000000))
				EMIT(PPC_RAW_ADDZE(dst_reg_h, dst_reg_h));
			else
				EMIT(PPC_RAW_ADDME(dst_reg_h, dst_reg_h));
			break;
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
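			/*
			 * 64x64 multiply modulo 2^64, built from 32-bit
			 * parts: lo(dst)*lo(src) gives the low word (mullw)
			 * plus a carry word (mulhwu); hi(dst)*lo(src) and
			 * lo(dst)*hi(src) only contribute to the high word.
			 */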
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_MULW(_R0, dst_reg, src_reg_h));
			EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_MULHWU(tmp_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, tmp_reg));
			break;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
			if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, imm));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (!imm) {
				PPC_LI32(dst_reg, 0);
				PPC_LI32(dst_reg_h, 0);
				break;
			}
			if (imm == 1)
				break;
			if (imm == -1) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
				break;
			}
			bpf_set_seen_register(ctx, tmp_reg);
			PPC_LI32(tmp_reg, imm);
			EMIT(PPC_RAW_MULW(dst_reg_h, dst_reg_h, tmp_reg));
			if (imm < 0)
				EMIT(PPC_RAW_SUB(dst_reg_h, dst_reg_h, dst_reg));
			EMIT(PPC_RAW_MULHWU(_R0, dst_reg, tmp_reg));
			EMIT(PPC_RAW_MULW(dst_reg, dst_reg, tmp_reg));
			EMIT(PPC_RAW_ADD(dst_reg_h, dst_reg_h, _R0));
			break;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
			EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			EMIT(PPC_RAW_DIVWU(_R0, dst_reg, src_reg));
			EMIT(PPC_RAW_MULW(_R0, src_reg, _R0));
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
			return -EOPNOTSUPP;
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			return -EOPNOTSUPP;
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
			if (!imm)
				return -EINVAL;
			if (imm == 1)
				break;

			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, _R0));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
			if (!imm)
				return -EINVAL;

			if (!is_power_of_2((u32)imm)) {
				bpf_set_seen_register(ctx, tmp_reg);
				PPC_LI32(tmp_reg, imm);
				EMIT(PPC_RAW_DIVWU(_R0, dst_reg, tmp_reg));
				EMIT(PPC_RAW_MULW(_R0, tmp_reg, _R0));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, _R0));
				break;
			}
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2((u32)imm), 31));
			break;
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
			if (!imm)
				return -EINVAL;
			if (imm < 0)
				imm = -imm;
			if (!is_power_of_2(imm))
				return -EOPNOTSUPP;
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 32 - ilog2(imm), 31));
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
			break;
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (!imm)
				return -EINVAL;
			if (!is_power_of_2(abs(imm)))
				return -EOPNOTSUPP;

			if (imm < 0) {
				EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
				EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
				imm = -imm;
			}
			if (imm == 1)
				break;
			imm = ilog2(imm);
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
			EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
			EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
			break;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			break;
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_SUBFIC(dst_reg, dst_reg, 0));
			EMIT(PPC_RAW_SUBFZE(dst_reg_h, dst_reg_h));
			break;

		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_AND(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (imm >= 0)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			fallthrough;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
			if (!IMM_H(imm)) {
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			} else if (!IMM_L(imm)) {
				EMIT(PPC_RAW_ANDIS(dst_reg, dst_reg, IMM_H(imm)));
			} else if (imm == (((1 << fls(imm)) - 1) ^ ((1 << (ffs(imm) - 1)) - 1))) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0,
						    32 - fls(imm), 32 - ffs(imm)));
			} else {
				PPC_LI32(_R0, imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, _R0));
			}
			break;
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			/* Sign-extended */
			if (imm < 0)
				EMIT(PPC_RAW_LI(dst_reg_h, -1));
			fallthrough;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
			if (IMM_L(imm))
				EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
			if (imm & 0xffff0000)
				EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			if (dst_reg == src_reg) {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else {
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
				EMIT(PPC_RAW_XOR(dst_reg_h, dst_reg_h, src_reg_h));
			}
			break;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
			if (dst_reg == src_reg)
				EMIT(PPC_RAW_LI(dst_reg, 0));
			else
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0)
				EMIT(PPC_RAW_NOR(dst_reg_h, dst_reg_h, dst_reg_h));
			fallthrough;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
			if (IMM_L(imm))
				EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
			if (imm & 0xffff0000)
				EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			break;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
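			/*
			 * Branch-free 64-bit shift: slw/srw shift by the low
			 * six bits of the amount and return 0 for amounts
			 * 32-63, so the "32 - src" and "src + 32" terms each
			 * contribute only in their own range (src < 32,
			 * resp. src >= 32) and the unused term is naturally
			 * zero.
			 */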
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SLW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SRW(_R0, dst_reg, _R0));
			EMIT(PPC_RAW_SLW(tmp_reg, dst_reg, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, _R0));
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_OR(dst_reg_h, dst_reg_h, tmp_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm < 0)
				return -EINVAL;
			else if (!imm)
				break;
			else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, imm, 0, 31 - imm));
				EMIT(PPC_RAW_RLWIMI(dst_reg_h, dst_reg, imm, 32 - imm, 31));
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, imm, 0, 31 - imm));
			} else if (imm < 64) {
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg, imm, 0, 31 - imm));
				EMIT(PPC_RAW_LI(dst_reg, 0));
			} else {
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
				EMIT(PPC_RAW_LI(dst_reg, 0));
			}
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
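			/*
			 * Mirror image of the 64-bit left shift above: the
			 * low word collects dst_reg_h bits via "32 - src"
			 * (src < 32) or "src + 32" (src >= 32), with the
			 * out-of-range term again shifting to zero.
			 */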
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
			EMIT(PPC_RAW_SRW(tmp_reg, dst_reg_h, tmp_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_SRW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			else if (!imm)
				break;
			else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_RLWINM(dst_reg_h, dst_reg_h, 32 - imm, imm, 31));
			} else if (imm < 64) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg_h, 64 - imm, imm - 32, 31));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			} else {
				EMIT(PPC_RAW_LI(dst_reg, 0));
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			}
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
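			/*
			 * Like the logical shift, except the high word uses
			 * sraw. The "rlwinm ..., 0, 26, 26" extracts bit 0x20
			 * of src + 32: for src < 32 it is set and the extra
			 * slw by 32 clears the sraw result; for src >= 32 it
			 * is clear and dst_reg_h >> (src - 32) is kept.
			 */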
			bpf_set_seen_register(ctx, tmp_reg);
			EMIT(PPC_RAW_SUBFIC(_R0, src_reg, 32));
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			EMIT(PPC_RAW_SLW(_R0, dst_reg_h, _R0));
			EMIT(PPC_RAW_ADDI(tmp_reg, src_reg, 32));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, _R0));
			EMIT(PPC_RAW_RLWINM(_R0, tmp_reg, 0, 26, 26));
			EMIT(PPC_RAW_SRAW(tmp_reg, dst_reg_h, tmp_reg));
			EMIT(PPC_RAW_SRAW(dst_reg_h, dst_reg_h, src_reg));
			EMIT(PPC_RAW_SLW(tmp_reg, tmp_reg, _R0));
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, tmp_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			if (!imm)
				break;
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm < 0)
				return -EINVAL;
			else if (!imm)
				break;
			else if (imm < 32) {
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 32 - imm, imm, 31));
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg_h, 32 - imm, 0, imm - 1));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, imm));
			} else if (imm < 64) {
				EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, imm - 32));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, 31));
			} else {
				EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg_h, 31));
				EMIT(PPC_RAW_SRAWI(dst_reg_h, dst_reg_h, 31));
			}
			break;

		/*
		 * MOV
		 */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (dst_reg == src_reg)
				break;
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			EMIT(PPC_RAW_MR(dst_reg_h, src_reg_h));
			break;
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
			/* special mov32 for zext */
			if (imm == 1)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));
			else if (dst_reg != src_reg)
				EMIT(PPC_RAW_MR(dst_reg, src_reg));
			break;
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			PPC_EX32(dst_reg_h, imm);
			break;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
			PPC_LI32(dst_reg, imm);
			break;

		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
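			/*
			 * A 32-bit byte swap takes three rotates: one rlwinm
			 * rotating left by 8 leaves bytes 2 and 4 of the
			 * result in place, then two rlwimi rotates by 24
			 * insert bytes 1 and 3 into their mirrored positions.
			 */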
			switch (imm) {
			case 16:
				/* Copy 16 bits to upper part */
				EMIT(PPC_RAW_RLWIMI(dst_reg, dst_reg, 16, 0, 15));
				/* Rotate 8 bits right & mask */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 24, 16, 31));
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(_R0, dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				break;
			case 64:
				bpf_set_seen_register(ctx, tmp_reg);
				EMIT(PPC_RAW_RLWINM(tmp_reg, dst_reg, 8, 0, 31));
				EMIT(PPC_RAW_RLWINM(_R0, dst_reg_h, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 0, 7));
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(tmp_reg, dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_RLWIMI(_R0, dst_reg_h, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, _R0));
				EMIT(PPC_RAW_MR(dst_reg_h, tmp_reg));
				break;
			}
			break;
		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 32 bits */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 16, 31));
				break;
			case 32:
			case 64:
				/* nop */
				break;
			}
			break;
		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			break;

		/*
		 * BPF_ST(X) (store)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STB(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STH(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
			EMIT(PPC_RAW_STW(src_reg_h, dst_reg, off));
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off + 4));
			break;
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			PPC_LI32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off + 4));
			PPC_EX32(_R0, imm);
			EMIT(PPC_RAW_STW(_R0, dst_reg, off));
			break;

		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
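			/*
			 * Classic load-reserve/store-conditional sequence:
			 * lwarx reserves dst + off, stwcx. succeeds only if
			 * the reservation still holds, and the conditional
			 * branch below loops back three instructions to
			 * retry on contention.
			 */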
			if (imm != BPF_ADD) {
				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
						   code, i);
				return -EOPNOTSUPP;
			}

			/* *(u32 *)(dst + off) += src */

			bpf_set_seen_register(ctx, tmp_reg);
			/* Get offset into TMP_REG */
			EMIT(PPC_RAW_LI(tmp_reg, off));
			/* load value from memory into r0 */
			EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
			/* add value from src_reg into this */
			EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
			/* store result back */
			EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, (ctx->idx - 3) * 4);
			break;
		case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */
			return -EOPNOTSUPP;
		/*
		 * BPF_LDX
		 */
		case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				PPC_LI32(_R0, TASK_SIZE - off);
				EMIT(PPC_RAW_CMPLW(src_reg, _R0));
				PPC_BCC(COND_GT, (ctx->idx + 5) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * For BPF_DW case, "li reg_h,0" would be needed when
				 * !fp->aux->verifier_zext. Emit NOP otherwise.
				 *
				 * Note that "li reg_h,0" is emitted for BPF_B/H/W case,
				 * if necessary. So, jump there instead of emitting an
				 * additional "li reg_h,0" instruction.
				 */
				if (size == BPF_DW && !fp->aux->verifier_zext)
					EMIT(PPC_RAW_LI(dst_reg_h, 0));
				else
					EMIT(PPC_RAW_NOP());
				/*
				 * Need to jump two instructions instead of one for BPF_DW case
				 * as there are two load instructions for dst_reg_h & dst_reg
				 * respectively.
				 */
				if (size == BPF_DW)
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
				break;
			}

			if (size != BPF_DW && !fp->aux->verifier_zext)
				EMIT(PPC_RAW_LI(dst_reg_h, 0));

			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				int insn_idx = ctx->idx - 1;
				int jmp_off = 4;

				/*
				 * In case of BPF_DW, two lwz instructions are emitted, one
				 * for higher 32-bit and another for lower 32-bit. So, set
				 * ex->insn to the first of the two and jump over both
				 * instructions in fixup.
				 *
				 * Similarly, with !verifier_zext, two instructions are
				 * emitted for BPF_B/H/W case. So, set ex->insn to the
				 * instruction that could fault and skip over both
				 * instructions.
				 */
				if (size == BPF_DW || !fp->aux->verifier_zext) {
					insn_idx -= 1;
					jmp_off += 4;
				}

				ret = bpf_add_extable_entry(fp, image, pass, ctx, insn_idx,
							    jmp_off, dst_reg);
				if (ret)
					return ret;
			}
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			tmp_idx = ctx->idx;
			PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
			PPC_LI32(dst_reg, (u32)insn[i].imm);
			/* padding to allow full 4 instructions for later patching */
			for (j = ctx->idx - tmp_idx; j < 4; j++)
				EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
			break;
		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;

		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (bpf_is_seen_register(ctx, bpf_to_ppc(ctx, BPF_REG_5))) {
				EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5) - 1, _R1, 8));
				EMIT(PPC_RAW_STW(bpf_to_ppc(ctx, BPF_REG_5), _R1, 12));
			}

			bpf_jit_emit_func_call_rel(image, ctx, func_addr);

			EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0) - 1, _R3));
			EMIT(PPC_RAW_MR(bpf_to_ppc(ctx, BPF_REG_0), _R4));
			break;

		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
				/* unsigned comparison */
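				/*
				 * 64-bit compares are split: compare the
				 * high words first and, only when they are
				 * equal, fall through to the low-word
				 * compare that actually decides the branch.
				 */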
				EMIT(PPC_RAW_CMPLW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
				/* signed comparison */
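				/*
				 * Only the high word carries the sign: the
				 * low words are still compared as unsigned
				 * (cmplw) below.
				 */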
				EMIT(PPC_RAW_CMPW(dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg_h, src_reg_h));
				PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP32 | BPF_JSET | BPF_X:
				EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmplwi
				 */
				if (imm >= 0 && imm < 32768) {
					EMIT(PPC_RAW_CMPLWI(dst_reg_h, 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load ... but unsigned comparison */
					PPC_EX32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg_h, _R0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
				if (imm >= 0 && imm < 65536) {
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					EMIT(PPC_RAW_CMPWI(dst_reg_h, imm < 0 ? -1 : 0));
					PPC_LI32(_R0, imm);
					PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					EMIT(PPC_RAW_CMPLW(dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpwi
				 */
				if (imm >= -32768 && imm < 32768) {
					EMIT(PPC_RAW_CMPWI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_CMPW(dst_reg, _R0));
				}
				break;
			case BPF_JMP | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					if (imm < 0) {
						EMIT(PPC_RAW_CMPWI(dst_reg_h, 0));
						PPC_BCC_SHORT(COND_NE, (ctx->idx + 2) * 4);
					}
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768) {
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(_R0, dst_reg, imm));
				} else {
					PPC_LI32(_R0, imm);
					EMIT(PPC_RAW_AND_DOT(_R0, dst_reg, _R0));
				}
				break;
			}

			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;
		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;

		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n", code, i);
			return -EOPNOTSUPP;
		}
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext &&
		    !insn_is_zext(&insn[i + 1]) && !(BPF_OP(code) == BPF_END && imm == 64))
			EMIT(PPC_RAW_LI(dst_reg_h, 0));
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}