// SPDX-License-Identifier: GPL-2.0-only
/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *		  IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <asm/asm-compat.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>
#include <asm/security_features.h>

#include "bpf_jit64.h"
static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
	/*
	 * We only need a stack frame if:
	 * - we call other functions (kernel helpers), or
	 * - the bpf program uses its stack area
	 * The latter condition is deduced from the usage of BPF_REG_FP
	 */
	return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, b2p[BPF_REG_FP]);
}
/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *		[	prev sp		] <-------------
 *		[	  ...		]		|
 * sp (r1) --->	[    stack pointer	] --------------
 *		[   nv gpr save area	] 5*8
 *		[    tail_call_cnt	] 8
 *		[    local_tmp_var	] 16
 *		[   unused red zone	] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
	if (bpf_has_stack_frame(ctx))
		return STACK_FRAME_MIN_SIZE + ctx->stack_size;
	else
		return -(BPF_PPC_STACK_SAVE + 24);
}
static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
	return bpf_jit_stack_local(ctx) + 16;
}
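/*
 * Worked example (derived from the layout diagram above, with the nv gpr
 * save area BPF_PPC_STACK_SAVE = 5*8 = 40 bytes): without a stack frame,
 * local_tmp_var sits at -(40 + 24) = -64(r1) and tail_call_cnt at
 * -64 + 16 = -48(r1), i.e. -(BPF_PPC_STACK_SAVE + 8) -- exactly the slot
 * the prologue initializes below.
 */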
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
	if (reg >= BPF_PPC_NVR_MIN && reg < 32)
		return (bpf_has_stack_frame(ctx) ?
			(BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
				- (8 * (32 - reg));

	pr_err("BPF JIT is asking about unknown registers");
	BUG();
}

void bpf_jit_realloc_regs(struct codegen_context *ctx)
{
}
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/*
	 * Initialize tail_call_cnt if we do tail calls.
	 * Otherwise, put in NOPs so that it can be skipped when we are
	 * invoked through a tail call.
	 */
	if (ctx->seen & SEEN_TAILCALL) {
		EMIT(PPC_RAW_LI(b2p[TMP_REG_1], 0));
		/* this goes in the redzone */
		PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
	} else {
		EMIT(PPC_RAW_NOP());
		EMIT(PPC_RAW_NOP());
	}

#define BPF_TAILCALL_PROLOGUE_SIZE	8
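	/*
	 * The two instructions above (li + std, or the two nops) are exactly
	 * BPF_TAILCALL_PROLOGUE_SIZE = 8 bytes: a tail call enters at
	 * prologue + 8, so a chained program never resets the shared counter.
	 */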
	if (bpf_has_stack_frame(ctx)) {
		/*
		 * We need a stack frame, but we don't necessarily need to
		 * save/restore LR unless we call other functions
		 */
		if (ctx->seen & SEEN_FUNC) {
			EMIT(PPC_RAW_MFLR(_R0));
			PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
		}

		PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
	}

	/*
	 * Back up non-volatile regs -- BPF registers 6-10
	 * If we haven't created our own stack frame, we save these
	 * in the protected zone below the previous stack frame
	 */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Setup frame pointer to point to the bpf stack area */
	if (bpf_is_seen_register(ctx, b2p[BPF_REG_FP]))
		EMIT(PPC_RAW_ADDI(b2p[BPF_REG_FP], 1,
				STACK_FRAME_MIN_SIZE + ctx->stack_size));
}
static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
	int i;

	/* Restore NVRs */
	for (i = BPF_REG_6; i <= BPF_REG_10; i++)
		if (bpf_is_seen_register(ctx, b2p[i]))
			PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

	/* Tear down our stack frame */
	if (bpf_has_stack_frame(ctx)) {
		EMIT(PPC_RAW_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size));
		if (ctx->seen & SEEN_FUNC) {
			PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
			EMIT(PPC_RAW_MTLR(0));
		}
	}
}
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
	bpf_jit_emit_common_epilogue(image, ctx);

	/* Move result to r3 */
	EMIT(PPC_RAW_MR(3, b2p[BPF_REG_0]));

	EMIT(PPC_RAW_BLR());
}
static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx,
				       u64 func)
{
#ifdef PPC64_ELF_ABI_v1
	/* func points to the function descriptor */
	PPC_LI64(b2p[TMP_REG_2], func);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
	/* ... and move it to CTR */
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
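	/*
	 * (An ELFv1 function descriptor holds the entry point at offset 0
	 * and the callee's TOC pointer at offset 8, which is why the two
	 * loads above use exactly those offsets.)
	 */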
#else
	/* We can clobber r12 */
	PPC_FUNC_ADDR(12, func);
	EMIT(PPC_RAW_MTCTR(12));
#endif
	EMIT(PPC_RAW_BCTRL());
}
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func)
{
	unsigned int i, ctx_idx = ctx->idx;

	/* Load function address into r12 */
	PPC_LI64(12, func);

	/* For bpf-to-bpf function calls, the callee's address is unknown
	 * until the last extra pass. As seen above, we use PPC_LI64() to
	 * load the callee's address, but this may optimize the number of
	 * instructions required based on the nature of the address.
	 *
	 * Since we don't want the number of instructions emitted to change,
	 * we pad the optimized PPC_LI64() call with NOPs to guarantee that
	 * we always have a five-instruction sequence, which is the maximum
	 * that PPC_LI64() can emit.
	 */
	for (i = ctx->idx - ctx_idx; i < 5; i++)
		EMIT(PPC_RAW_NOP());
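	/*
	 * Illustrative worst case (a sketch, assuming the PPC_LI64()
	 * expansion in bpf_jit.h): loading 0x1234567890abcdef emits
	 * lis; ori; sldi 32; oris; ori -- five instructions -- while a
	 * small constant may need just a single li, hence the nop padding.
	 */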
#ifdef PPC64_ELF_ABI_v1
	/*
	 * Load TOC from function descriptor at offset 8.
	 * We can clobber r2 since we get called through a
	 * function pointer (so caller will save/restore r2)
	 * and since we don't use a TOC ourself.
	 */
	PPC_BPF_LL(2, 12, 8);
	/* Load actual entry point from function descriptor */
	PPC_BPF_LL(12, 12, 0);
#endif

	EMIT(PPC_RAW_MTCTR(12));
	EMIT(PPC_RAW_BCTRL());
}
static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
	/*
	 * By now, the eBPF program has already set up parameters in r3, r4 and r5:
	 * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
	 * r4/BPF_REG_2 - pointer to bpf_array
	 * r5/BPF_REG_3 - index in bpf_array
	 */
	int b2p_bpf_array = b2p[BPF_REG_2];
	int b2p_index = b2p[BPF_REG_3];

	/*
	 * if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT(PPC_RAW_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries)));
	EMIT(PPC_RAW_RLWINM(b2p_index, b2p_index, 0, 0, 31));
	EMIT(PPC_RAW_CMPLW(b2p_index, b2p[TMP_REG_1]));
	PPC_BCC(COND_GE, out);
	/*
	 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
	EMIT(PPC_RAW_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT));
	PPC_BCC(COND_GE, out);

	/*
	 * tail_call_cnt++;
	 */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1));
	PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
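	/*
	 * The counter lives in the stack redzone rather than in a register:
	 * tail calls never return and the chained programs reuse the same
	 * stack frame, so only an in-memory slot makes MAX_TAIL_CALL_CNT
	 * apply across the whole chain.
	 */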
	/* prog = array->ptrs[index]; */
	EMIT(PPC_RAW_MULI(b2p[TMP_REG_1], b2p_index, 8));
	EMIT(PPC_RAW_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array));
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

	/*
	 * if (prog == NULL)
	 *   goto out;
	 */
	EMIT(PPC_RAW_CMPLDI(b2p[TMP_REG_1], 0));
	PPC_BCC(COND_EQ, out);

	/* goto *(prog->bpf_func + prologue_size); */
	PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
	/* skip past the function descriptor */
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
			FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE));
#else
	EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE));
#endif
	EMIT(PPC_RAW_MTCTR(b2p[TMP_REG_1]));

	/* tear down stack, restore NVRs, ... */
	bpf_jit_emit_common_epilogue(image, ctx);

	EMIT(PPC_RAW_BCTR());

	/* out: */
	return 0;
}
/*
 * We spill into the redzone always, even if the bpf program has its own stackframe.
 * Offsets hardcoded based on BPF_PPC_STACK_SAVE -- see bpf_jit_stack_local()
 */
void bpf_stf_barrier(void);

asm (
283 " .global bpf_stf_barrier ;"
284 " bpf_stf_barrier: ;"
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
		       u32 *addrs, int pass)
{
	enum stf_barrier_type stf_barrier = stf_barrier_type_get();
	const struct bpf_insn *insn = fp->insnsi;
	int flen = fp->len;
	int i, ret;

	/* Start of epilogue code - will only be valid 2nd pass onwards */
	u32 exit_addr = addrs[flen];
	for (i = 0; i < flen; i++) {
		u32 code = insn[i].code;
		u32 dst_reg = b2p[insn[i].dst_reg];
		u32 src_reg = b2p[insn[i].src_reg];
		u32 size = BPF_SIZE(code);
		s16 off = insn[i].off;
		s32 imm = insn[i].imm;
		bool func_addr_fixed;
		u64 func_addr;
		u64 imm64;
		u32 true_cond;
		u32 tmp_idx;
		int j;

		/*
		 * addrs[] maps a BPF bytecode address into a real offset from
		 * the start of the body code.
		 */
		addrs[i] = ctx->idx * 4;
		/*
		 * As an optimization, we note down which non-volatile registers
		 * are used so that we can only save/restore those in our
		 * prologue and epilogue. We do this here regardless of whether
		 * the actual BPF instruction uses src/dst registers or not
		 * (for instance, BPF_CALL does not use them). The expectation
		 * is that those instructions will have src_reg/dst_reg set to
		 * 0. Even otherwise, we just lose some prologue/epilogue
		 * optimization but everything else should work without
		 * any issues.
		 */
		if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
			bpf_set_seen_register(ctx, dst_reg);
		if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
			bpf_set_seen_register(ctx, src_reg);

		switch (code) {
		/*
		 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
		 */
		case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
		case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
			EMIT(PPC_RAW_ADD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
		case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
			EMIT(PPC_RAW_SUB(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
		case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm >= -32768 && imm < 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
		case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
			if (!imm) {
				goto bpf_alu32_trunc;
			} else if (imm > -32768 && imm <= 32768) {
				EMIT(PPC_RAW_ADDI(dst_reg, dst_reg, IMM_L(-imm)));
			} else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
		case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
			if (BPF_CLASS(code) == BPF_ALU)
				EMIT(PPC_RAW_MULW(dst_reg, dst_reg, src_reg));
			else
				EMIT(PPC_RAW_MULD(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
		case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
			if (imm >= -32768 && imm < 32768)
				EMIT(PPC_RAW_MULI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				PPC_LI32(b2p[TMP_REG_1], imm);
				if (BPF_CLASS(code) == BPF_ALU)
					EMIT(PPC_RAW_MULW(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				else
					EMIT(PPC_RAW_MULD(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
		case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULW(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else
				EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
		case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
			if (BPF_OP(code) == BPF_MOD) {
				EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_1], dst_reg, src_reg));
				EMIT(PPC_RAW_MULD(b2p[TMP_REG_1], src_reg,
						b2p[TMP_REG_1]));
				EMIT(PPC_RAW_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else
				EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
		case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
		case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
		case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
			if (imm == 0)
				return -EINVAL;
			if (imm == 1) {
				if (BPF_OP(code) == BPF_DIV) {
					goto bpf_alu32_trunc;
				} else {
					EMIT(PPC_RAW_LI(dst_reg, 0));
					break;
				}
			}

			PPC_LI32(b2p[TMP_REG_1], imm);
			switch (BPF_CLASS(code)) {
			case BPF_ALU:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVWU(b2p[TMP_REG_2],
							dst_reg,
							b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULW(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				} else
					EMIT(PPC_RAW_DIVWU(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				break;
			case BPF_ALU64:
				if (BPF_OP(code) == BPF_MOD) {
					EMIT(PPC_RAW_DIVDU(b2p[TMP_REG_2],
							dst_reg,
							b2p[TMP_REG_1]));
					EMIT(PPC_RAW_MULD(b2p[TMP_REG_1],
							b2p[TMP_REG_1],
							b2p[TMP_REG_2]));
					EMIT(PPC_RAW_SUB(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				} else
					EMIT(PPC_RAW_DIVDU(dst_reg, dst_reg,
							b2p[TMP_REG_1]));
				break;
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
		case BPF_ALU64 | BPF_NEG: /* dst = -dst */
			EMIT(PPC_RAW_NEG(dst_reg, dst_reg));
			goto bpf_alu32_trunc;
		/*
		 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
		 */
		case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
		case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
			EMIT(PPC_RAW_AND(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
		case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
			if (!IMM_H(imm))
				EMIT(PPC_RAW_ANDI(dst_reg, dst_reg, IMM_L(imm)));
			else {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_AND(dst_reg, dst_reg, b2p[TMP_REG_1]));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
		case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
			EMIT(PPC_RAW_OR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
		case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_ORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_ORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
		case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
			EMIT(PPC_RAW_XOR(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
		case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
			if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
				/* Sign-extended */
				PPC_LI32(b2p[TMP_REG_1], imm);
				EMIT(PPC_RAW_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]));
			} else {
				if (IMM_L(imm))
					EMIT(PPC_RAW_XORI(dst_reg, dst_reg, IMM_L(imm)));
				if (IMM_H(imm))
					EMIT(PPC_RAW_XORIS(dst_reg, dst_reg, IMM_H(imm)));
			}
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
			/* slw clears top 32 bits */
			EMIT(PPC_RAW_SLW(dst_reg, dst_reg, src_reg));
			/* skip zero extension move, but set address map. */
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
			EMIT(PPC_RAW_SLD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
			/* with imm 0, we still need to clear top 32 bits */
			EMIT(PPC_RAW_SLWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
			EMIT(PPC_RAW_SRW(dst_reg, dst_reg, src_reg));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
			EMIT(PPC_RAW_SRD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
			EMIT(PPC_RAW_SRWI(dst_reg, dst_reg, imm));
			if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
		case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRDI(dst_reg, dst_reg, imm));
			break;
		case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
			EMIT(PPC_RAW_SRAW(dst_reg, dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
			EMIT(PPC_RAW_SRAD(dst_reg, dst_reg, src_reg));
			break;
		case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
			EMIT(PPC_RAW_SRAWI(dst_reg, dst_reg, imm));
			goto bpf_alu32_trunc;
		case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
			if (imm != 0)
				EMIT(PPC_RAW_SRADI(dst_reg, dst_reg, imm));
			break;
		/*
		 * MOV
		 */
		case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
		case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
			if (imm == 1) {
				/* special mov32 for zext */
				EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
				break;
			}
			EMIT(PPC_RAW_MR(dst_reg, src_reg));
			goto bpf_alu32_trunc;
		case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
		case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
			PPC_LI32(dst_reg, imm);
			if (imm < 0)
				goto bpf_alu32_trunc;
			else if (insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			break;
bpf_alu32_trunc:
		/* Truncate to 32-bits */
		if (BPF_CLASS(code) == BPF_ALU && !fp->aux->verifier_zext)
			EMIT(PPC_RAW_RLWINM(dst_reg, dst_reg, 0, 0, 31));
		break;
		/*
		 * BPF_FROM_BE/LE
		 */
		case BPF_ALU | BPF_END | BPF_FROM_LE:
		case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
			if (BPF_SRC(code) == BPF_FROM_BE)
				goto emit_clear;
#else /* !__BIG_ENDIAN__ */
			if (BPF_SRC(code) == BPF_FROM_LE)
				goto emit_clear;
#endif
			switch (imm) {
			case 16:
				/* Rotate 8 bits left & mask with 0x0000ff00 */
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23));
				/* Rotate 8 bits right & insert LSB to reg */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31));
				/* Move result back to dst_reg */
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
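				/*
				 * Worked example (illustrative): dst = 0x0000aabb.
				 * The rlwinm gives tmp = 0x0000bb00; the rlwimi
				 * inserts 0xaa into the low byte, so tmp becomes
				 * 0x0000bbaa -- the halfword byteswap of the input.
				 */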
				break;
			case 32:
				/*
				 * Rotate word left by 8 bits:
				 * 2 bytes are already in their final position
				 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
				 */
				EMIT(PPC_RAW_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31));
				/* Rotate 24 bits and insert byte 1 */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7));
				/* Rotate 24 bits and insert byte 3 */
				EMIT(PPC_RAW_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23));
				EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
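				/*
				 * Worked example (illustrative): dst = 0x11223344.
				 * The rotate gives tmp = 0x22334411, where 0x33 and
				 * 0x11 already sit in their final positions; the two
				 * rlwimi's then patch in 0x44 (byte 1) and 0x22
				 * (byte 3), yielding 0x44332211.
				 */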
				break;
			case 64:
				/* Store the value to stack and then use byte-reverse loads */
				PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
				if (cpu_has_feature(CPU_FTR_ARCH_206)) {
					EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
				} else {
					EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
					if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
						EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
					EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
					EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
					if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
						EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
					EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
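					/*
					 * On CPUs without ldbrx (pre-ARCH 2.06),
					 * the two lwbrx loads each byte-reverse
					 * one 32-bit half; the sldi/or sequence
					 * then swaps which half is high,
					 * completing the 64-bit byteswap.
					 */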
				}
				break;
			}
			break;

emit_clear:
			switch (imm) {
			case 16:
				/* zero-extend 16 bits into 64 bits */
				EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 48));
				if (insn_is_zext(&insn[i + 1]))
					addrs[++i] = ctx->idx * 4;
				break;
			case 32:
				if (!fp->aux->verifier_zext)
					/* zero-extend 32 bits into 64 bits */
					EMIT(PPC_RAW_RLDICL(dst_reg, dst_reg, 0, 32));
				break;
			case 64:
				/* nop */
				break;
			}
			break;
		/*
		 * BPF_ST NOSPEC (speculation barrier)
		 */
		case BPF_ST | BPF_NOSPEC:
			if (!security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) ||
					!security_ftr_enabled(SEC_FTR_STF_BARRIER))
				break;

			switch (stf_barrier) {
			case STF_BARRIER_EIEIO:
				EMIT(PPC_RAW_EIEIO() | 0x02000000);
				break;
			case STF_BARRIER_SYNC_ORI:
				EMIT(PPC_RAW_SYNC());
				EMIT(PPC_RAW_LD(b2p[TMP_REG_1], _R13, 0));
				EMIT(PPC_RAW_ORI(_R31, _R31, 0));
				break;
			case STF_BARRIER_FALLBACK:
				EMIT(PPC_RAW_MFLR(b2p[TMP_REG_1]));
				PPC_LI64(12, dereference_kernel_function_descriptor(bpf_stf_barrier));
				EMIT(PPC_RAW_MTCTR(12));
				EMIT(PPC_RAW_BCTRL());
				EMIT(PPC_RAW_MTLR(b2p[TMP_REG_1]));
				break;
			case STF_BARRIER_NONE:
				break;
			}
			break;
		/*
		 * BPF_ST(X)
		 */
		case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STB(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				EMIT(PPC_RAW_LI(b2p[TMP_REG_1], imm));
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STH(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			EMIT(PPC_RAW_STW(src_reg, dst_reg, off));
			break;
		case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
		case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
			if (BPF_CLASS(code) == BPF_ST) {
				PPC_LI32(b2p[TMP_REG_1], imm);
				src_reg = b2p[TMP_REG_1];
			}
			PPC_BPF_STL(src_reg, dst_reg, off);
			break;
		/*
		 * BPF_STX ATOMIC (atomic ops)
		 */
		case BPF_STX | BPF_ATOMIC | BPF_W:
			if (imm != BPF_ADD) {
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}

			/* *(u32 *)(dst + off) += src */

			/* Get EA into TMP_REG_1 */
			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
			tmp_idx = ctx->idx * 4;
			/* load value from memory into TMP_REG_2 */
			EMIT(PPC_RAW_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
			/* add value from src_reg into this */
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
			/* store result back */
			EMIT(PPC_RAW_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
			/* we're done if this succeeded */
			PPC_BCC_SHORT(COND_NE, tmp_idx);
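			/*
			 * If the stwcx. loses the reservation, CR0[EQ] stays
			 * clear and the bne- above loops back to the lwarx to
			 * retry, which is what makes the add atomic.
			 */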
			break;
		case BPF_STX | BPF_ATOMIC | BPF_DW:
			if (imm != BPF_ADD) {
				pr_err_ratelimited(
					"eBPF filter atomic op code %02x (@%d) unsupported\n",
					code, i);
				return -ENOTSUPP;
			}
			/* *(u64 *)(dst + off) += src */

			EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], dst_reg, off));
			tmp_idx = ctx->idx * 4;
			EMIT(PPC_RAW_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0));
			EMIT(PPC_RAW_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg));
			EMIT(PPC_RAW_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]));
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			break;
		/*
		 * BPF_LDX
		 */
		/* dst = *(u8 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		/* dst = *(u16 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_H:
		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
		/* dst = *(u32 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_W:
		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
		/* dst = *(u64 *)(ul) (src + off) */
		case BPF_LDX | BPF_MEM | BPF_DW:
		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
			/*
			 * As PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could either be a valid
			 * kernel pointer or NULL but not a userspace address, execute BPF_PROBE_MEM
			 * load only if addr is kernel address (see is_kernel_addr()), otherwise
			 * set dst_reg=0 and move on.
			 */
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], src_reg, off));
				if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
					PPC_LI64(b2p[TMP_REG_2], 0x8000000000000000ul);
				else /* BOOK3S_64 */
					PPC_LI64(b2p[TMP_REG_2], PAGE_OFFSET);
				EMIT(PPC_RAW_CMPLD(b2p[TMP_REG_1], b2p[TMP_REG_2]));
				PPC_BCC(COND_GT, (ctx->idx + 4) * 4);
				EMIT(PPC_RAW_LI(dst_reg, 0));
				/*
				 * Check if 'off' is word aligned because PPC_BPF_LL()
				 * (BPF_DW case) generates two instructions if 'off' is not
				 * word-aligned and one instruction otherwise.
				 */
				if (BPF_SIZE(code) == BPF_DW && (off & 3))
					PPC_JMP((ctx->idx + 3) * 4);
				else
					PPC_JMP((ctx->idx + 2) * 4);
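				/*
				 * Sketch of the emitted stream (assuming the
				 * two-instruction PPC_BCC() from bpf_jit.h):
				 * bcc+nop; li dst,0; b past_load; load insn(s).
				 * The COND_GT branch thus lands on the load for
				 * kernel addresses, while the unconditional
				 * jump targets the instruction just past it.
				 */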
			}

			switch (size) {
			case BPF_B:
				EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
				break;
			case BPF_H:
				EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
				break;
			case BPF_W:
				EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
				break;
			case BPF_DW:
				PPC_BPF_LL(dst_reg, src_reg, off);
				break;
			}

			if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
				addrs[++i] = ctx->idx * 4;
			if (BPF_MODE(code) == BPF_PROBE_MEM) {
				ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
							    4, dst_reg);
				if (ret)
					return ret;
			}
			break;

		/*
		 * Doubleword load
		 * 16 byte instruction that uses two 'struct bpf_insn'
		 */
		case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
			imm64 = ((u64)(u32) insn[i].imm) |
				(((u64)(u32) insn[i+1].imm) << 32);
			tmp_idx = ctx->idx;
			PPC_LI64(dst_reg, imm64);
			/* padding to allow full 5 instructions for later patching */
			for (j = ctx->idx - tmp_idx; j < 5; j++)
				EMIT(PPC_RAW_NOP());
			/* Adjust for two bpf instructions */
			addrs[++i] = ctx->idx * 4;
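			/*
			 * As in bpf_jit_emit_func_call_rel(), padding to a
			 * fixed five-instruction sequence keeps addrs[] and
			 * the image size stable across JIT passes, so the
			 * imm64 can later be patched in place.
			 */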
			break;

		/*
		 * Return/Exit
		 */
		case BPF_JMP | BPF_EXIT:
			/*
			 * If this isn't the very last instruction, branch to
			 * the epilogue. If we _are_ the last instruction,
			 * we'll just fall through to the epilogue.
			 */
			if (i != flen - 1)
				PPC_JMP(exit_addr);
			/* else fall through to the epilogue */
			break;
		/*
		 * Call kernel helper or bpf function
		 */
		case BPF_JMP | BPF_CALL:
			ctx->seen |= SEEN_FUNC;

			ret = bpf_jit_get_func_addr(fp, &insn[i], false,
						    &func_addr, &func_addr_fixed);
			if (ret < 0)
				return ret;

			if (func_addr_fixed)
				bpf_jit_emit_func_call_hlp(image, ctx, func_addr);
			else
				bpf_jit_emit_func_call_rel(image, ctx, func_addr);
			/* move return value from r3 to BPF_REG_0 */
			EMIT(PPC_RAW_MR(b2p[BPF_REG_0], 3));
			break;
		/*
		 * Jumps and branches
		 */
		case BPF_JMP | BPF_JA:
			PPC_JMP(addrs[i + 1 + off]);
			break;
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP32 | BPF_JGT | BPF_K:
		case BPF_JMP32 | BPF_JGT | BPF_X:
		case BPF_JMP32 | BPF_JSGT | BPF_K:
		case BPF_JMP32 | BPF_JSGT | BPF_X:
			true_cond = COND_GT;
			goto cond_branch;
		case BPF_JMP | BPF_JLT | BPF_K:
		case BPF_JMP | BPF_JLT | BPF_X:
		case BPF_JMP | BPF_JSLT | BPF_K:
		case BPF_JMP | BPF_JSLT | BPF_X:
		case BPF_JMP32 | BPF_JLT | BPF_K:
		case BPF_JMP32 | BPF_JLT | BPF_X:
		case BPF_JMP32 | BPF_JSLT | BPF_K:
		case BPF_JMP32 | BPF_JSLT | BPF_X:
			true_cond = COND_LT;
			goto cond_branch;
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_X:
		case BPF_JMP32 | BPF_JGE | BPF_K:
		case BPF_JMP32 | BPF_JGE | BPF_X:
		case BPF_JMP32 | BPF_JSGE | BPF_K:
		case BPF_JMP32 | BPF_JSGE | BPF_X:
			true_cond = COND_GE;
			goto cond_branch;
		case BPF_JMP | BPF_JLE | BPF_K:
		case BPF_JMP | BPF_JLE | BPF_X:
		case BPF_JMP | BPF_JSLE | BPF_K:
		case BPF_JMP | BPF_JSLE | BPF_X:
		case BPF_JMP32 | BPF_JLE | BPF_K:
		case BPF_JMP32 | BPF_JLE | BPF_X:
		case BPF_JMP32 | BPF_JSLE | BPF_K:
		case BPF_JMP32 | BPF_JSLE | BPF_X:
			true_cond = COND_LE;
			goto cond_branch;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP32 | BPF_JEQ | BPF_K:
		case BPF_JMP32 | BPF_JEQ | BPF_X:
			true_cond = COND_EQ;
			goto cond_branch;
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP32 | BPF_JNE | BPF_K:
		case BPF_JMP32 | BPF_JNE | BPF_X:
			true_cond = COND_NE;
			goto cond_branch;
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP32 | BPF_JSET | BPF_K:
		case BPF_JMP32 | BPF_JSET | BPF_X:
			true_cond = COND_NE;
			/* Fall through */

cond_branch:
			switch (code) {
			case BPF_JMP | BPF_JGT | BPF_X:
			case BPF_JMP | BPF_JLT | BPF_X:
			case BPF_JMP | BPF_JGE | BPF_X:
			case BPF_JMP | BPF_JLE | BPF_X:
			case BPF_JMP | BPF_JEQ | BPF_X:
			case BPF_JMP | BPF_JNE | BPF_X:
			case BPF_JMP32 | BPF_JGT | BPF_X:
			case BPF_JMP32 | BPF_JLT | BPF_X:
			case BPF_JMP32 | BPF_JGE | BPF_X:
			case BPF_JMP32 | BPF_JLE | BPF_X:
			case BPF_JMP32 | BPF_JEQ | BPF_X:
			case BPF_JMP32 | BPF_JNE | BPF_X:
				/* unsigned comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPLW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPLD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSGT | BPF_X:
			case BPF_JMP | BPF_JSLT | BPF_X:
			case BPF_JMP | BPF_JSGE | BPF_X:
			case BPF_JMP | BPF_JSLE | BPF_X:
			case BPF_JMP32 | BPF_JSGT | BPF_X:
			case BPF_JMP32 | BPF_JSLT | BPF_X:
			case BPF_JMP32 | BPF_JSGE | BPF_X:
			case BPF_JMP32 | BPF_JSLE | BPF_X:
				/* signed comparison */
				if (BPF_CLASS(code) == BPF_JMP32)
					EMIT(PPC_RAW_CMPW(dst_reg, src_reg));
				else
					EMIT(PPC_RAW_CMPD(dst_reg, src_reg));
				break;
			case BPF_JMP | BPF_JSET | BPF_X:
			case BPF_JMP32 | BPF_JSET | BPF_X:
				if (BPF_CLASS(code) == BPF_JMP) {
					EMIT(PPC_RAW_AND_DOT(b2p[TMP_REG_1], dst_reg,
							     src_reg));
				} else {
					int tmp_reg = b2p[TMP_REG_1];

					EMIT(PPC_RAW_AND(tmp_reg, dst_reg, src_reg));
					EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
								31));
				}
				break;
			case BPF_JMP | BPF_JNE | BPF_K:
			case BPF_JMP | BPF_JEQ | BPF_K:
			case BPF_JMP | BPF_JGT | BPF_K:
			case BPF_JMP | BPF_JLT | BPF_K:
			case BPF_JMP | BPF_JGE | BPF_K:
			case BPF_JMP | BPF_JLE | BPF_K:
			case BPF_JMP32 | BPF_JNE | BPF_K:
			case BPF_JMP32 | BPF_JEQ | BPF_K:
			case BPF_JMP32 | BPF_JGT | BPF_K:
			case BPF_JMP32 | BPF_JLT | BPF_K:
			case BPF_JMP32 | BPF_JGE | BPF_K:
			case BPF_JMP32 | BPF_JLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * Need sign-extended load, so only positive
				 * values can be used as imm in cmpldi
				 */
				if (imm >= 0 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPLDI(dst_reg, imm));
				} else {
					/* sign-extending load */
					PPC_LI32(b2p[TMP_REG_1], imm);
					/* ... but unsigned comparison */
					if (is_jmp32)
						EMIT(PPC_RAW_CMPLW(dst_reg,
								   b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPLD(dst_reg,
								   b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSGT | BPF_K:
			case BPF_JMP | BPF_JSLT | BPF_K:
			case BPF_JMP | BPF_JSGE | BPF_K:
			case BPF_JMP | BPF_JSLE | BPF_K:
			case BPF_JMP32 | BPF_JSGT | BPF_K:
			case BPF_JMP32 | BPF_JSLT | BPF_K:
			case BPF_JMP32 | BPF_JSGE | BPF_K:
			case BPF_JMP32 | BPF_JSLE | BPF_K:
			{
				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;

				/*
				 * signed comparison, so any 16-bit value
				 * can be used in cmpdi
				 */
				if (imm >= -32768 && imm < 32768) {
					if (is_jmp32)
						EMIT(PPC_RAW_CMPWI(dst_reg, imm));
					else
						EMIT(PPC_RAW_CMPDI(dst_reg, imm));
				} else {
					PPC_LI32(b2p[TMP_REG_1], imm);
					if (is_jmp32)
						EMIT(PPC_RAW_CMPW(dst_reg,
								  b2p[TMP_REG_1]));
					else
						EMIT(PPC_RAW_CMPD(dst_reg,
								  b2p[TMP_REG_1]));
				}
				break;
			}
			case BPF_JMP | BPF_JSET | BPF_K:
			case BPF_JMP32 | BPF_JSET | BPF_K:
				/* andi does not sign-extend the immediate */
				if (imm >= 0 && imm < 32768)
					/* PPC_ANDI is _only/always_ dot-form */
					EMIT(PPC_RAW_ANDI(b2p[TMP_REG_1], dst_reg, imm));
				else {
					int tmp_reg = b2p[TMP_REG_1];

					PPC_LI32(tmp_reg, imm);
					if (BPF_CLASS(code) == BPF_JMP) {
						EMIT(PPC_RAW_AND_DOT(tmp_reg, dst_reg,
								     tmp_reg));
					} else {
						EMIT(PPC_RAW_AND(tmp_reg, dst_reg,
								 tmp_reg));
						EMIT(PPC_RAW_RLWINM_DOT(tmp_reg, tmp_reg,
									0, 0, 31));
					}
				}
				break;
			}
			PPC_BCC(true_cond, addrs[i + 1 + off]);
			break;
		/*
		 * Tail call
		 */
		case BPF_JMP | BPF_TAIL_CALL:
			ctx->seen |= SEEN_TAILCALL;
			ret = bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
			if (ret < 0)
				return ret;
			break;
		default:
			/*
			 * The filter contains something cruel & unusual.
			 * We don't handle it, but also there shouldn't be
			 * anything missing from our list.
			 */
			pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
					   code, i);
			return -ENOTSUPP;
		}
	}

	/* Set end-of-body-code address for exit. */
	addrs[i] = ctx->idx * 4;

	return 0;
}