/*
 * Copyright (C) 2016-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below.  You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
#include "../nfp_net_ctrl.h"

/* --- NFP prog --- */
/* Foreach "multiple" entries macros provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
		pr_warn("instruction limit reached (%u NFP instructions)\n",
			nfp_prog->prog_len);
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the translation
	 * will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

/* --- Emitters --- */
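
/* Each emitter comes in two layers: __emit_<op>() assembles the raw 64-bit
 * instruction word from already-resolved register fields with FIELD_PREP()
 * and pushes it, while the emit_<op>() wrappers first run the software
 * register abstraction (swreg_to_restricted()/swreg_to_unrestricted()) to
 * map operands onto the A/B source banks, recording any failure in
 * nfp_prog->error instead of returning it.
 */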

static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
	   bool indir)
{
	u64 insn;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}
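
/* Note on relocations: the branch emitters above stash the relocation type
 * in otherwise-unused high bits of the 64-bit instruction word
 * (OP_RELO_TYPE).  The encoded target may be only a placeholder (e.g.
 * BR_OFF_RELO for firmware helper calls) and is fixed up later, when the
 * program is relocated for a particular vNIC.
 */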

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}

static void
__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
	      bool set, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BIT_BASE |
		FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
		FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
		FIELD_PREP(OP_BR_BIT_BV, set) |
		FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
		FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
		 u8 defer, bool set, enum nfp_relo_type relo)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* NOTE: The bit to test is specified as a rotation amount, such that
	 * the bit to test will be placed on the MSB of the result when
	 * doing a rotate right.  For bit X, we need right rotate X + 1.
	 */
	bit += 1;

	err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
		      reg.src_lmextn);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
{
	emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
}

static void
__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	      u8 defer, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_BR_ALU_BASE |
		FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
		FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
		FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
		FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
		FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
		      reg.src_lmextn);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
	       swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
{
	if (sc == SHF_SC_R_ROT) {
		pr_err("indirect shift is not allowed on rotation\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
	   enum mul_type type, enum mul_step step, u16 breg, bool swap,
	   bool wr_both, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_MUL_BASE |
		FIELD_PREP(OP_MUL_A_SRC, areg) |
		FIELD_PREP(OP_MUL_B_SRC, breg) |
		FIELD_PREP(OP_MUL_STEP, step) |
		FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
		FIELD_PREP(OP_MUL_SW, swap) |
		FIELD_PREP(OP_MUL_TYPE, type) |
		FIELD_PREP(OP_MUL_WR_AB, wr_both) |
		FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
	 enum mul_step step, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	u16 areg;
	int err;

	if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
		nfp_prog->error = -EINVAL;
		return;
	}

	if (step == MUL_LAST || step == MUL_LAST_2) {
		/* When the step number is LAST or LAST_2, the left source is
		 * used as the destination.
		 */
		err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
		areg = reg.dst;
	} else {
		err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
		areg = reg.areg;
	}

	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
		   reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode 2 immeds in one instr with our normal
	 * swreg infra so if param is an immed, we encode as reg_none() and
	 * copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
		    false, reg.src_lmextn);
}

/* CSR value is read in following immed[gpr, 0] */
static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
{
	__emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
{
	enum immed_shift shift;
	u16 val;

	if (pack_immed(imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
	} else if (pack_immed(~imm, &val, &shift)) {
		emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
	} else {
		emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
			   false, IMMED_SHIFT_0B);
		emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
			   false, IMMED_SHIFT_2B);
	}
}
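
/* Examples: 0xab000000 packs as val 0xab00 with IMMED_SHIFT_2B;
 * 0xffff00ff cannot be packed directly, but its complement 0x0000ff00
 * fits with IMMED_SHIFT_0B, so it is emitted with the invert flag set;
 * something like 0x12345678 matches neither pattern and takes the
 * two-instruction fallback (low 16 bits, then the upper word).
 */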

static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly as an operand and return it,
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly as an operand and return it,
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src, OR
 * the result into @dst at @offset; the other bits of @dst are unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}
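
/* 40-bit addressing sketch: a 40-bit pointer is kept in two consecutive
 * GPRs and handed to the command as separate A/B operands.  A non-zero
 * offset is folded in above with a 32-bit add into the low word followed
 * by an add-with-carry into the high word.
 */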

/* NFP has a Command Push Pull bus which supports bulk memory operations. */
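
/* Write strategy selection below (a summary of the if/else chain, not
 * firmware documentation):
 *   len <= 8 bytes              -> one direct write8
 *   len <= 32, 4-byte aligned   -> one direct write32
 *   len <= 32, unaligned        -> one indirect write8
 *   4-byte aligned              -> one indirect write32
 *   len <= 40                   -> direct write32 + direct write8 tail
 *   otherwise                   -> indirect write32 + direct write8 tail
 */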

static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
			      &off);

	/* Setup PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32) {
		/* Use single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, CMD_CTX_SWAP);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, CMD_CTX_SWAP);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes, then
		 * another direct_ref write8 to write the remaining bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 CMD_CTX_SWAP);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 CMD_CTX_SWAP);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte aligned
		 * length, then another direct_ref write8 to write the
		 * remaining bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, CMD_CTX_SWAP);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, CMD_CTX_SWAP);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value it held before
	 * this transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);
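
/* Step callbacks copy at most 4 bytes between a GPR and LMEM per call.
 * @first/@last frame the whole access, @new_gpr is set when the slice
 * starts a new 32-bit GPR, @lm3 selects the LM3 pointer set up by
 * mem_op_stack(), and @needs_inc requests post-increment addressing.
 */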

static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if offset too large do RMW.
	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM word
		 * into a new GPR.  IOW we've already read that LMEM word and
		 * it is already loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}

static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes, if offset too large do RMW.
	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only first and last LMEM locations are going to need RMW,
		 * the middle location will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}

static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because offset is ORed in
		 * not added when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 4;
		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}
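
/* Slicing example: an 8-byte access at stack offset 2 is split into four
 * 2-byte slices: each slice may cross neither a 4-byte LMEM word boundary
 * nor a 32-bit GPR boundary, and here the two are offset from each other
 * by 2 the whole way through.
 */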

static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->skip = true;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

static const struct jmp_code_map {
	enum br_mask br_mask;
	bool swap;
} jmp_code_map[] = {
	[BPF_JGT >> 4]	= { BR_BLO, true },
	[BPF_JGE >> 4]	= { BR_BHS, false },
	[BPF_JLT >> 4]	= { BR_BLO, false },
	[BPF_JLE >> 4]	= { BR_BHS, true },
	[BPF_JSGT >> 4]	= { BR_BLT, true },
	[BPF_JSGE >> 4]	= { BR_BGE, false },
	[BPF_JSLT >> 4]	= { BR_BLT, false },
	[BPF_JSLE >> 4]	= { BR_BGE, true },
};
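
/* The swap flag rewrites a comparison in terms of its mirror: e.g. there is
 * no "branch if higher" condition on a subtract, so JGT dst, src is emitted
 * as src - dst followed by branch-if-lower (BR_BLO), i.e. with the operands
 * swapped.
 */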

static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
{
	unsigned int op;

	op = BPF_OP(meta->insn.code) >> 4;
	/* br_mask of 0 is BR_BEQ which we don't use in jump code table */
	if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
		      !jmp_code_map[op].br_mask,
		      "no code found for jump instruction"))
		return NULL;

	return &jmp_code_map[op];
}

static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	const struct jmp_code_map *code;
	enum alu_op alu_op, carry_op;
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), carry_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, carry_op, reg_a(reg + 1));

	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	const struct jmp_code_map *code;
	u8 areg, breg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (code->swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}
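
/* Byte-swap sketch: with reg_in = 0xaabbccdd the first ld_field rotates all
 * four bytes right by 8, giving 0xddaabbcc; the second rotates by 16 and
 * patches only bytes 0 and 2 (mask 0x5), yielding 0xddccbbaa.
 */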

static void
wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg, bool gen_high_half)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
	if (gen_high_half)
		emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
			 reg_none());
	else
		wrp_immed(nfp_prog, dst_hi, 0);
}
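
/* As driven here, the multiplier is a multi-step unit: operands are latched
 * with MUL_TYPE_START, partial products are accumulated over the _STEP
 * phases, and the result is read back with MUL_LAST (low 32 bits) and, for
 * a full 64-bit product, MUL_LAST_2 (high 32 bits).
 */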

static void
wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
}

static int
wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	bool gen_high_half, bool ropnd_from_reg)
{
	swreg multiplier, multiplicand, dst_hi, dst_lo;
	const struct bpf_insn *insn = &meta->insn;
	u32 lopnd_max, ropnd_max;
	u8 dst_reg;

	dst_reg = insn->dst_reg;
	multiplicand = reg_a(dst_reg * 2);
	dst_hi = reg_both(dst_reg * 2 + 1);
	dst_lo = reg_both(dst_reg * 2);
	lopnd_max = meta->umax_dst;
	if (ropnd_from_reg) {
		multiplier = reg_b(insn->src_reg * 2);
		ropnd_max = meta->umax_src;
	} else {
		u32 imm = insn->imm;

		multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
		ropnd_max = imm;
	}
	if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
		wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
			    gen_high_half);
	else
		wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);

	return 0;
}

static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
{
	swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
	struct reciprocal_value_adv rvalue;
	u8 pre_shift, exp;
	swreg magic;

	if (imm > U32_MAX) {
		wrp_immed(nfp_prog, dst_both, 0);
		return 0;
	}

	/* NOTE: because we are using "reciprocal_value_adv" which doesn't
	 * support "divisor > (1u << 31)", we need to JIT a separate NFP
	 * sequence for that case; the result then equals the unsigned
	 * comparison "dst >= imm", which can be calculated with the
	 * following NFP sequence:
	 *
	 *  alu[--, dst, -, imm]
	 *  immed[imm, 0]
	 *  alu[dst, imm, +carry, 0]
	 *
	 */
	if (imm > 1U << 31) {
		swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));

		emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
		wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
		emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
			 reg_imm(0));
		return 0;
	}

	rvalue = reciprocal_value_adv(imm, 32);
	exp = rvalue.exp;
	if (rvalue.is_wide_m && !(imm & 1)) {
		pre_shift = fls(imm & -imm) - 1;
		rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
	} else {
		pre_shift = 0;
	}
	magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
	if (imm == 1U << exp) {
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, exp);
	} else if (rvalue.is_wide_m) {
		wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
			    magic, true);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, 1);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, rvalue.sh - 1);
	} else {
		if (pre_shift)
			emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
				 dst_b, SHF_SC_R_SHF, pre_shift);
		wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
			 dst_b, SHF_SC_R_SHF, rvalue.sh);
	}

	return 0;
}
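
/* Division-by-constant sketch: reciprocal_value_adv() picks m and sh so
 * that, for 32-bit dst, dst / imm == (upper 32 bits of dst * m) >> sh.
 * The branches above handle the special cases: power-of-two divisors
 * become a plain shift, and a "wide" m (33 bits) is either avoided by
 * pre-shifting an even divisor or folded via the add/sub/shift sequence.
 */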

static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 ret_einval, end;
	swreg plen, delta;

	BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));

	plen = imm_a(nfp_prog);
	delta = reg_a(2 * 2);

	ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
	end = nfp_prog_current_offset(nfp_prog) + 11;

	/* Calculate resulting length */
	emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
	/* delta == 0 is not allowed by the kernel, add must overflow to make
	 * length smaller.
	 */
	emit_br(nfp_prog, BR_BCC, ret_einval, 0);

	/* if (new_len < 14) then -EINVAL */
	emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_ADD, delta);
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_ADD, delta);

	emit_br(nfp_prog, BR_UNC, end, 2);
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_frame_depth;
	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
	load_lm_ptr = meta->arg2.var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
	if (meta->func_id == BPF_FUNC_map_update_elem)
		emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), reg_a(2));

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}
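
/* Firmware helper call convention, as implemented above: the jump to the
 * helper is a relocated branch with two delay slots, which load the map ID
 * into A0 and a relocated return address into B0; the key (and, for
 * map_update_elem, the value) are pointed to by LM0 (and LM2).
 */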

static int
nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	__emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM);
	/* CSR value is read in following immed[gpr, 0] */
	emit_immed(nfp_prog, reg_both(0), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	emit_immed(nfp_prog, reg_both(1), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	return 0;
}

static int
nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg ptr_type;
	u32 ret_tgt;

	ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));

	ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);

	/* Load ptr type into A1 */
	wrp_mov(nfp_prog, reg_a(1), ptr_type);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	return 0;
}

static int
nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 jmp_tgt;

	jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;

	/* Make sure the queue id fits into FW field */
	emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
		 ALU_OP_AND_NOT_B, reg_imm(0xff));
	emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);

	/* Set the 'queue selected' bit and the queue value */
	emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
		 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
		 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
		      SHF_SC_NONE, 0);
	/* Delay slots end here, we will jump over next instruction if queue
	 * value fits into the field.
	 */
	emit_ld_field(nfp_prog,
		      pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
		      SHF_SC_NONE, 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
		return -EINVAL;

	return 0;
}

/* --- Callbacks --- */
static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;
	u8 src = insn->src_reg * 2;

	if (insn->src_reg == BPF_REG_10) {
		swreg stack_depth_reg;

		stack_depth_reg = ur_load_imm_any(nfp_prog,
						  nfp_prog->stack_frame_depth,
						  stack_imm(nfp_prog));
		emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
			 ALU_OP_ADD, stack_depth_reg);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else {
		wrp_reg_mov(nfp_prog, dst, src);
		wrp_reg_mov(nfp_prog, dst + 1, src + 1);
	}

	return 0;
}

static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u64 imm = meta->insn.imm; /* sign extend */

	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);

	return 0;
}

static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
}

static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
}

static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
}

static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
}

static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
}

static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
}

static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);

	return 0;
}

static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
		 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
		 reg_b(insn->src_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
		 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
		 reg_b(insn->src_reg * 2 + 1));

	return 0;
}

static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);

	return 0;
}

static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_mul(nfp_prog, meta, true, true);
}

static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return wrp_mul(nfp_prog, meta, true, false);
}

static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm);
}

static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	/* NOTE: verifier hook has rejected cases for which verifier doesn't
	 * know whether the source operand is constant or not.
	 */
	return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src);
}

static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;

	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
		 ALU_OP_SUB, reg_b(insn->dst_reg * 2));
	emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
		 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));

	return 0;
}

/* Pseudo code:
 *   if shift_amt >= 32
 *     dst_high = dst_low << shift_amt[4:0]
 *     dst_low = 0;
 *   else
 *     dst_high = (dst_high, dst_low) >> (32 - shift_amt)
 *     dst_low = dst_low << shift_amt
 *
 * The indirect shift will use the same logic at runtime.
 */
static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
	if (shift_amt < 32) {
		emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1),
			 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF,
			 32 - shift_amt);
		emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
			 reg_b(dst), SHF_SC_L_SHF, shift_amt);
	} else if (shift_amt == 32) {
		wrp_reg_mov(nfp_prog, dst + 1, dst);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	} else if (shift_amt > 32) {
		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
			 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32);
		wrp_immed(nfp_prog, reg_both(dst), 0);
	}

	return 0;
}
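
/* Example: for shift_amt = 40 the whole result lives in the high word:
 * dst_high = dst_low << 8 and dst_low = 0, which is exactly the
 * shift_amt > 32 arm above (40 - 32 = 8).
 */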

static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	return __shl_imm64(nfp_prog, dst, insn->imm);
}

static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB,
		 reg_b(src));
	emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_R_DSHF);
}

/* NOTE: for indirect left shift, HIGH part should be calculated first. */
static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_L_SHF);
}

static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	shl_reg64_lt32_high(nfp_prog, dst, src);
	shl_reg64_lt32_low(nfp_prog, dst, src);
}

static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_L_SHF);
	wrp_immed(nfp_prog, reg_both(dst), 0);
}

static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 umin, umax;
	u8 dst, src;

	dst = insn->dst_reg * 2;
	umin = meta->umin_src;
	umax = meta->umax_src;
	if (umin == umax)
		return __shl_imm64(nfp_prog, dst, umin);

	src = insn->src_reg * 2;
	if (umax < 32) {
		shl_reg64_lt32(nfp_prog, dst, src);
	} else if (umin >= 32) {
		shl_reg64_ge32(nfp_prog, dst, src);
	} else {
		/* Generate different instruction sequences depending on runtime
		 * value of shift amount.
		 */
		u16 label_ge32, label_end;

		label_ge32 = nfp_prog_current_offset(nfp_prog) + 7;
		emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);

		shl_reg64_lt32_high(nfp_prog, dst, src);
		label_end = nfp_prog_current_offset(nfp_prog) + 6;
		emit_br(nfp_prog, BR_UNC, label_end, 2);
		/* shl_reg64_lt32_low packed in delay slot. */
		shl_reg64_lt32_low(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
			return -EINVAL;
		shl_reg64_ge32(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
			return -EINVAL;
	}

	return 0;
}
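
/* The label arithmetic above leans on nfp_prog_confirm_current_offset():
 * label_ge32 is +7 because the bit-test branch (1 insn), the lt32 high
 * sequence (3 insns) and the unconditional branch with its two delay slots
 * (3 insns) precede it; the ge32 sequence is another 3 instructions, so
 * label_end lands 6 past the point where it is computed.
 */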

/* Pseudo code:
 *   if shift_amt >= 32
 *     dst_high = 0;
 *     dst_low = dst_high >> shift_amt[4:0]
 *   else
 *     dst_high = dst_high >> shift_amt
 *     dst_low = (dst_high, dst_low) >> shift_amt
 *
 * The indirect shift will use the same logic at runtime.
 */
static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
{
	if (shift_amt < 32) {
		emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
			 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
		emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
	} else if (shift_amt == 32) {
		wrp_reg_mov(nfp_prog, dst, dst + 1);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	} else if (shift_amt > 32) {
		emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
			 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
		wrp_immed(nfp_prog, reg_both(dst + 1), 0);
	}

	return 0;
}

static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u8 dst = insn->dst_reg * 2;

	return __shr_imm64(nfp_prog, dst, insn->imm);
}

/* NOTE: for indirect right shift, LOW part should be calculated first. */
static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
		       reg_b(dst + 1), SHF_SC_R_SHF);
}

static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
		       reg_b(dst), SHF_SC_R_DSHF);
}

static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	shr_reg64_lt32_low(nfp_prog, dst, src);
	shr_reg64_lt32_high(nfp_prog, dst, src);
}

static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
{
	emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
	emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
		       reg_b(dst + 1), SHF_SC_R_SHF);
	wrp_immed(nfp_prog, reg_both(dst + 1), 0);
}

static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 umin, umax;
	u8 dst, src;

	dst = insn->dst_reg * 2;
	umin = meta->umin_src;
	umax = meta->umax_src;
	if (umin == umax)
		return __shr_imm64(nfp_prog, dst, umin);

	src = insn->src_reg * 2;
	if (umax < 32) {
		shr_reg64_lt32(nfp_prog, dst, src);
	} else if (umin >= 32) {
		shr_reg64_ge32(nfp_prog, dst, src);
	} else {
		/* Generate different instruction sequences depending on runtime
		 * value of shift amount.
		 */
		u16 label_ge32, label_end;

		label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
		emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
		shr_reg64_lt32_low(nfp_prog, dst, src);
		label_end = nfp_prog_current_offset(nfp_prog) + 6;
		emit_br(nfp_prog, BR_UNC, label_end, 2);
		/* shr_reg64_lt32_high packed in delay slot. */
		shr_reg64_lt32_high(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
			return -EINVAL;
		shr_reg64_ge32(nfp_prog, dst, src);

		if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
			return -EINVAL;
	}

	return 0;
}
2203 /* Code logic is the same as __shr_imm64, except that ashr requires the
2204 * signedness bit to be supplied through the PREV_ALU result.
2205 */
2206 static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
2208 if (shift_amt < 32) {
2209 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
2210 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
2211 /* Set signedness bit. */
2212 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
2214 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2215 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
2216 } else if (shift_amt == 32) {
2217 /* NOTE: this also helps set the signedness bit. */
2218 wrp_reg_mov(nfp_prog, dst, dst + 1);
2219 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2220 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2221 } else if (shift_amt > 32) {
2222 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
2224 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2225 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
2226 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2227 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2228 }
2230 return 0;
2231 }
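/* Illustration only: the arithmetic variant of the sketch above, again
 * modelling dst/dst + 1 as "lo"/"hi"; this assumes arithmetic >> on
 * signed values, as kernel code does. The helper name is made up.
 */
static inline u64 ashr64_sketch(u32 lo, u32 hi, u8 shift_amt)
{
s32 shi = (s32)hi;

if (shift_amt == 0)
return ((u64)hi << 32) | lo;
if (shift_amt < 32) {
lo = (lo >> shift_amt) | (hi << (32 - shift_amt));
shi >>= shift_amt; /* shifts in the sign bit */
} else {
lo = shi >> ((shift_amt - 32) & 31);
shi >>= 31; /* all sign bits */
}
return ((u64)(u32)shi << 32) | lo;
}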
2233 static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2235 const struct bpf_insn *insn = &meta->insn;
2236 u8 dst = insn->dst_reg * 2;
2238 return __ashr_imm64(nfp_prog, dst, insn->imm);
2241 static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2243 /* NOTE: the first insn will set both indirect shift amount (source A)
2244 * and signedness bit (MSB of result).
2246 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
2247 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2248 reg_b(dst + 1), SHF_SC_R_SHF);
2251 static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2253 /* NOTE: this is the same as the logical shift because we don't need to
2254 * shift in the signedness bit when the shift amount is less than 32.
2255 */
2256 return shr_reg64_lt32_low(nfp_prog, dst, src);
2259 static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2261 ashr_reg64_lt32_low(nfp_prog, dst, src);
2262 ashr_reg64_lt32_high(nfp_prog, dst, src);
2265 static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2267 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
2268 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2269 reg_b(dst + 1), SHF_SC_R_SHF);
2270 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2271 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2274 /* Like ashr_imm64, but need to use indirect shift. */
2275 static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2277 const struct bpf_insn *insn = &meta->insn;
2281 dst = insn->dst_reg * 2;
2282 umin = meta->umin_src;
2283 umax = meta->umax_src;
2285 return __ashr_imm64(nfp_prog, dst, umin);
2287 src = insn->src_reg * 2;
2289 ashr_reg64_lt32(nfp_prog, dst, src);
2290 } else if (umin >= 32) {
2291 ashr_reg64_ge32(nfp_prog, dst, src);
2293 u16 label_ge32, label_end;
2295 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
2296 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2297 ashr_reg64_lt32_low(nfp_prog, dst, src);
2298 label_end = nfp_prog_current_offset(nfp_prog) + 6;
2299 emit_br(nfp_prog, BR_UNC, label_end, 2);
2300 /* ashr_reg64_lt32_high packed in delay slot. */
2301 ashr_reg64_lt32_high(nfp_prog, dst, src);
2303 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2305 ashr_reg64_ge32(nfp_prog, dst, src);
2307 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2314 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2316 const struct bpf_insn *insn = &meta->insn;
2318 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
2319 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2324 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2326 const struct bpf_insn *insn = &meta->insn;
2328 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
2329 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2334 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2336 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
2339 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2341 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
2344 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2346 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
2349 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2351 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
2354 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2356 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
2359 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2361 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
2364 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2366 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
2369 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2371 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
2374 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2376 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
2379 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2381 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
2384 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2386 return wrp_mul(nfp_prog, meta, false, true);
2389 static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2391 return wrp_mul(nfp_prog, meta, false, false);
2394 static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2396 return div_reg64(nfp_prog, meta);
2399 static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2401 return div_imm64(nfp_prog, meta);
2404 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2406 u8 dst = meta->insn.dst_reg * 2;
2408 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
2409 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
2414 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2416 const struct bpf_insn *insn = &meta->insn;
2419 return 1; /* TODO: zero shift means indirect */
2421 emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
2422 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
2423 SHF_SC_L_SHF, insn->imm);
2424 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2429 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2431 const struct bpf_insn *insn = &meta->insn;
2432 u8 gpr = insn->dst_reg * 2;
2434 switch (insn->imm) {
2436 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
2438 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
2441 wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
2444 wrp_end32(nfp_prog, reg_a(gpr), gpr);
2445 wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
2448 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));
2450 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
2451 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
2458 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2460 struct nfp_insn_meta *prev = nfp_meta_prev(meta);
2464 dst = prev->insn.dst_reg * 2;
2465 imm_lo = prev->insn.imm;
2466 imm_hi = meta->insn.imm;
2468 wrp_immed(nfp_prog, reg_both(dst), imm_lo);
2470 /* mov is always 1 insn, load imm may be two, so try to use mov */
2471 if (imm_hi == imm_lo)
2472 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
2474 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);
2476 return 0;
2477 }
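/* For example (informative): a BPF_LD_IMM64 of 0x100000001 has
 * imm_lo == imm_hi == 1, so the high word is emitted as a one-instruction
 * mov from the low word instead of a second immediate load, which may
 * take two instructions.
 */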
2479 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2481 meta->double_cb = imm_ld8_part2;
2485 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2487 return construct_data_ld(nfp_prog, meta->insn.imm, 1);
2490 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2492 return construct_data_ld(nfp_prog, meta->insn.imm, 2);
2495 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2497 return construct_data_ld(nfp_prog, meta->insn.imm, 4);
2500 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2502 return construct_data_ind_ld(nfp_prog, meta->insn.imm,
2503 meta->insn.src_reg * 2, 1);
2506 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2508 return construct_data_ind_ld(nfp_prog, meta->insn.imm,
2509 meta->insn.src_reg * 2, 2);
2512 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2514 return construct_data_ind_ld(nfp_prog, meta->insn.imm,
2515 meta->insn.src_reg * 2, 4);
2519 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2520 unsigned int size, unsigned int ptr_off)
2522 return mem_op_stack(nfp_prog, meta, size, ptr_off,
2523 meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
2524 true, wrp_lmem_load);
2527 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2530 swreg dst = reg_both(meta->insn.dst_reg * 2);
2532 switch (meta->insn.off) {
2533 case offsetof(struct __sk_buff, len):
2534 if (size != FIELD_SIZEOF(struct __sk_buff, len))
2536 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
2538 case offsetof(struct __sk_buff, data):
2539 if (size != FIELD_SIZEOF(struct __sk_buff, data))
2541 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
2543 case offsetof(struct __sk_buff, data_end):
2544 if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
2546 emit_alu(nfp_prog, dst,
2547 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
2553 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
2558 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2561 swreg dst = reg_both(meta->insn.dst_reg * 2);
2563 switch (meta->insn.off) {
2564 case offsetof(struct xdp_md, data):
2565 if (size != FIELD_SIZEOF(struct xdp_md, data))
2567 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
2569 case offsetof(struct xdp_md, data_end):
2570 if (size != FIELD_SIZEOF(struct xdp_md, data_end))
2572 emit_alu(nfp_prog, dst,
2573 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
2579 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
2585 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2590 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2592 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
2593 tmp_reg, meta->insn.dst_reg * 2, size);
2597 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2602 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2604 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
2605 tmp_reg, meta->insn.dst_reg * 2, size);
2609 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog,
2610 struct nfp_insn_meta *meta)
2612 s16 range_start = meta->pkt_cache.range_start;
2613 s16 range_end = meta->pkt_cache.range_end;
2614 swreg src_base, off;
2618 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog));
2619 src_base = reg_a(meta->insn.src_reg * 2);
2620 len = range_end - range_start;
2621 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH;
2623 indir = len > 8 * REG_WIDTH;
2624 /* Set up PREV_ALU for indirect mode. */
2626 wrp_immed(nfp_prog, reg_none(),
2627 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
2629 /* Cache memory into transfer-in registers. */
2630 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
2631 off, xfer_num - 1, CMD_CTX_SWAP, indir);
2632 }
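/* Worked example (informative): with REG_WIDTH == 4, caching the packet
 * range [0, 40) gives len == 40 and xfer_num == 10. Since 40 > 8 *
 * REG_WIDTH, the length does not fit the command directly and the
 * indirect form with the PREV_ALU length override above is used.
 */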
2635 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
2636 struct nfp_insn_meta *meta,
2639 s16 range_start = meta->pkt_cache.range_start;
2640 s16 insn_off = meta->insn.off - range_start;
2641 swreg dst_lo, dst_hi, src_lo, src_mid;
2642 u8 dst_gpr = meta->insn.dst_reg * 2;
2643 u8 len_lo = size, len_mid = 0;
2644 u8 idx = insn_off / REG_WIDTH;
2645 u8 off = insn_off % REG_WIDTH;
2647 dst_hi = reg_both(dst_gpr + 1);
2648 dst_lo = reg_both(dst_gpr);
2649 src_lo = reg_xfer(idx);
2651 /* The read length could involve as many as three registers. */
2652 if (size > REG_WIDTH - off) {
2653 /* Calculate the part in the second register. */
2654 len_lo = REG_WIDTH - off;
2655 len_mid = size - len_lo;
2657 /* Calculate the part in the third register. */
2658 if (size > 2 * REG_WIDTH - off)
2659 len_mid = REG_WIDTH;
2662 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);
2665 wrp_immed(nfp_prog, dst_hi, 0);
2669 src_mid = reg_xfer(idx + 1);
2671 if (size <= REG_WIDTH) {
2672 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
2673 wrp_immed(nfp_prog, dst_hi, 0);
2675 swreg src_hi = reg_xfer(idx + 2);
2677 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid,
2678 REG_WIDTH - len_lo, len_lo);
2679 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo,
2680 REG_WIDTH - len_lo);
2681 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo,
2682 len_lo);
2683 }
2685 return 0;
2686 }
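/* Worked example (informative): with REG_WIDTH == 4, an 8-byte read at
 * off == 3 into its transfer register spans three registers: dst_lo takes
 * 1 byte (len_lo) of xfer[idx] plus 3 bytes of xfer[idx + 1], and dst_hi
 * takes the remaining byte of xfer[idx + 1] plus 3 bytes of xfer[idx + 2].
 */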
2689 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
2690 struct nfp_insn_meta *meta,
2693 swreg dst_lo, dst_hi, src_lo;
2696 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH;
2697 dst_gpr = meta->insn.dst_reg * 2;
2698 dst_hi = reg_both(dst_gpr + 1);
2699 dst_lo = reg_both(dst_gpr);
2700 src_lo = reg_xfer(idx);
2702 if (size < REG_WIDTH) {
2703 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
2704 wrp_immed(nfp_prog, dst_hi, 0);
2705 } else if (size == REG_WIDTH) {
2706 wrp_mov(nfp_prog, dst_lo, src_lo);
2707 wrp_immed(nfp_prog, dst_hi, 0);
2709 swreg src_hi = reg_xfer(idx + 1);
2711 wrp_mov(nfp_prog, dst_lo, src_lo);
2712 wrp_mov(nfp_prog, dst_hi, src_hi);
2719 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog,
2720 struct nfp_insn_meta *meta, unsigned int size)
2722 u8 off = meta->insn.off - meta->pkt_cache.range_start;
2724 if (IS_ALIGNED(off, REG_WIDTH))
2725 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size);
2727 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size);
2731 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2734 if (meta->ldst_gather_len)
2735 return nfp_cpp_memcpy(nfp_prog, meta);
2737 if (meta->ptr.type == PTR_TO_CTX) {
2738 if (nfp_prog->type == BPF_PROG_TYPE_XDP)
2739 return mem_ldx_xdp(nfp_prog, meta, size);
2741 return mem_ldx_skb(nfp_prog, meta, size);
2744 if (meta->ptr.type == PTR_TO_PACKET) {
2745 if (meta->pkt_cache.range_end) {
2746 if (meta->pkt_cache.do_init)
2747 mem_ldx_data_init_pktcache(nfp_prog, meta);
2749 return mem_ldx_data_from_pktcache(nfp_prog, meta, size);
2751 return mem_ldx_data(nfp_prog, meta, size);
2755 if (meta->ptr.type == PTR_TO_STACK)
2756 return mem_ldx_stack(nfp_prog, meta, size,
2757 meta->ptr.off + meta->ptr.var_off.value);
2759 if (meta->ptr.type == PTR_TO_MAP_VALUE)
2760 return mem_ldx_emem(nfp_prog, meta, size);
2765 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2767 return mem_ldx(nfp_prog, meta, 1);
2770 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2772 return mem_ldx(nfp_prog, meta, 2);
2775 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2777 return mem_ldx(nfp_prog, meta, 4);
2780 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2782 return mem_ldx(nfp_prog, meta, 8);
2786 mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2789 u64 imm = meta->insn.imm; /* sign extend */
2792 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2794 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
2798 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2801 if (meta->ptr.type == PTR_TO_PACKET)
2802 return mem_st_data(nfp_prog, meta, size);
2807 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2809 return mem_st(nfp_prog, meta, 1);
2812 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2814 return mem_st(nfp_prog, meta, 2);
2817 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2819 return mem_st(nfp_prog, meta, 4);
2822 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2824 return mem_st(nfp_prog, meta, 8);
2828 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2833 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2835 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
2836 meta->insn.src_reg * 2, size);
2840 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2841 unsigned int size, unsigned int ptr_off)
2843 return mem_op_stack(nfp_prog, meta, size, ptr_off,
2844 meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
2845 false, wrp_lmem_store);
2848 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2850 switch (meta->insn.off) {
2851 case offsetof(struct xdp_md, rx_queue_index):
2852 return nfp_queue_select(nfp_prog, meta);
2855 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */
2860 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2863 if (meta->ptr.type == PTR_TO_PACKET)
2864 return mem_stx_data(nfp_prog, meta, size);
2866 if (meta->ptr.type == PTR_TO_STACK)
2867 return mem_stx_stack(nfp_prog, meta, size,
2868 meta->ptr.off + meta->ptr.var_off.value);
2873 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2875 return mem_stx(nfp_prog, meta, 1);
2878 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2880 return mem_stx(nfp_prog, meta, 2);
2883 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2885 if (meta->ptr.type == PTR_TO_CTX)
2886 if (nfp_prog->type == BPF_PROG_TYPE_XDP)
2887 return mem_stx_xdp(nfp_prog, meta);
2888 return mem_stx(nfp_prog, meta, 4);
2891 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2893 return mem_stx(nfp_prog, meta, 8);
2897 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
2899 u8 dst_gpr = meta->insn.dst_reg * 2;
2900 u8 src_gpr = meta->insn.src_reg * 2;
2901 unsigned int full_add, out;
2902 swreg addra, addrb, off;
2904 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2906 /* We can fit 16 bits into the command immediate. If we know the immediate
2907 * is guaranteed to either always or never fit into 16 bits, we only
2908 * generate code to handle that particular case; otherwise we generate
2909 * code for both.
2910 */
2911 out = nfp_prog_current_offset(nfp_prog);
2912 full_add = nfp_prog_current_offset(nfp_prog);
2914 if (meta->insn.off) {
2918 if (meta->xadd_maybe_16bit) {
2922 if (meta->xadd_over_16bit)
2924 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
2929 /* Generate the branch for choosing add_imm vs add */
2930 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
2931 swreg max_imm = imm_a(nfp_prog);
2933 wrp_immed(nfp_prog, max_imm, 0xffff);
2934 emit_alu(nfp_prog, reg_none(),
2935 max_imm, ALU_OP_SUB, reg_b(src_gpr));
2936 emit_alu(nfp_prog, reg_none(),
2937 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1));
2938 emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0);
2942 /* If the insn has an offset, add it to the address */
2943 if (!meta->insn.off) {
2944 addra = reg_a(dst_gpr);
2945 addrb = reg_b(dst_gpr + 1);
2947 emit_alu(nfp_prog, imma_a(nfp_prog),
2948 reg_a(dst_gpr), ALU_OP_ADD, off);
2949 emit_alu(nfp_prog, imma_b(nfp_prog),
2950 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0));
2951 addra = imma_a(nfp_prog);
2952 addrb = imma_b(nfp_prog);
2955 /* Generate the add_imm if 16 bits are possible */
2956 if (meta->xadd_maybe_16bit) {
2957 swreg prev_alu = imm_a(nfp_prog);
2959 wrp_immed(nfp_prog, prev_alu,
2960 FIELD_PREP(CMD_OVE_DATA, 2) |
2962 FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2));
2963 wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2);
2964 emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0,
2965 addra, addrb, 0, CMD_CTX_NO_SWAP);
2967 if (meta->xadd_over_16bit)
2968 emit_br(nfp_prog, BR_UNC, out, 0);
2971 if (!nfp_prog_confirm_current_offset(nfp_prog, full_add))
2974 /* Generate the add if 16 bits are not guaranteed */
2975 if (meta->xadd_over_16bit) {
2976 emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0,
2977 addra, addrb, is64 << 2,
2978 is64 ? CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1);
2980 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr));
2982 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1));
2985 if (!nfp_prog_confirm_current_offset(nfp_prog, out))
2986 return -EINVAL;
2988 return 0;
2989 }
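/* Control-flow sketch of the emitted code above (informative), for the
 * case where the addend may or may not fit in 16 bits:
 *
 *	if (src > 0xffff)	// BR_BLO on the borrow computed above
 *		goto full_add;
 *	add_imm(addr, src);	// 16-bit immediate form
 *	goto out;
 * full_add:
 *	add(addr, xfer);	// full-width form
 * out:
 */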
2991 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2993 return mem_xadd(nfp_prog, meta, false);
2996 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2998 return mem_xadd(nfp_prog, meta, true);
3001 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3003 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
3008 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3010 const struct bpf_insn *insn = &meta->insn;
3011 u64 imm = insn->imm; /* sign extend */
3012 swreg or1, or2, tmp_reg;
3014 or1 = reg_a(insn->dst_reg * 2);
3015 or2 = reg_b(insn->dst_reg * 2 + 1);
3018 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3019 emit_alu(nfp_prog, imm_a(nfp_prog),
3020 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
3021 or1 = imm_a(nfp_prog);
3025 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
3026 emit_alu(nfp_prog, imm_b(nfp_prog),
3027 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
3028 or2 = imm_b(nfp_prog);
3031 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
3032 emit_br(nfp_prog, BR_BEQ, insn->off, 0);
3034 return 0;
3035 }
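/* Illustration only: in plain C terms the sequence above implements
 *
 *	if (((dst_lo ^ (u32)imm) | (dst_hi ^ (u32)(imm >> 32))) == 0)
 *		goto insn->off;
 *
 * with each XOR skipped when the corresponding immediate half is zero.
 */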
3037 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3039 const struct bpf_insn *insn = &meta->insn;
3040 u64 imm = insn->imm; /* sign extend */
3049 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3050 emit_alu(nfp_prog, reg_none(),
3051 reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg);
3052 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3056 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
3057 emit_alu(nfp_prog, reg_none(),
3058 reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg);
3059 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3065 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3067 const struct bpf_insn *insn = &meta->insn;
3068 u64 imm = insn->imm; /* sign extend */
3072 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
3073 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
3074 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3078 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3079 emit_alu(nfp_prog, reg_none(),
3080 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
3081 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3083 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
3084 emit_alu(nfp_prog, reg_none(),
3085 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
3086 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3091 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3093 const struct bpf_insn *insn = &meta->insn;
3095 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
3096 ALU_OP_XOR, reg_b(insn->src_reg * 2));
3097 emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
3098 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
3099 emit_alu(nfp_prog, reg_none(),
3100 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
3101 emit_br(nfp_prog, BR_BEQ, insn->off, 0);
3106 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3108 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
3111 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3113 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
3117 bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3119 u32 ret_tgt, stack_depth;
3122 stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN);
3123 /* Space for saving the return address is accounted for by the callee,
3124 * so stack_depth can be zero for the main function.
3127 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
3128 stack_imm(nfp_prog));
3129 emit_alu(nfp_prog, stack_reg(nfp_prog),
3130 stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg);
3131 emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
3132 NFP_CSR_ACT_LM_ADDR0);
3135 /* The following steps are performed:
3136 * 1. Put the start offset of the callee into imm_b(). This will
3137 * require a fixup step, as we do not necessarily know this
3138 * offset yet.
3139 * 2. Put the return address from the callee to the caller into
3140 * register ret_reg().
3141 * 3. (After defer slots are consumed) Jump to the subroutine that
3142 * pushes the registers to the stack.
3143 * The subroutine acts as a trampoline, and returns to the address in
3144 * imm_b(), i.e. jumps to the callee.
3146 * Using ret_reg() to pass the return address to the callee is set here
3147 * as a convention. The callee can then push this address onto its
3148 * stack frame in its prologue. The advantages of passing the return
3149 * address through ret_reg(), instead of pushing it to the stack right
3150 * here, are the following:
3151 * - It looks cleaner.
3152 * - If the called function is called multiple times, we get a lower
3153 * program size.
3154 * - We save two no-op instructions that would otherwise have to be
3155 * added just before the emit_br() when the stack depth is not zero.
3156 * - If we ever find a register to hold the return address during the
3157 * whole execution of the callee, we will not have to push the return
3158 * address to the stack for leaf functions.
3160 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
3161 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2,
3162 RELO_BR_GO_CALL_PUSH_REGS);
3163 wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
3164 wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
3166 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
3170 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
3171 stack_imm(nfp_prog));
3172 emit_alu(nfp_prog, stack_reg(nfp_prog),
3173 stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
3174 emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
3175 NFP_CSR_ACT_LM_ADDR0);
3176 wrp_nops(nfp_prog, 3);
3177 }
3179 return 0;
3180 }
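/* Informative sketch of the sequence emitted by steps 1-3 above
 * (defer-slot instructions indented, offsets symbolic; mnemonic spellings
 * are approximate):
 *
 *	br subroutine_push_regs, defer=2
 *	 immed imm_b, <callee offset>	// both immeds carry RELO_IMMED_REL
 *	 immed ret_reg, ret_tgt
 * ret_tgt:	// == branch point + 3, as confirmed above
 */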
3182 static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3184 switch (meta->insn.imm) {
3185 case BPF_FUNC_xdp_adjust_head:
3186 return adjust_head(nfp_prog, meta);
3187 case BPF_FUNC_xdp_adjust_tail:
3188 return adjust_tail(nfp_prog, meta);
3189 case BPF_FUNC_map_lookup_elem:
3190 case BPF_FUNC_map_update_elem:
3191 case BPF_FUNC_map_delete_elem:
3192 return map_call_stack_common(nfp_prog, meta);
3193 case BPF_FUNC_get_prandom_u32:
3194 return nfp_get_prandom_u32(nfp_prog, meta);
3195 case BPF_FUNC_perf_event_output:
3196 return nfp_perf_event_output(nfp_prog, meta);
3198 WARN_ONCE(1, "verifier allowed unsupported function\n");
3203 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3205 if (is_mbpf_pseudo_call(meta))
3206 return bpf_to_bpf_call(nfp_prog, meta);
3208 return helper_call(nfp_prog, meta);
3211 static bool nfp_is_main_function(struct nfp_insn_meta *meta)
3213 return meta->subprog_idx == 0;
3216 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3218 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
3224 nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3226 /* Pop R6~R9 from the stack via the related subroutine.
3227 * Pop the return address for the BPF-to-BPF call from the stack and load
3228 * it into ret_reg() before we jump. This means that the subroutine does
3229 * not come back here; we make it jump back to the subprogram caller
3230 * directly!
3231 */
3232 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1,
3233 RELO_BR_GO_CALL_POP_REGS);
3234 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
3239 static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3241 if (nfp_is_main_function(meta))
3242 return goto_out(nfp_prog, meta);
3244 return nfp_subprog_epilogue(nfp_prog, meta);
3247 static const instr_cb_t instr_cb[256] = {
3248 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
3249 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
3250 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64,
3251 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64,
3252 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64,
3253 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64,
3254 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64,
3255 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64,
3256 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64,
3257 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
3258 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
3259 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
3260 [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64,
3261 [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64,
3262 [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64,
3263 [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64,
3264 [BPF_ALU64 | BPF_NEG] = neg_reg64,
3265 [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64,
3266 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
3267 [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64,
3268 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
3269 [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64,
3270 [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64,
3271 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
3272 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm,
3273 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg,
3274 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm,
3275 [BPF_ALU | BPF_AND | BPF_X] = and_reg,
3276 [BPF_ALU | BPF_AND | BPF_K] = and_imm,
3277 [BPF_ALU | BPF_OR | BPF_X] = or_reg,
3278 [BPF_ALU | BPF_OR | BPF_K] = or_imm,
3279 [BPF_ALU | BPF_ADD | BPF_X] = add_reg,
3280 [BPF_ALU | BPF_ADD | BPF_K] = add_imm,
3281 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
3282 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
3283 [BPF_ALU | BPF_MUL | BPF_X] = mul_reg,
3284 [BPF_ALU | BPF_MUL | BPF_K] = mul_imm,
3285 [BPF_ALU | BPF_DIV | BPF_X] = div_reg,
3286 [BPF_ALU | BPF_DIV | BPF_K] = div_imm,
3287 [BPF_ALU | BPF_NEG] = neg_reg,
3288 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
3289 [BPF_ALU | BPF_END | BPF_X] = end_reg32,
3290 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
3291 [BPF_LD | BPF_ABS | BPF_B] = data_ld1,
3292 [BPF_LD | BPF_ABS | BPF_H] = data_ld2,
3293 [BPF_LD | BPF_ABS | BPF_W] = data_ld4,
3294 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
3295 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
3296 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
3297 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1,
3298 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2,
3299 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
3300 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8,
3301 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1,
3302 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
3303 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
3304 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
3305 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4,
3306 [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
3307 [BPF_ST | BPF_MEM | BPF_B] = mem_st1,
3308 [BPF_ST | BPF_MEM | BPF_H] = mem_st2,
3309 [BPF_ST | BPF_MEM | BPF_W] = mem_st4,
3310 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
3311 [BPF_JMP | BPF_JA | BPF_K] = jump,
3312 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
3313 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm,
3314 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm,
3315 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm,
3316 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm,
3317 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm,
3318 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm,
3319 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm,
3320 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm,
3321 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
3322 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
3323 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
3324 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg,
3325 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg,
3326 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg,
3327 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg,
3328 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg,
3329 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg,
3330 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg,
3331 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
3332 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
3333 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
3334 [BPF_JMP | BPF_CALL] = call,
3335 [BPF_JMP | BPF_EXIT] = jmp_exit,
3338 /* --- Assembler logic --- */
3339 static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
3341 struct nfp_insn_meta *meta, *jmp_dst;
3344 list_for_each_entry(meta, &nfp_prog->insns, l) {
3347 if (meta->insn.code == (BPF_JMP | BPF_CALL))
3349 if (BPF_CLASS(meta->insn.code) != BPF_JMP)
3352 if (list_is_last(&meta->l, &nfp_prog->insns))
3353 br_idx = nfp_prog->last_bpf_off;
3355 br_idx = list_next_entry(meta, l)->off - 1;
3357 if (!nfp_is_br(nfp_prog->prog[br_idx])) {
3358 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
3359 br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
3362 /* Leave special branches for later */
3363 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
3367 if (!meta->jmp_dst) {
3368 pr_err("Non-exit jump doesn't have destination info recorded!!\n");
3372 jmp_dst = meta->jmp_dst;
3374 if (jmp_dst->skip) {
3375 pr_err("Branch landing on removed instruction!!\n");
3379 for (idx = meta->off; idx <= br_idx; idx++) {
3380 if (!nfp_is_br(nfp_prog->prog[idx]))
3382 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
3389 static void nfp_intro(struct nfp_prog *nfp_prog)
3391 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
3392 emit_alu(nfp_prog, plen_reg(nfp_prog),
3393 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
3397 nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3399 /* Save return address into the stack. */
3400 wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog));
3404 nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3406 unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth;
3408 nfp_prog->stack_frame_depth = round_up(depth, 4);
3409 nfp_subprog_prologue(nfp_prog, meta);
3412 bool nfp_is_subprog_start(struct nfp_insn_meta *meta)
3414 return meta->flags & FLAG_INSN_IS_SUBPROG_START;
3417 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
3419 /* TC direct-action mode:
3420 * 0,1 ok NOT SUPPORTED[1]
3421 * 2 drop 0x22 -> drop, count as stat1
3422 * 4,5 nuke 0x02 -> drop
3423 * 7 redir 0x44 -> redir, count as stat2
3424 * * unspec 0x11 -> pass, count as stat0
3426 * [1] We can't support OK and RECLASSIFY because we can't tell TC
3427 * the exact decision made. We are forced to support UNSPEC
3428 * to handle aborts so that's the only one we handle for passing
3429 * packets up the stack.
3431 /* Target for aborts */
3432 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
3434 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3436 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3437 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
3439 /* Target for normal exits */
3440 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
3442 /* if R0 > 7 jump to abort */
3443 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
3444 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
3445 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3447 wrp_immed(nfp_prog, reg_b(2), 0x41221211);
3448 wrp_immed(nfp_prog, reg_b(3), 0x41001211);
3450 emit_shf(nfp_prog, reg_a(1),
3451 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);
3453 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
3454 emit_shf(nfp_prog, reg_a(2),
3455 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
3457 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
3458 emit_shf(nfp_prog, reg_b(2),
3459 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
3461 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3463 emit_shf(nfp_prog, reg_b(2),
3464 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
3465 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
3468 static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
3470 /* XDP return codes:
3471 * 0 aborted 0x82 -> drop, count as stat3
3472 * 1 drop 0x22 -> drop, count as stat1
3473 * 2 pass 0x11 -> pass, count as stat0
3474 * 3 tx 0x44 -> redir, count as stat2
3475 * * unknown 0x82 -> drop, count as stat3
3477 /* Target for aborts */
3478 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
3480 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3482 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3483 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
3485 /* Target for normal exits */
3486 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
3488 /* if R0 > 3 jump to abort */
3489 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
3490 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
3492 wrp_immed(nfp_prog, reg_b(2), 0x44112282);
3494 emit_shf(nfp_prog, reg_a(1),
3495 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);
3497 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
3498 emit_shf(nfp_prog, reg_b(2),
3499 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
3501 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3503 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3504 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
3505 }
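/* Worked example (informative): 0x44112282 above is a byte table indexed
 * by the XDP return code. For XDP_PASS (R0 == 2) the indirect shift
 * amount is 2 * 8 == 16, and (0x44112282 >> 16) & 0xff == 0x11, i.e.
 * pass and count as stat0, matching the table at the top of this
 * function.
 */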
3507 static void nfp_push_callee_registers(struct nfp_prog *nfp_prog)
3511 /* Subroutine: Save all callee-saved registers (R6 ~ R9).
3512 * imm_b() holds the return address.
3513 */
3514 nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog);
3515 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
3516 u8 adj = (reg - BPF_REG_0) * 2;
3517 u8 idx = (reg - BPF_REG_6) * 2;
3519 /* The first slot in the stack frame is used to push the return
3520 * address in bpf_to_bpf_call(), start just after.
3522 wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj));
3524 if (reg == BPF_REG_8)
3525 /* Prepare to jump back, last 3 insns use defer slots */
3526 emit_rtn(nfp_prog, imm_b(nfp_prog), 3);
3528 wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1));
3532 static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog)
3536 /* Subroutine: Restore all callee-saved registers (R6 ~ R9).
3537 * ret_reg() holds the return address.
3538 */
3539 nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog);
3540 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
3541 u8 adj = (reg - BPF_REG_0) * 2;
3542 u8 idx = (reg - BPF_REG_6) * 2;
3544 /* The first slot in the stack frame holds the return address,
3545 * start popping just after that.
3547 wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx));
3549 if (reg == BPF_REG_8)
3550 /* Prepare to jump back, last 3 insns use defer slots */
3551 emit_rtn(nfp_prog, ret_reg(nfp_prog), 3);
3553 wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1));
3557 static void nfp_outro(struct nfp_prog *nfp_prog)
3559 switch (nfp_prog->type) {
3560 case BPF_PROG_TYPE_SCHED_CLS:
3561 nfp_outro_tc_da(nfp_prog);
3563 case BPF_PROG_TYPE_XDP:
3564 nfp_outro_xdp(nfp_prog);
3570 if (nfp_prog->subprog_cnt == 1)
3573 nfp_push_callee_registers(nfp_prog);
3574 nfp_pop_callee_registers(nfp_prog);
3577 static int nfp_translate(struct nfp_prog *nfp_prog)
3579 struct nfp_insn_meta *meta;
3583 depth = nfp_prog->subprog[0].stack_depth;
3584 nfp_prog->stack_frame_depth = round_up(depth, 4);
3586 nfp_intro(nfp_prog);
3587 if (nfp_prog->error)
3588 return nfp_prog->error;
3590 list_for_each_entry(meta, &nfp_prog->insns, l) {
3591 instr_cb_t cb = instr_cb[meta->insn.code];
3593 meta->off = nfp_prog_current_offset(nfp_prog);
3595 if (nfp_is_subprog_start(meta)) {
3596 nfp_start_subprog(nfp_prog, meta);
3597 if (nfp_prog->error)
3598 return nfp_prog->error;
3602 nfp_prog->n_translated++;
3606 if (nfp_meta_has_prev(nfp_prog, meta) &&
3607 nfp_meta_prev(meta)->double_cb)
3608 cb = nfp_meta_prev(meta)->double_cb;
3611 err = cb(nfp_prog, meta);
3614 if (nfp_prog->error)
3615 return nfp_prog->error;
3617 nfp_prog->n_translated++;
3620 nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
3622 nfp_outro(nfp_prog);
3623 if (nfp_prog->error)
3624 return nfp_prog->error;
3626 wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
3627 if (nfp_prog->error)
3628 return nfp_prog->error;
3630 return nfp_fixup_branches(nfp_prog);
3633 /* --- Optimizations --- */
3634 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
3636 struct nfp_insn_meta *meta;
3638 list_for_each_entry(meta, &nfp_prog->insns, l) {
3639 struct bpf_insn insn = meta->insn;
3641 /* Programs converted from cBPF start with register xoring */
3642 if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
3643 insn.src_reg == insn.dst_reg)
3646 /* Programs start with R6 = R1 but we ignore the skb pointer */
3647 if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
3648 insn.src_reg == 1 && insn.dst_reg == 6)
3651 /* Return as soon as something doesn't match */
3657 /* abs(insn.imm) will fit better into unrestricted reg immediate -
3658 * convert add/sub of a negative number into a sub/add of a positive one.
3660 static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
3662 struct nfp_insn_meta *meta;
3664 list_for_each_entry(meta, &nfp_prog->insns, l) {
3665 struct bpf_insn insn = meta->insn;
3670 if (BPF_CLASS(insn.code) != BPF_ALU &&
3671 BPF_CLASS(insn.code) != BPF_ALU64 &&
3672 BPF_CLASS(insn.code) != BPF_JMP)
3674 if (BPF_SRC(insn.code) != BPF_K)
3679 if (BPF_CLASS(insn.code) == BPF_JMP) {
3680 switch (BPF_OP(insn.code)) {
3685 meta->jump_neg_op = true;
3691 if (BPF_OP(insn.code) == BPF_ADD)
3692 insn.code = BPF_CLASS(insn.code) | BPF_SUB;
3693 else if (BPF_OP(insn.code) == BPF_SUB)
3694 insn.code = BPF_CLASS(insn.code) | BPF_ADD;
3698 meta->insn.code = insn.code | BPF_K;
3701 meta->insn.imm = -insn.imm;
3702 }
3703 }
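/* For example (informative): "r1 += -4" (BPF_ALU64 | BPF_ADD | BPF_K,
 * imm == -4) becomes "r1 -= 4", so the constant fits the unrestricted
 * immediate without needing sign extension.
 */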
3705 /* Remove masking after load since our load guarantees this is not needed */
3706 static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
3708 struct nfp_insn_meta *meta1, *meta2;
3709 const s32 exp_mask[] = {
3710 [BPF_B] = 0x000000ffU,
3711 [BPF_H] = 0x0000ffffU,
3712 [BPF_W] = 0xffffffffU,
3715 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
3716 struct bpf_insn insn, next;
3721 if (BPF_CLASS(insn.code) != BPF_LD)
3723 if (BPF_MODE(insn.code) != BPF_ABS &&
3724 BPF_MODE(insn.code) != BPF_IND)
3727 if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
3730 if (!exp_mask[BPF_SIZE(insn.code)])
3732 if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
3735 if (next.src_reg || next.dst_reg)
3738 if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
3739 continue;
3741 meta2->skip = true;
3742 }
3743 }
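/* For example (informative): the cBPF-derived pair
 *
 *	r0 = *(u8 *)skb[off]	// BPF_LD | BPF_ABS | BPF_B
 *	r0 &= 0xff		// BPF_ALU64 | BPF_AND | BPF_K
 *
 * loses the AND, since the byte load already zero-extends.
 */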
3745 static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
3747 struct nfp_insn_meta *meta1, *meta2, *meta3;
3749 nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
3750 struct bpf_insn insn, next1, next2;
3753 next1 = meta2->insn;
3754 next2 = meta3->insn;
3756 if (BPF_CLASS(insn.code) != BPF_LD)
3758 if (BPF_MODE(insn.code) != BPF_ABS &&
3759 BPF_MODE(insn.code) != BPF_IND)
3761 if (BPF_SIZE(insn.code) != BPF_W)
3764 if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
3765 next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
3766 !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
3767 next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
3770 if (next1.src_reg || next1.dst_reg ||
3771 next2.src_reg || next2.dst_reg)
3774 if (next1.imm != 0x20 || next2.imm != 0x20)
3777 if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
3778 meta3->flags & FLAG_INSN_IS_JUMP_DST)
3779 continue;
3781 meta2->skip = true;
3782 meta3->skip = true;
3783 }
3784 }
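/* For example (informative): the 32-bit zero-extension idiom
 *
 *	r0 = *(u32 *)skb[off]
 *	r0 <<= 32
 *	r0 >>= 32
 *
 * drops both shifts, since the 32-bit load leaves the upper word zero.
 */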
3786 /* A load/store pair that forms a memory copy should look like the following:
3788 * ld_width R, [addr_src + offset_src]
3789 * st_width [addr_dest + offset_dest], R
3791 * The destination register of the load and the source register of the
3792 * store should be the same, and they should operate at the same width.
3793 * If either addr_src or addr_dest is the stack pointer, we don't do the
3794 * CPP optimization, as the stack is modelled by registers on the NFP.
3795 */
3796 static bool
3797 curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
3798 struct nfp_insn_meta *st_meta)
3800 struct bpf_insn *ld = &ld_meta->insn;
3801 struct bpf_insn *st = &st_meta->insn;
3803 if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
3806 if (ld_meta->ptr.type != PTR_TO_PACKET &&
3807 ld_meta->ptr.type != PTR_TO_MAP_VALUE)
3810 if (st_meta->ptr.type != PTR_TO_PACKET)
3813 if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
3816 if (ld->dst_reg != st->src_reg)
3819 /* There is jump to the store insn in this pair. */
3820 if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
3826 /* Currently, we only support chaining load/store pairs if:
3828 * - Their address base registers are the same.
3829 * - Their address offsets are in the same order.
3830 * - They operate at the same memory width.
3831 * - There is no jump into the middle of them.
3834 curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
3835 struct nfp_insn_meta *st_meta,
3836 struct bpf_insn *prev_ld,
3837 struct bpf_insn *prev_st)
3839 u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
3840 struct bpf_insn *ld = &ld_meta->insn;
3841 struct bpf_insn *st = &st_meta->insn;
3842 s16 prev_ld_off, prev_st_off;
3844 /* This pair is the start pair. */
3848 prev_size = BPF_LDST_BYTES(prev_ld);
3849 curr_size = BPF_LDST_BYTES(ld);
3850 prev_ld_base = prev_ld->src_reg;
3851 prev_st_base = prev_st->dst_reg;
3852 prev_ld_dst = prev_ld->dst_reg;
3853 prev_ld_off = prev_ld->off;
3854 prev_st_off = prev_st->off;
3856 if (ld->dst_reg != prev_ld_dst)
3859 if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
3862 if (curr_size != prev_size)
3865 /* There is jump to the head of this pair. */
3866 if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
3869 /* Both in ascending order. */
3870 if (prev_ld_off + prev_size == ld->off &&
3871 prev_st_off + prev_size == st->off)
3874 /* Both in descending order. */
3875 if (ld->off + curr_size == prev_ld_off &&
3876 st->off + curr_size == prev_st_off)
3882 /* Return TRUE if a cross memory access happens. A cross memory access
3883 * means the store area overlaps the load area such that a later load
3884 * might read the value written by a previous store; in this case we
3885 * can't treat the sequence as a memory copy.
3886 */
3887 static bool
3888 cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
3889 struct nfp_insn_meta *head_st_meta)
3891 s16 head_ld_off, head_st_off, ld_off;
3893 /* Different pointer types do not overlap. */
3894 if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
3897 /* load and store are both PTR_TO_PACKET, check ID info. */
3898 if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
3901 /* Canonicalize the offsets. Turn all of them against the original
3902 * base register.
3903 */
3904 head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
3905 head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
3906 ld_off = ld->off + head_ld_meta->ptr.off;
3908 /* Ascending order cross. */
3909 if (ld_off > head_ld_off &&
3910 head_ld_off < head_st_off && ld_off >= head_st_off)
3913 /* Descending order cross. */
3914 if (ld_off < head_ld_off &&
3915 head_ld_off > head_st_off && ld_off <= head_st_off)
3921 /* This pass tries to identify the following instruction sequences.
3922 *
3923 * load R, [regA + offA]
3924 * store [regB + offB], R
3925 * load R, [regA + offA + const_imm_A]
3926 * store [regB + offB + const_imm_A], R
3927 * load R, [regA + offA + 2 * const_imm_A]
3928 * store [regB + offB + 2 * const_imm_A], R
3931 * The above sequence is typically generated by the compiler when lowering
3932 * memcpy. The NFP prefers using CPP instructions to accelerate it.
3933 */
3934 static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
3936 struct nfp_insn_meta *head_ld_meta = NULL;
3937 struct nfp_insn_meta *head_st_meta = NULL;
3938 struct nfp_insn_meta *meta1, *meta2;
3939 struct bpf_insn *prev_ld = NULL;
3940 struct bpf_insn *prev_st = NULL;
3943 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
3944 struct bpf_insn *ld = &meta1->insn;
3945 struct bpf_insn *st = &meta2->insn;
3947 /* Reset record status if any of the following is true:
3948 * - The current insn pair is not load/store.
3949 * - The load/store pair doesn't chain with the previous one.
3950 * - The chained load/store pair crosses the previous pair.
3951 * - The chained load/store pair has a total memory copy size
3952 * beyond 128 bytes, which is the maximum length a single
3953 * NFP CPP command can transfer.
3954 */
3955 if (!curr_pair_is_memcpy(meta1, meta2) ||
3956 !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
3958 (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
3960 head_ld_meta->ldst_gather_len >= 128))) {
3965 s16 prev_ld_off = prev_ld->off;
3966 s16 prev_st_off = prev_st->off;
3967 s16 head_ld_off = head_ld_meta->insn.off;
3969 if (prev_ld_off < head_ld_off) {
3970 head_ld_meta->insn.off = prev_ld_off;
3971 head_st_meta->insn.off = prev_st_off;
3972 head_ld_meta->ldst_gather_len =
3973 -head_ld_meta->ldst_gather_len;
3976 head_ld_meta->paired_st = &head_st_meta->insn;
3977 head_st_meta->skip = true;
3979 head_ld_meta->ldst_gather_len = 0;
3982 /* If the chain is ended by a load/store pair then this
3983 * could serve as the new head of the next chain.
3984 */
3985 if (curr_pair_is_memcpy(meta1, meta2)) {
3986 head_ld_meta = meta1;
3987 head_st_meta = meta2;
3988 head_ld_meta->ldst_gather_len =
3990 meta1 = nfp_meta_next(meta1);
3991 meta2 = nfp_meta_next(meta2);
3996 head_ld_meta = NULL;
3997 head_st_meta = NULL;
4006 if (!head_ld_meta) {
4007 head_ld_meta = meta1;
4008 head_st_meta = meta2;
4014 head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
4015 meta1 = nfp_meta_next(meta1);
4016 meta2 = nfp_meta_next(meta2);
4023 static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
4025 struct nfp_insn_meta *meta, *range_node = NULL;
4026 s16 range_start = 0, range_end = 0;
4027 bool cache_avail = false;
4028 struct bpf_insn *insn;
4029 s32 range_ptr_off = 0;
4030 u32 range_ptr_id = 0;
4032 list_for_each_entry(meta, &nfp_prog->insns, l) {
4033 if (meta->flags & FLAG_INSN_IS_JUMP_DST)
4034 cache_avail = false;
4041 if (is_mbpf_store_pkt(meta) ||
4042 insn->code == (BPF_JMP | BPF_CALL) ||
4043 is_mbpf_classic_store_pkt(meta) ||
4044 is_mbpf_classic_load(meta)) {
4045 cache_avail = false;
4049 if (!is_mbpf_load(meta))
4052 if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
4053 cache_avail = false;
4060 goto end_current_then_start_new;
4064 /* Check ID to make sure two reads share the same
4065 * variable offset against PTR_TO_PACKET, and check OFF
4066 * to make sure they also share the same constant
4067 * offset.
4068 *
4069 * OFFs don't really need to be the same, because they
4070 * are the constant offsets against PTR_TO_PACKET, so
4071 * for different OFFs, we could canonicalize them to
4072 * offsets against the original packet pointer. We don't
4073 * support this.
4074 */
4075 if (meta->ptr.id == range_ptr_id &&
4076 meta->ptr.off == range_ptr_off) {
4077 s16 new_start = range_start;
4078 s16 end, off = insn->off;
4079 s16 new_end = range_end;
4080 bool changed = false;
4082 if (off < range_start) {
4087 end = off + BPF_LDST_BYTES(insn);
4088 if (end > range_end) {
4096 if (new_end - new_start <= 64) {
4097 /* Install new range. */
4098 range_start = new_start;
4099 range_end = new_end;
4104 end_current_then_start_new:
4105 range_node->pkt_cache.range_start = range_start;
4106 range_node->pkt_cache.range_end = range_end;
4109 range_node->pkt_cache.do_init = true;
4110 range_ptr_id = range_node->ptr.id;
4111 range_ptr_off = range_node->ptr.off;
4112 range_start = insn->off;
4113 range_end = insn->off + BPF_LDST_BYTES(insn);
4117 range_node->pkt_cache.range_start = range_start;
4118 range_node->pkt_cache.range_end = range_end;
4121 list_for_each_entry(meta, &nfp_prog->insns, l) {
4125 if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
4126 if (meta->pkt_cache.do_init) {
4127 range_start = meta->pkt_cache.range_start;
4128 range_end = meta->pkt_cache.range_end;
4130 meta->pkt_cache.range_start = range_start;
4131 meta->pkt_cache.range_end = range_end;
4132 }
4133 }
4134 }
4135 }
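/* Worked example (informative): three loads off the same packet pointer
 * at constant offsets 2 (2 bytes), 8 (4 bytes) and 60 (4 bytes) merge
 * into the cached range [2, 64), since 64 - 2 <= 64. A fourth load at
 * offset 70 would exceed the 64-byte budget and start a new range.
 */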
4137 static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
4139 nfp_bpf_opt_reg_init(nfp_prog);
4141 nfp_bpf_opt_neg_add_sub(nfp_prog);
4142 nfp_bpf_opt_ld_mask(nfp_prog);
4143 nfp_bpf_opt_ld_shift(nfp_prog);
4144 nfp_bpf_opt_ldst_gather(nfp_prog);
4145 nfp_bpf_opt_pkt_cache(nfp_prog);
4150 static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
4152 struct nfp_insn_meta *meta1, *meta2;
4153 struct nfp_bpf_map *nfp_map;
4154 struct bpf_map *map;
4157 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
4158 if (meta1->skip || meta2->skip)
4161 if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
4162 meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
4165 map = (void *)(unsigned long)((u32)meta1->insn.imm |
4166 (u64)meta2->insn.imm << 32);
4167 if (bpf_map_offload_neutral(map)) {
4170 nfp_map = map_to_offmap(map)->dev_priv;
4174 meta1->insn.imm = id;
4175 meta2->insn.imm = 0;
4181 static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
4183 __le64 *ustore = (__force __le64 *)prog;
4186 for (i = 0; i < len; i++) {
4189 err = nfp_ustore_check_valid_no_ecc(prog[i]);
4193 ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
4199 static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
4203 prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
4207 nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
4208 memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
4209 kvfree(nfp_prog->prog);
4210 nfp_prog->prog = prog;
4213 int nfp_bpf_jit(struct nfp_prog *nfp_prog)
4217 ret = nfp_bpf_replace_map_ptrs(nfp_prog);
4221 ret = nfp_bpf_optimize(nfp_prog);
4225 ret = nfp_translate(nfp_prog);
4227 pr_err("Translation failed with error %d (translated: %u)\n",
4228 ret, nfp_prog->n_translated);
4232 nfp_bpf_prog_trim(nfp_prog);
4237 void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt)
4239 struct nfp_insn_meta *meta;
4241 /* Another pass to record jump information. */
4242 list_for_each_entry(meta, &nfp_prog->insns, l) {
4243 struct nfp_insn_meta *dst_meta;
4244 u64 code = meta->insn.code;
4245 unsigned int dst_idx;
4248 if (BPF_CLASS(code) != BPF_JMP)
4250 if (BPF_OP(code) == BPF_EXIT)
4252 if (is_mbpf_helper_call(meta))
4255 /* If the opcode is BPF_CALL at this point, this can only be a
4256 * BPF-to-BPF call (a.k.a. a pseudo call).
4257 */
4258 pseudo_call = BPF_OP(code) == BPF_CALL;
4261 dst_idx = meta->n + 1 + meta->insn.imm;
4263 dst_idx = meta->n + 1 + meta->insn.off;
4265 dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx, cnt);
4268 dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;
4270 dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
4271 meta->jmp_dst = dst_meta;
4272 }
4273 }
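/* For example (informative): a conditional jump at instruction index
 * n == 10 with off == 3 records dst_idx == 10 + 1 + 3 == 14; for a
 * pseudo call the displacement comes from insn.imm instead of insn.off.
 */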
4275 bool nfp_bpf_supported_opcode(u8 code)
4277 return !!instr_cb[code];
4280 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
4286 prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
4289 return ERR_PTR(-ENOMEM);
4291 for (i = 0; i < nfp_prog->prog_len; i++) {
4292 enum nfp_relo_type special;
4296 special = FIELD_GET(OP_RELO_TYPE, prog[i]);
4301 br_add_offset(&prog[i], bv->start_off);
4303 case RELO_BR_GO_OUT:
4304 br_set_offset(&prog[i],
4305 nfp_prog->tgt_out + bv->start_off);
4307 case RELO_BR_GO_ABORT:
4308 br_set_offset(&prog[i],
4309 nfp_prog->tgt_abort + bv->start_off);
4311 case RELO_BR_GO_CALL_PUSH_REGS:
4312 off = nfp_prog->tgt_call_push_regs + bv->start_off;
4313 br_set_offset(&prog[i], off);
4315 case RELO_BR_GO_CALL_POP_REGS:
4316 off = nfp_prog->tgt_call_pop_regs + bv->start_off;
4317 br_set_offset(&prog[i], off);
4319 case RELO_BR_NEXT_PKT:
4320 br_set_offset(&prog[i], bv->tgt_done);
4322 case RELO_BR_HELPER:
4323 val = br_get_offset(prog[i]);
4326 case BPF_FUNC_map_lookup_elem:
4327 val = nfp_prog->bpf->helpers.map_lookup;
4329 case BPF_FUNC_map_update_elem:
4330 val = nfp_prog->bpf->helpers.map_update;
4332 case BPF_FUNC_map_delete_elem:
4333 val = nfp_prog->bpf->helpers.map_delete;
4335 case BPF_FUNC_perf_event_output:
4336 val = nfp_prog->bpf->helpers.perf_event_output;
4339 pr_err("relocation of unknown helper %d\n",
4344 br_set_offset(&prog[i], val);
4346 case RELO_IMMED_REL:
4347 immed_add_value(&prog[i], bv->start_off);
4351 prog[i] &= ~OP_RELO_TYPE;
4354 err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
4362 return ERR_PTR(err);