bpf, arm64: Add support for lse atomics in bpf_arena
author Puranjay Mohan <puranjay@kernel.org>
Fri, 26 Apr 2024 16:11:16 +0000 (16:11 +0000)
committer Alexei Starovoitov <ast@kernel.org>
Wed, 8 May 2024 14:39:05 +0000 (07:39 -0700)
When LSE atomics are available, BPF atomic instructions are implemented
as single ARM64 atomic instructions, so they are easy to enable in
bpf_arena using the currently available exception handling setup.
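
For illustration (not part of the patch), the same property is visible
from userspace C: with LSE enabled, the compiler lowers a C11 fetch-add
to one instruction, which is the shape the exception handling setup
relies on. A minimal sketch, assuming GCC targeting aarch64:

  /* Build with: gcc -O2 -march=armv8-a+lse -S fetch_add.c
   * The fetch-add below compiles to a single "ldaddal" instruction,
   * mirroring what emit_lse_atomic() emits for BPF_ADD | BPF_FETCH.
   */
  #include <stdint.h>

  uint64_t fetch_add(uint64_t *p, uint64_t v)
  {
          return __atomic_fetch_add(p, v, __ATOMIC_SEQ_CST);
  }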

LL_SC atomics are emitted as load-exclusive/store-exclusive retry loops
and would therefore need more work to enable in bpf_arena.
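
For comparison, a sketch of the LL/SC form of the same atomic add,
roughly the shape emit_ll_sc_atomic() generates (register choices here
are illustrative):

  loop: ldxr  x2, [x0]        // load-exclusive; an arena fault lands here
        add   x2, x2, x1
        stxr  w3, x2, [x0]    // store-exclusive; fails if exclusivity lost
        cbnz  w3, loop        // retry until the exclusive pair succeeds

A fixup that steps over one faulting instruction is not enough here,
because the faulting load sits inside a retry loop.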

Enable the LSE-based atomic instructions in bpf_arena and use the
bpf_jit_supports_insn() callback to reject atomics in bpf_arena when
LSE atomics are not available.
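
The JIT keys this off cpus_have_cap(ARM64_HAS_LSE_ATOMICS). For
completeness, the userspace analogue of that check (illustration only,
aarch64) reads HWCAP_ATOMICS from the auxiliary vector:

  #include <stdio.h>
  #include <sys/auxv.h>
  #include <asm/hwcap.h>      /* HWCAP_ATOMICS, aarch64 only */

  int main(void)
  {
          unsigned long hwcap = getauxval(AT_HWCAP);

          printf("LSE atomics: %s\n",
                 (hwcap & HWCAP_ATOMICS) ? "available" : "not available");
          return 0;
  }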

All atomics and arena_atomics selftests are passing:

  [root@ip-172-31-2-216 bpf]# ./test_progs -a atomics,arena_atomics
  #3/1     arena_atomics/add:OK
  #3/2     arena_atomics/sub:OK
  #3/3     arena_atomics/and:OK
  #3/4     arena_atomics/or:OK
  #3/5     arena_atomics/xor:OK
  #3/6     arena_atomics/cmpxchg:OK
  #3/7     arena_atomics/xchg:OK
  #3       arena_atomics:OK
  #10/1    atomics/add:OK
  #10/2    atomics/sub:OK
  #10/3    atomics/and:OK
  #10/4    atomics/or:OK
  #10/5    atomics/xor:OK
  #10/6    atomics/cmpxchg:OK
  #10/7    atomics/xchg:OK
  #10      atomics:OK
  Summary: 2/14 PASSED, 0 SKIPPED, 0 FAILED
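
For reference, a minimal sketch of what these selftests exercise,
modeled loosely on progs/arena_atomics.c (the __arena_global macro and
the bpf_arena_common.h helper header come from the selftests; section
and variable names here are illustrative):

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_arena_common.h"

  struct {
          __uint(type, BPF_MAP_TYPE_ARENA);
          __uint(map_flags, BPF_F_MMAPABLE);
          __uint(max_entries, 10);  /* arena size in pages */
  } arena SEC(".maps");

  __u64 __arena_global add64_value;

  SEC("syscall")
  int add(const void *ctx)
  {
          /* The verifier rewrites this atomic to BPF_PROBE_ATOMIC because
           * it dereferences an arena pointer; the arm64 JIT then emits one
           * LSE instruction plus an exception table entry.
           */
          __sync_fetch_and_add(&add64_value, 1);
          return 0;
  }

  char _license[] SEC("license") = "GPL";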

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Link: https://lore.kernel.org/r/20240426161116.441-1-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
arch/arm64/net/bpf_jit_comp.c
tools/testing/selftests/bpf/DENYLIST.aarch64

index 76b91f3..53347d4 100644
@@ -494,20 +494,26 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 {
        const u8 code = insn->code;
+       const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
        const u8 dst = bpf2a64[insn->dst_reg];
        const u8 src = bpf2a64[insn->src_reg];
        const u8 tmp = bpf2a64[TMP_REG_1];
        const u8 tmp2 = bpf2a64[TMP_REG_2];
        const bool isdw = BPF_SIZE(code) == BPF_DW;
+       const bool arena = BPF_MODE(code) == BPF_PROBE_ATOMIC;
        const s16 off = insn->off;
-       u8 reg;
+       u8 reg = dst;
 
-       if (!off) {
-               reg = dst;
-       } else {
-               emit_a64_mov_i(1, tmp, off, ctx);
-               emit(A64_ADD(1, tmp, tmp, dst), ctx);
-               reg = tmp;
+       if (off || arena) {
+               if (off) {
+                       emit_a64_mov_i(1, tmp, off, ctx);
+                       emit(A64_ADD(1, tmp, tmp, dst), ctx);
+                       reg = tmp;
+               }
+               if (arena) {
+                       emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
+                       reg = tmp;
+               }
        }
 
        switch (insn->imm) {
@@ -576,6 +582,12 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
        u8 reg;
        s32 jmp_offset;
 
+       if (BPF_MODE(code) == BPF_PROBE_ATOMIC) {
+               /* ll_sc based atomics don't support unsafe pointers yet. */
+               pr_err_once("unknown atomic opcode %02x\n", code);
+               return -EINVAL;
+       }
+
        if (!off) {
                reg = dst;
        } else {
@@ -777,7 +789,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
 
        if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
                BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
-                       BPF_MODE(insn->code) != BPF_PROBE_MEM32)
+                       BPF_MODE(insn->code) != BPF_PROBE_MEM32 &&
+                               BPF_MODE(insn->code) != BPF_PROBE_ATOMIC)
                return 0;
 
        if (!ctx->prog->aux->extable ||
@@ -1474,12 +1487,18 @@ emit_cond_jmp:
 
        case BPF_STX | BPF_ATOMIC | BPF_W:
        case BPF_STX | BPF_ATOMIC | BPF_DW:
+       case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
+       case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
                if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
                        ret = emit_lse_atomic(insn, ctx);
                else
                        ret = emit_ll_sc_atomic(insn, ctx);
                if (ret)
                        return ret;
+
+               ret = add_exception_handler(insn, ctx, dst);
+               if (ret)
+                       return ret;
                break;
 
        default:
@@ -2527,6 +2546,19 @@ bool bpf_jit_supports_arena(void)
        return true;
 }
 
+bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
+{
+       if (!in_arena)
+               return true;
+       switch (insn->code) {
+       case BPF_STX | BPF_ATOMIC | BPF_W:
+       case BPF_STX | BPF_ATOMIC | BPF_DW:
+               if (!cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+                       return false;
+       }
+       return true;
+}
+
 void bpf_jit_free(struct bpf_prog *prog)
 {
        if (prog->jited) {
index cf657fc..0445ac3 100644
@@ -10,4 +10,3 @@ fill_link_info/kprobe_multi_link_info            # bpf_program__attach_kprobe_mu
 fill_link_info/kretprobe_multi_link_info         # bpf_program__attach_kprobe_multi_opts unexpected error: -95
 fill_link_info/kprobe_multi_invalid_ubuff        # bpf_program__attach_kprobe_multi_opts unexpected error: -95
 missed/kprobe_recursion                          # missed_kprobe_recursion__attach unexpected error: -95 (errno 95)
-arena_atomics