selftests/bpf: Add verifier tests for better jmp32 register bounds
author    Yonghong Song <yhs@fb.com>
          Thu, 21 Nov 2019 17:06:51 +0000 (09:06 -0800)
committer Alexei Starovoitov <ast@kernel.org>
          Mon, 25 Nov 2019 00:58:46 +0000 (16:58 -0800)
Three test cases are added; a minimal C sketch of what they exercise
follows the trailers below.
Test 1: jmp32 'reg op imm'.
Test 2: jmp32 'reg op reg' where the dst 'reg' holds an unknown value
        and the src 'reg' holds a known constant.
Test 3: jmp32 'reg op reg' where the dst 'reg' holds a known constant
        and the src 'reg' holds an unknown value.

Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20191121170651.449096-1-yhs@fb.com
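All three programs are expected to ACCEPT only because the verifier can carry
the 32-bit bound established at the jmp32 instruction over to the variable
offset used for the 1-byte store into the 48-byte map value; without that
deduction the scalar returned by get_cgroup_classid would stay unbounded and
the store would be rejected. Below is a rough, hand-written C counterpart of
test 1, given as an illustrative sketch only: it is not generated from the
insns in the diff, and the map definition, section name and function name are
invented for the sketch. Tests 2 and 3 differ only in comparing against a
register that holds the constant 1 instead of an immediate.

// SPDX-License-Identifier: GPL-2.0
/* Hand-written C counterpart of the "reg op imm" test.  Names and the map
 * layout are invented for illustration (usual clang -target bpf + libbpf
 * style); this file is not part of the patch. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, char[48]);	/* mirrors fixup_map_hash_48b's 48-byte value */
} sketch_map SEC(".maps");

SEC("tc")
int jgt32_bound_sketch(struct __sk_buff *skb)
{
	long long key = 0;
	char *val;
	__u32 idx;

	val = bpf_map_lookup_elem(&sketch_map, &key);
	if (!val)
		return 0;

	/* unknown 32-bit scalar, as produced by BPF_FUNC_get_cgroup_classid */
	idx = (__u32)bpf_get_cgroup_classid(skb);

	/* the jmp32 comparison: taking the branch skips the store */
	if (idx > 1)
		return 0;

	/* reachable only with idx in [0, 1], so the 1-byte store is in bounds */
	val[idx] = 0;
	return 0;
}

char _license[] SEC("license") = "GPL";

The taken branch of the comparison skips the store, so the only path that
reaches val[idx] is the one on which idx <= 1 has already been proven.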
tools/testing/selftests/bpf/verifier/jmp32.c

index f0961c5..bf0322e 100644
        .result = ACCEPT,
        .retval = 2,
 },
+{
+       "jgt32: range bound deduction, reg op imm",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+       BPF_JMP32_IMM(BPF_JGT, BPF_REG_0, 1, 5),
+       BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+       BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_hash_48b = { 4 },
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "jgt32: range bound deduction, reg1 op reg2, reg1 unknown",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+       BPF_MOV32_IMM(BPF_REG_2, 1),
+       BPF_JMP32_REG(BPF_JGT, BPF_REG_0, BPF_REG_2, 5),
+       BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+       BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_hash_48b = { 4 },
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
+{
+       "jle32: range bound deduction, reg1 op reg2, reg2 unknown",
+       .insns = {
+       BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+       BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+       BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+       BPF_LD_MAP_FD(BPF_REG_1, 0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_cgroup_classid),
+       BPF_MOV32_IMM(BPF_REG_2, 1),
+       BPF_JMP32_REG(BPF_JLE, BPF_REG_2, BPF_REG_0, 5),
+       BPF_MOV32_REG(BPF_REG_6, BPF_REG_0),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 32),
+       BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_6),
+       BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+       BPF_MOV32_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .fixup_map_hash_48b = { 4 },
+       .result = ACCEPT,
+       .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
+},
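Conceptually, the deduction at the jmp32 boundary is just a clamp of the
tracked 32-bit unsigned bounds on the branch that falls through to the store.
The snippet below is a stand-alone illustration under that reading; the struct
and function names are invented and do not correspond to the kernel's internal
register state.

/* Invented types/helpers showing the shape of the clamp, not verifier code. */
#include <stdint.h>
#include <stdio.h>

struct bounds32 {
	uint32_t umin;	/* smallest possible value of the low 32 bits */
	uint32_t umax;	/* largest possible value of the low 32 bits */
};

/* 'if (w_dst > imm) goto ...' fell through: w_dst <= imm (test 1). */
static void jgt32_imm_not_taken(struct bounds32 *dst, uint32_t imm)
{
	if (imm < dst->umax)
		dst->umax = imm;
}

/* 'if (w_dst > w_src) goto ...' fell through: w_dst <= src->umax (test 2;
 * test 3's jle form narrows the src register analogously). */
static void jgt32_reg_not_taken(struct bounds32 *dst, const struct bounds32 *src)
{
	if (src->umax < dst->umax)
		dst->umax = src->umax;
}

int main(void)
{
	/* r0 after get_cgroup_classid: low 32 bits completely unknown */
	struct bounds32 w0 = { .umin = 0, .umax = UINT32_MAX };

	jgt32_imm_not_taken(&w0, 1);	/* 'w0 > 1' not taken */
	printf("w0 in [%u, %u]\n", (unsigned)w0.umin, (unsigned)w0.umax);
	return 0;
}

The new entries are picked up by the existing verifier test runner; typically
something like 'make -C tools/testing/selftests/bpf' followed by
'sudo ./test_verifier' runs them together with the rest of jmp32.c.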