bpf, offload: Reorder offload callback 'prepare' in verifier
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8fd552c..09849e4 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6496,6 +6496,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
 {
        struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : tmp_aux;
        struct bpf_verifier_state *vstate = env->cur_state;
+       bool off_is_imm = tnum_is_const(off_reg->var_off);
        bool off_is_neg = off_reg->smin_value < 0;
        bool ptr_is_dst_reg = ptr_reg == dst_reg;
        u8 opcode = BPF_OP(insn->code);
@@ -6526,6 +6527,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
                alu_limit = abs(tmp_aux->alu_limit - alu_limit);
        } else {
                alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+               alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
                alu_state |= ptr_is_dst_reg ?
                             BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
        }
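
The new off_is_imm bit records whether the offset register holds a
verifier-known constant; sanitize_ptr_alu() folds it into
insn_aux_data::alu_state so that do_misc_fixups() can later skip the
runtime masking sequence entirely. For reference, a sketch of the
alu_state flag layout (values mirror include/linux/bpf_verifier.h of
this era; treat as illustrative):

/* Flags consumed by do_misc_fixups(); BPF_ALU_IMMEDIATE is the new bit. */
#define BPF_ALU_SANITIZE_SRC	(1U << 0)
#define BPF_ALU_SANITIZE_DST	(1U << 1)
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_IMMEDIATE	(1U << 4)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | BPF_ALU_SANITIZE_DST)
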
@@ -7082,11 +7084,10 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
        s32 smin_val = src_reg->s32_min_value;
        u32 umax_val = src_reg->u32_max_value;
 
-       /* Assuming scalar64_min_max_and will be called so its safe
-        * to skip updating register for known 32-bit case.
-        */
-       if (src_known && dst_known)
+       if (src_known && dst_known) {
+               __mark_reg32_known(dst_reg, var32_off.value);
                return;
+       }
 
        /* We get our minimum from the var_off, since that's inherently
         * bitwise.  Our maximum is the minimum of the operands' maxima.
@@ -7106,7 +7107,6 @@ static void scalar32_min_max_and(struct bpf_reg_state *dst_reg,
                dst_reg->s32_min_value = dst_reg->u32_min_value;
                dst_reg->s32_max_value = dst_reg->u32_max_value;
        }
-
 }
 
 static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
@@ -7153,11 +7153,10 @@ static void scalar32_min_max_or(struct bpf_reg_state *dst_reg,
        s32 smin_val = src_reg->s32_min_value;
        u32 umin_val = src_reg->u32_min_value;
 
-       /* Assuming scalar64_min_max_or will be called so it is safe
-        * to skip updating register for known case.
-        */
-       if (src_known && dst_known)
+       if (src_known && dst_known) {
+               __mark_reg32_known(dst_reg, var32_off.value);
                return;
+       }
 
        /* We get our maximum from the var_off, and our minimum is the
         * maximum of the operands' minima
@@ -7222,11 +7221,10 @@ static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
        struct tnum var32_off = tnum_subreg(dst_reg->var_off);
        s32 smin_val = src_reg->s32_min_value;
 
-       /* Assuming scalar64_min_max_xor will be called so it is safe
-        * to skip updating register for known case.
-        */
-       if (src_known && dst_known)
+       if (src_known && dst_known) {
+               __mark_reg32_known(dst_reg, var32_off.value);
                return;
+       }
 
        /* We get both minimum and maximum from the var32_off. */
        dst_reg->u32_min_value = var32_off.value;
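
All three 32-bit trackers above (AND, OR, XOR) shared the same flaw:
when both operands are known constants they returned early, assuming
the 64-bit counterpart would finalize the bounds. For alu32 operations,
though, the 64-bit tnum need not be constant, so the subregister could
be left with stale bounds. The fix marks the 32-bit subregister known
from var32_off directly. A minimal userspace model of what
__mark_reg32_known() does here (simplified from kernel/bpf/verifier.c;
struct and helper names shortened):

#include <stdint.h>

/* Minimal model of the verifier's 32-bit bound state. */
struct reg32_state {
	uint32_t u32_min_value, u32_max_value;
	int32_t  s32_min_value, s32_max_value;
	uint64_t var_off_value, var_off_mask; /* tnum: known bits / unknown mask */
};

/* Model of __mark_reg32_known(): pin all 32-bit bounds to the constant
 * and mark the low 32 bits of the tnum fully known.
 */
static void mark_reg32_known(struct reg32_state *reg, uint32_t val)
{
	reg->var_off_value = (reg->var_off_value & ~0xffffffffULL) | val;
	reg->var_off_mask &= ~0xffffffffULL;	/* low subreg: no unknown bits */
	reg->u32_min_value = val;
	reg->u32_max_value = val;
	reg->s32_min_value = (int32_t)val;
	reg->s32_max_value = (int32_t)val;
}
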
@@ -12371,7 +12369,7 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                        const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
                        const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
                        struct bpf_insn *patch = &insn_buf[0];
-                       bool issrc, isneg;
+                       bool issrc, isneg, isimm;
                        u32 off_reg;
 
                        aux = &env->insn_aux_data[i + delta];
@@ -12382,28 +12380,29 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                        isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
                        issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
                                BPF_ALU_SANITIZE_SRC;
+                       isimm = aux->alu_state & BPF_ALU_IMMEDIATE;
 
                        off_reg = issrc ? insn->src_reg : insn->dst_reg;
-                       if (isneg)
-                               *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
-                       *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
-                       *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
-                       *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
-                       *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
-                       *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
-                       if (issrc) {
-                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
-                                                        off_reg);
-                               insn->src_reg = BPF_REG_AX;
+                       if (isimm) {
+                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
                        } else {
-                               *patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
-                                                        BPF_REG_AX);
+                               if (isneg)
+                                       *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+                               *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+                               *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
+                               *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
+                               *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+                               *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+                               *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
                        }
+                       if (!issrc)
+                               *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
+                       insn->src_reg = BPF_REG_AX;
                        if (isneg)
                                insn->code = insn->code == code_add ?
                                             code_sub : code_add;
                        *patch++ = *insn;
-                       if (issrc && isneg)
+                       if (issrc && isneg && !isimm)
                                *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
                        cnt = patch - insn_buf;
 
@@ -13197,6 +13196,17 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
        return 0;
 }
 
+BTF_SET_START(btf_id_deny)
+BTF_ID_UNUSED
+#ifdef CONFIG_SMP
+BTF_ID(func, migrate_disable)
+BTF_ID(func, migrate_enable)
+#endif
+#if !defined CONFIG_PREEMPT_RCU && !defined CONFIG_TINY_RCU
+BTF_ID(func, rcu_read_unlock_strict)
+#endif
+BTF_SET_END(btf_id_deny)
+
 static int check_attach_btf_id(struct bpf_verifier_env *env)
 {
        struct bpf_prog *prog = env->prog;
@@ -13256,6 +13266,9 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
                ret = bpf_lsm_verify_prog(&env->log, prog);
                if (ret < 0)
                        return ret;
+       } else if (prog->type == BPF_PROG_TYPE_TRACING &&
+                  btf_id_set_contains(&btf_id_deny, btf_id)) {
+               return -EINVAL;
        }
 
        key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
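
The new btf_id_deny set rejects BPF_PROG_TYPE_TRACING attachment to a
few functions that the trampoline machinery itself depends on
(migrate_disable()/migrate_enable(), plus rcu_read_unlock_strict() on
the affected RCU configs), where attaching would recurse into the
trampoline. btf_id_set_contains() is essentially a binary search over
the id array, which resolve_btfids sorts at build time; a small
userspace model of the lookup:

#include <stdbool.h>
#include <stdlib.h>

/* Model of a BTF id set as laid out by BTF_SET_START/BTF_ID/BTF_SET_END:
 * a count followed by a sorted array of ids.
 */
struct btf_id_set_model {
	unsigned int cnt;
	unsigned int ids[];
};

static int cmp_id(const void *a, const void *b)
{
	unsigned int x = *(const unsigned int *)a;
	unsigned int y = *(const unsigned int *)b;

	return x < y ? -1 : x > y ? 1 : 0;
}

/* Model of btf_id_set_contains(): bsearch over the sorted ids. */
static bool id_set_contains(const struct btf_id_set_model *set, unsigned int id)
{
	return bsearch(&id, set->ids, set->cnt, sizeof(id), cmp_id) != NULL;
}
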
@@ -13355,12 +13368,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
        if (is_priv)
                env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
 
-       if (bpf_prog_is_dev_bound(env->prog->aux)) {
-               ret = bpf_prog_offload_verifier_prep(env->prog);
-               if (ret)
-                       goto skip_full_check;
-       }
-
        env->explored_states = kvcalloc(state_htab_size(env),
                                       sizeof(struct bpf_verifier_state_list *),
                                       GFP_USER);
@@ -13388,6 +13395,12 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
        if (ret < 0)
                goto skip_full_check;
 
+       if (bpf_prog_is_dev_bound(env->prog->aux)) {
+               ret = bpf_prog_offload_verifier_prep(env->prog);
+               if (ret)
+                       goto skip_full_check;
+       }
+
        ret = check_cfg(env);
        if (ret < 0)
                goto skip_full_check;
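
This is the reorder from the subject line: the offload 'prepare'
callback moves from before the explored_states allocation to just
before check_cfg(), after the earlier setup and instruction-rewriting
steps in bpf_check() (whichever step produced the ret checked just
above the new call site). A device-bound program is thus handed to the
driver in its post-rewrite form. Sketched ordering (intermediate steps
abbreviated; an assumption drawn from this diff's context, not a full
listing):

/*
 * bpf_check() after this change:
 *
 *	env->explored_states = kvcalloc(...);
 *	... earlier checks/rewrites, each ending in:
 *		if (ret < 0) goto skip_full_check;
 *	if (bpf_prog_is_dev_bound(env->prog->aux))
 *		ret = bpf_prog_offload_verifier_prep(env->prog);  /* moved here */
 *	ret = check_cfg(env);
 */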