Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
[linux-2.6-microblaze.git] / kernel / bpf / verifier.c
index a20d773..096fdac 100644 (file)
@@ -1562,6 +1562,21 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
        reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
 }
 
+static void reg_bounds_sync(struct bpf_reg_state *reg)
+{
+       /* We might have learned new bounds from the var_off. */
+       __update_reg_bounds(reg);
+       /* We might have learned something about the sign bit. */
+       __reg_deduce_bounds(reg);
+       /* We might have learned some bits from the bounds. */
+       __reg_bound_offset(reg);
+       /* Intersecting with the old var_off might have improved our bounds
+        * slightly, e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
+        * then new var_off is (0; 0x7f...fc) which improves our umax.
+        */
+       __update_reg_bounds(reg);
+}
+
 static bool __reg32_bound_s64(s32 a)
 {
        return a >= 0 && a <= S32_MAX;
@@ -1603,16 +1618,8 @@ static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
                 * so they do not impact tnum bounds calculation.
                 */
                __mark_reg64_unbounded(reg);
-               __update_reg_bounds(reg);
        }
-
-       /* Intersecting with the old var_off might have improved our bounds
-        * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
-        * then new var_off is (0; 0x7f...fc) which improves our umax.
-        */
-       __reg_deduce_bounds(reg);
-       __reg_bound_offset(reg);
-       __update_reg_bounds(reg);
+       reg_bounds_sync(reg);
 }
 
 static bool __reg64_bound_s32(s64 a)
@@ -1628,7 +1635,6 @@ static bool __reg64_bound_u32(u64 a)
 static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
 {
        __mark_reg32_unbounded(reg);
-
        if (__reg64_bound_s32(reg->smin_value) && __reg64_bound_s32(reg->smax_value)) {
                reg->s32_min_value = (s32)reg->smin_value;
                reg->s32_max_value = (s32)reg->smax_value;
@@ -1637,14 +1643,7 @@ static void __reg_combine_64_into_32(struct bpf_reg_state *reg)
                reg->u32_min_value = (u32)reg->umin_value;
                reg->u32_max_value = (u32)reg->umax_value;
        }
-
-       /* Intersecting with the old var_off might have improved our bounds
-        * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
-        * then new var_off is (0; 0x7f...fc) which improves our umax.
-        */
-       __reg_deduce_bounds(reg);
-       __reg_bound_offset(reg);
-       __update_reg_bounds(reg);
+       reg_bounds_sync(reg);
 }
 
 /* Mark a register as having a completely unknown (scalar) value. */
@@ -5534,17 +5533,6 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type)
               type == ARG_CONST_SIZE_OR_ZERO;
 }
 
-static bool arg_type_is_alloc_size(enum bpf_arg_type type)
-{
-       return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
-}
-
-static bool arg_type_is_int_ptr(enum bpf_arg_type type)
-{
-       return type == ARG_PTR_TO_INT ||
-              type == ARG_PTR_TO_LONG;
-}
-
 static bool arg_type_is_release(enum bpf_arg_type type)
 {
        return type & OBJ_RELEASE;
@@ -5930,7 +5918,8 @@ skip_type_check:
                meta->ref_obj_id = reg->ref_obj_id;
        }
 
-       if (arg_type == ARG_CONST_MAP_PTR) {
+       switch (base_type(arg_type)) {
+       case ARG_CONST_MAP_PTR:
                /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
                if (meta->map_ptr) {
                        /* Use map_uid (which is unique id of inner map) to reject:
@@ -5955,7 +5944,8 @@ skip_type_check:
                }
                meta->map_ptr = reg->map_ptr;
                meta->map_uid = reg->map_uid;
-       } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
+               break;
+       case ARG_PTR_TO_MAP_KEY:
                /* bpf_map_xxx(..., map_ptr, ..., key) call:
                 * check that [key, key + map->key_size) are within
                 * stack limits and initialized
@@ -5972,7 +5962,8 @@ skip_type_check:
                err = check_helper_mem_access(env, regno,
                                              meta->map_ptr->key_size, false,
                                              NULL);
-       } else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
+               break;
+       case ARG_PTR_TO_MAP_VALUE:
                if (type_may_be_null(arg_type) && register_is_null(reg))
                        return 0;
 
@@ -5988,14 +5979,16 @@ skip_type_check:
                err = check_helper_mem_access(env, regno,
                                              meta->map_ptr->value_size, false,
                                              meta);
-       } else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) {
+               break;
+       case ARG_PTR_TO_PERCPU_BTF_ID:
                if (!reg->btf_id) {
                        verbose(env, "Helper has invalid btf_id in R%d\n", regno);
                        return -EACCES;
                }
                meta->ret_btf = reg->btf;
                meta->ret_btf_id = reg->btf_id;
-       } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
+               break;
+       case ARG_PTR_TO_SPIN_LOCK:
                if (meta->func_id == BPF_FUNC_spin_lock) {
                        if (process_spin_lock(env, regno, true))
                                return -EACCES;
@@ -6006,12 +5999,15 @@ skip_type_check:
                        verbose(env, "verifier internal error\n");
                        return -EFAULT;
                }
-       } else if (arg_type == ARG_PTR_TO_TIMER) {
+               break;
+       case ARG_PTR_TO_TIMER:
                if (process_timer_func(env, regno, meta))
                        return -EACCES;
-       } else if (arg_type == ARG_PTR_TO_FUNC) {
+               break;
+       case ARG_PTR_TO_FUNC:
                meta->subprogno = reg->subprogno;
-       } else if (base_type(arg_type) == ARG_PTR_TO_MEM) {
+               break;
+       case ARG_PTR_TO_MEM:
                /* The access to this pointer is only checked when we hit the
                 * next is_mem_size argument below.
                 */
@@ -6021,11 +6017,14 @@ skip_type_check:
                                                      fn->arg_size[arg], false,
                                                      meta);
                }
-       } else if (arg_type_is_mem_size(arg_type)) {
-               bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
-
-               err = check_mem_size_reg(env, reg, regno, zero_size_allowed, meta);
-       } else if (arg_type_is_dynptr(arg_type)) {
+               break;
+       case ARG_CONST_SIZE:
+               err = check_mem_size_reg(env, reg, regno, false, meta);
+               break;
+       case ARG_CONST_SIZE_OR_ZERO:
+               err = check_mem_size_reg(env, reg, regno, true, meta);
+               break;
+       case ARG_PTR_TO_DYNPTR:
                if (arg_type & MEM_UNINIT) {
                        if (!is_dynptr_reg_valid_uninit(env, reg)) {
                                verbose(env, "Dynptr has to be an uninitialized dynptr\n");
@@ -6059,21 +6058,28 @@ skip_type_check:
                                err_extra, arg + 1);
                        return -EINVAL;
                }
-       } else if (arg_type_is_alloc_size(arg_type)) {
+               break;
+       case ARG_CONST_ALLOC_SIZE_OR_ZERO:
                if (!tnum_is_const(reg->var_off)) {
                        verbose(env, "R%d is not a known constant'\n",
                                regno);
                        return -EACCES;
                }
                meta->mem_size = reg->var_off.value;
-       } else if (arg_type_is_int_ptr(arg_type)) {
+               break;
+       case ARG_PTR_TO_INT:
+       case ARG_PTR_TO_LONG:
+       {
                int size = int_ptr_type_to_size(arg_type);
 
                err = check_helper_mem_access(env, regno, size, false, meta);
                if (err)
                        return err;
                err = check_ptr_alignment(env, reg, 0, size, true);
-       } else if (arg_type == ARG_PTR_TO_CONST_STR) {
+               break;
+       }
+       case ARG_PTR_TO_CONST_STR:
+       {
                struct bpf_map *map = reg->map_ptr;
                int map_off;
                u64 map_addr;
@@ -6112,9 +6118,12 @@ skip_type_check:
                        verbose(env, "string is not zero-terminated\n");
                        return -EINVAL;
                }
-       } else if (arg_type == ARG_PTR_TO_KPTR) {
+               break;
+       }
+       case ARG_PTR_TO_KPTR:
                if (process_kptr_func(env, regno, meta))
                        return -EACCES;
+               break;
        }
 
        return err;
@@ -6965,9 +6974,7 @@ static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
        ret_reg->s32_max_value = meta->msize_max_value;
        ret_reg->smin_value = -MAX_ERRNO;
        ret_reg->s32_min_value = -MAX_ERRNO;
-       __reg_deduce_bounds(ret_reg);
-       __reg_bound_offset(ret_reg);
-       __update_reg_bounds(ret_reg);
+       reg_bounds_sync(ret_reg);
 }
 
 static int
@@ -7163,6 +7170,7 @@ static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno
 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
                             int *insn_idx_p)
 {
+       enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
        const struct bpf_func_proto *fn = NULL;
        enum bpf_return_type ret_type;
        enum bpf_type_flag ret_flag;
@@ -7322,6 +7330,19 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
                                reg_type_str(env, regs[BPF_REG_1].type));
                        return -EACCES;
                }
+               break;
+       case BPF_FUNC_set_retval:
+               if (prog_type == BPF_PROG_TYPE_LSM &&
+                   env->prog->expected_attach_type == BPF_LSM_CGROUP) {
+                       if (!env->prog->aux->attach_func_proto->type) {
+                               /* Make sure programs that attach to void
+                                * hooks don't try to modify return value.
+                                */
+                               verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
+                               return -EINVAL;
+                       }
+               }
+               break;
        }
 
        if (err)
@@ -7541,6 +7562,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
        int err, insn_idx = *insn_idx_p;
        const struct btf_param *args;
        struct btf *desc_btf;
+       u32 *kfunc_flags;
        bool acq;
 
        /* skip for now, but return error when we find this in fixup_kfunc_call */
@@ -7556,18 +7578,16 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
        func_name = btf_name_by_offset(desc_btf, func->name_off);
        func_proto = btf_type_by_id(desc_btf, func->type);
 
-       if (!btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
-                                     BTF_KFUNC_TYPE_CHECK, func_id)) {
+       kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id);
+       if (!kfunc_flags) {
                verbose(env, "calling kernel function %s is not allowed\n",
                        func_name);
                return -EACCES;
        }
-
-       acq = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
-                                       BTF_KFUNC_TYPE_ACQUIRE, func_id);
+       acq = *kfunc_flags & KF_ACQUIRE;
 
        /* Check the arguments */
-       err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs);
+       err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, *kfunc_flags);
        if (err < 0)
                return err;
        /* In case of release function, we get register number of refcounted
@@ -7611,8 +7631,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
                regs[BPF_REG_0].btf = desc_btf;
                regs[BPF_REG_0].type = PTR_TO_BTF_ID;
                regs[BPF_REG_0].btf_id = ptr_type_id;
-               if (btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
-                                             BTF_KFUNC_TYPE_RET_NULL, func_id)) {
+               if (*kfunc_flags & KF_RET_NULL) {
                        regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
                        /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
                        regs[BPF_REG_0].id = ++env->id_gen;
@@ -8255,11 +8274,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
        if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
                return -EINVAL;
-
-       __update_reg_bounds(dst_reg);
-       __reg_deduce_bounds(dst_reg);
-       __reg_bound_offset(dst_reg);
-
+       reg_bounds_sync(dst_reg);
        if (sanitize_check_bounds(env, insn, dst_reg) < 0)
                return -EACCES;
        if (sanitize_needed(opcode)) {
@@ -8997,10 +9012,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
        /* ALU32 ops are zero extended into 64bit register */
        if (alu32)
                zext_32_to_64(dst_reg);
-
-       __update_reg_bounds(dst_reg);
-       __reg_deduce_bounds(dst_reg);
-       __reg_bound_offset(dst_reg);
+       reg_bounds_sync(dst_reg);
        return 0;
 }
 
@@ -9096,7 +9108,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 
        if (opcode == BPF_END || opcode == BPF_NEG) {
                if (opcode == BPF_NEG) {
-                       if (BPF_SRC(insn->code) != 0 ||
+                       if (BPF_SRC(insn->code) != BPF_K ||
                            insn->src_reg != BPF_REG_0 ||
                            insn->off != 0 || insn->imm != 0) {
                                verbose(env, "BPF_NEG uses reserved fields\n");
@@ -9189,10 +9201,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
                                                         insn->dst_reg);
                                }
                                zext_32_to_64(dst_reg);
-
-                               __update_reg_bounds(dst_reg);
-                               __reg_deduce_bounds(dst_reg);
-                               __reg_bound_offset(dst_reg);
+                               reg_bounds_sync(dst_reg);
                        }
                } else {
                        /* case: R = imm
@@ -9630,26 +9639,33 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
                return;
 
        switch (opcode) {
+       /* JEQ/JNE comparison doesn't change the register equivalence.
+        *
+        * r1 = r2;
+        * if (r1 == 42) goto label;
+        * ...
+        * label: // here both r1 and r2 are known to be 42.
+        *
+        * Hence when marking register as known preserve its ID.
+        */
        case BPF_JEQ:
+               if (is_jmp32) {
+                       __mark_reg32_known(true_reg, val32);
+                       true_32off = tnum_subreg(true_reg->var_off);
+               } else {
+                       ___mark_reg_known(true_reg, val);
+                       true_64off = true_reg->var_off;
+               }
+               break;
        case BPF_JNE:
-       {
-               struct bpf_reg_state *reg =
-                       opcode == BPF_JEQ ? true_reg : false_reg;
-
-               /* JEQ/JNE comparison doesn't change the register equivalence.
-                * r1 = r2;
-                * if (r1 == 42) goto label;
-                * ...
-                * label: // here both r1 and r2 are known to be 42.
-                *
-                * Hence when marking register as known preserve it's ID.
-                */
-               if (is_jmp32)
-                       __mark_reg32_known(reg, val32);
-               else
-                       ___mark_reg_known(reg, val);
+               if (is_jmp32) {
+                       __mark_reg32_known(false_reg, val32);
+                       false_32off = tnum_subreg(false_reg->var_off);
+               } else {
+                       ___mark_reg_known(false_reg, val);
+                       false_64off = false_reg->var_off;
+               }
                break;
-       }
        case BPF_JSET:
                if (is_jmp32) {
                        false_32off = tnum_and(false_32off, tnum_const(~val32));
@@ -9788,21 +9804,8 @@ static void __reg_combine_min_max(struct bpf_reg_state *src_reg,
                                                        dst_reg->smax_value);
        src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off,
                                                             dst_reg->var_off);
-       /* We might have learned new bounds from the var_off. */
-       __update_reg_bounds(src_reg);
-       __update_reg_bounds(dst_reg);
-       /* We might have learned something about the sign bit. */
-       __reg_deduce_bounds(src_reg);
-       __reg_deduce_bounds(dst_reg);
-       /* We might have learned some bits from the bounds. */
-       __reg_bound_offset(src_reg);
-       __reg_bound_offset(dst_reg);
-       /* Intersecting with the old var_off might have improved our bounds
-        * slightly.  e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
-        * then new var_off is (0; 0x7f...fc) which improves our umax.
-        */
-       __update_reg_bounds(src_reg);
-       __update_reg_bounds(dst_reg);
+       reg_bounds_sync(src_reg);
+       reg_bounds_sync(dst_reg);
 }
 
 static void reg_combine_min_max(struct bpf_reg_state *true_src,
@@ -10432,11 +10435,21 @@ static int check_return_code(struct bpf_verifier_env *env)
        const bool is_subprog = frame->subprogno;
 
        /* LSM and struct_ops func-ptr's return type could be "void" */
-       if (!is_subprog &&
-           (prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
-            prog_type == BPF_PROG_TYPE_LSM) &&
-           !prog->aux->attach_func_proto->type)
-               return 0;
+       if (!is_subprog) {
+               switch (prog_type) {
+               case BPF_PROG_TYPE_LSM:
+                       if (prog->expected_attach_type == BPF_LSM_CGROUP)
+                               /* See below, can be 0 or 0-1 depending on hook. */
+                               break;
+                       fallthrough;
+               case BPF_PROG_TYPE_STRUCT_OPS:
+                       if (!prog->aux->attach_func_proto->type)
+                               return 0;
+                       break;
+               default:
+                       break;
+               }
+       }
 
        /* eBPF calling convention is such that R0 is used
         * to return the value from eBPF program.
@@ -10527,6 +10540,22 @@ static int check_return_code(struct bpf_verifier_env *env)
        case BPF_PROG_TYPE_SK_LOOKUP:
                range = tnum_range(SK_DROP, SK_PASS);
                break;
+
+       case BPF_PROG_TYPE_LSM:
+               if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
+                       /* Regular BPF_PROG_TYPE_LSM programs can return
+                        * any value.
+                        */
+                       return 0;
+               }
+               if (!env->prog->aux->attach_func_proto->type) {
+                       /* Make sure programs that attach to void
+                        * hooks don't try to modify return value.
+                        */
+                       range = tnum_range(1, 1);
+               }
+               break;
+
        case BPF_PROG_TYPE_EXT:
                /* freplace program can return anything as its return value
                 * depends on the to-be-replaced kernel func or bpf program.
@@ -10543,6 +10572,10 @@ static int check_return_code(struct bpf_verifier_env *env)
 
        if (!tnum_in(range, reg->var_off)) {
                verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
+               if (prog->expected_attach_type == BPF_LSM_CGROUP &&
+                   prog_type == BPF_PROG_TYPE_LSM &&
+                   !prog->aux->attach_func_proto->type)
+                       verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
                return -EINVAL;
        }
 
@@ -12539,6 +12572,7 @@ static bool is_tracing_prog_type(enum bpf_prog_type type)
        case BPF_PROG_TYPE_TRACEPOINT:
        case BPF_PROG_TYPE_PERF_EVENT:
        case BPF_PROG_TYPE_RAW_TRACEPOINT:
+       case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
                return true;
        default:
                return false;
@@ -13597,6 +13631,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                /* Below members will be freed only at prog->aux */
                func[i]->aux->btf = prog->aux->btf;
                func[i]->aux->func_info = prog->aux->func_info;
+               func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
                func[i]->aux->poke_tab = prog->aux->poke_tab;
                func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
 
@@ -13609,9 +13644,6 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                                poke->aux = func[i]->aux;
                }
 
-               /* Use bpf_prog_F_tag to indicate functions in stack traces.
-                * Long term would need debug info to populate names
-                */
                func[i]->aux->name[0] = 'F';
                func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
                func[i]->jit_requested = 1;
@@ -14417,7 +14449,7 @@ static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
        /* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */
        call_insn_offset = position + 12;
        callback_offset = callback_start - call_insn_offset - 1;
-       env->prog->insnsi[call_insn_offset].imm = callback_offset;
+       new_prog->insnsi[call_insn_offset].imm = callback_offset;
 
        return new_prog;
 }
@@ -14902,6 +14934,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
                fallthrough;
        case BPF_MODIFY_RETURN:
        case BPF_LSM_MAC:
+       case BPF_LSM_CGROUP:
        case BPF_TRACE_FENTRY:
        case BPF_TRACE_FEXIT:
                if (!btf_type_is_func(t)) {