return rec;
}
+static bool mask_raw_tp_reg_cond(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+ return reg->type == (PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL) &&
+ bpf_prog_is_raw_tp(env->prog) && !reg->ref_obj_id;
+}
+
+static bool mask_raw_tp_reg(const struct bpf_verifier_env *env, struct bpf_reg_state *reg)
+{
+ if (!mask_raw_tp_reg_cond(env, reg))
+ return false;
+ reg->type &= ~PTR_MAYBE_NULL;
+ return true;
+}
+
+static void unmask_raw_tp_reg(struct bpf_reg_state *reg, bool result)
+{
+ if (result)
+ reg->type |= PTR_MAYBE_NULL;
+}
+
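For context, a minimal sketch (not part of this patch) of the kind of raw_tp program this relaxation is meant to accept; the tracepoint and the field access are illustrative:

    /* Illustrative only, assuming the usual vmlinux.h/libbpf skeleton setup.
     * The task argument carries PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL.
     * With this patch the verifier still accepts the direct dereference below
     * even though the argument is tagged PTR_MAYBE_NULL, and the load is
     * emitted as BPF_PROBE_MEM (see the BPF_PROBE_MEM hunk at the end of this
     * patch) so a NULL pointer faults safely at runtime.
     */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("raw_tp/task_newtask")
    int BPF_PROG(handle_newtask, struct task_struct *task, u64 clone_flags)
    {
            return task->pid; /* no explicit NULL check required */
    }

    char LICENSE[] SEC("license") = "GPL";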
static bool subprog_is_global(const struct bpf_verifier_env *env, int subprog)
{
struct bpf_func_info_aux *aux = env->prog->aux->func_info_aux;
const char *field_name = NULL;
enum bpf_type_flag flag = 0;
u32 btf_id = 0;
+ bool mask;
int ret;
if (!env->allow_ptr_leaks) {
if (ret < 0)
return ret;
-
+ /* For raw_tp progs, we allow dereference of PTR_MAYBE_NULL
+ * trusted PTR_TO_BTF_ID; these are the pointers that may be
+ * arguments to the raw_tp. Since the internal checks for a
+ * trusted reg in check_ptr_to_btf_access treat the
+ * PTR_MAYBE_NULL modifier as problematic, mask it out
+ * temporarily for the check. Don't apply this to pointers with
+ * ref_obj_id > 0, as those won't be raw_tp args.
+ *
+ * We may end up applying this relaxation to other trusted
+ * PTR_TO_BTF_ID registers with the maybe-null flag, since we
+ * cannot distinguish PTR_MAYBE_NULL tagged for arguments from
+ * normal tagging, but that only expands allowed behavior and
+ * does not cause a regression for existing behavior.
+ */
+ mask = mask_raw_tp_reg(env, reg);
if (ret != PTR_TO_BTF_ID) {
/* just mark; */
clear_trusted_flags(&flag);
}
- if (atype == BPF_READ && value_regno >= 0)
+ if (atype == BPF_READ && value_regno >= 0) {
mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id, flag);
+ /* We've assigned a new type to regno, so don't undo masking. */
+ if (regno == value_regno)
+ mask = false;
+ }
+ unmask_raw_tp_reg(reg, mask);
return 0;
}
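The same save/check/restore bracketing recurs at every call site touched below; a condensed sketch of the idiom, where the called check is a hypothetical stand-in for any verifier helper that rejects PTR_MAYBE_NULL on an otherwise trusted PTR_TO_BTF_ID:

    /* Condensed sketch, not verifier code as-is. The bool returned by
     * mask_raw_tp_reg() must be threaded to the matching unmask so that
     * PTR_MAYBE_NULL is restored only when it was actually cleared here.
     * some_check_rejecting_maybe_null() is a hypothetical placeholder.
     */
    bool mask = mask_raw_tp_reg(env, reg);  /* clears PTR_MAYBE_NULL for raw_tp args */
    err = some_check_rejecting_maybe_null(env, reg);
    unmask_raw_tp_reg(reg, mask);           /* no-op if nothing was masked */
    if (err)
            return err;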
if (!err && t == BPF_READ && value_regno >= 0)
mark_reg_unknown(env, regs, value_regno);
} else if (base_type(reg->type) == PTR_TO_BTF_ID &&
- !type_may_be_null(reg->type)) {
+ (mask_raw_tp_reg_cond(env, reg) || !type_may_be_null(reg->type))) {
err = check_ptr_to_btf_access(env, regs, regno, off, size, t,
value_regno);
} else if (reg->type == CONST_PTR_TO_MAP) {
enum bpf_reg_type type = reg->type;
u32 *arg_btf_id = NULL;
int err = 0;
+ bool mask;
if (arg_type == ARG_DONTCARE)
return 0;
base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
arg_btf_id = fn->arg_btf_id[arg];
+ mask = mask_raw_tp_reg(env, reg);
err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
- if (err)
- return err;
- err = check_func_arg_reg_off(env, reg, regno, arg_type);
+ err = err ?: check_func_arg_reg_off(env, reg, regno, arg_type);
+ unmask_raw_tp_reg(reg, mask);
if (err)
return err;
return ret;
} else if (base_type(arg->arg_type) == ARG_PTR_TO_BTF_ID) {
struct bpf_call_arg_meta meta;
+ bool mask;
int err;
if (register_is_null(reg) && type_may_be_null(arg->arg_type))
continue;
memset(&meta, 0, sizeof(meta)); /* leave func_id as zero */
+ mask = mask_raw_tp_reg(env, reg);
err = check_reg_type(env, regno, arg->arg_type, &arg->btf_id, &meta);
err = err ?: check_func_arg_reg_off(env, reg, regno, arg->arg_type);
+ unmask_raw_tp_reg(reg, mask);
if (err)
return err;
} else {
enum bpf_arg_type arg_type = ARG_DONTCARE;
u32 regno = i + 1, ref_id, type_size;
bool is_ret_buf_sz = false;
+ bool mask = false;
int kf_arg_type;
t = btf_type_skip_modifiers(btf, args[i].type, NULL);
return -EINVAL;
}
+ mask = mask_raw_tp_reg(env, reg);
if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) &&
(register_is_null(reg) || type_may_be_null(reg->type)) &&
!is_kfunc_arg_nullable(meta->btf, &args[i])) {
verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i);
+ unmask_raw_tp_reg(reg, mask);
return -EACCES;
}
+ unmask_raw_tp_reg(reg, mask);
if (reg->ref_obj_id) {
if (is_kfunc_release(meta) && meta->ref_obj_id) {
if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta))
break;
+ /* Allow passing maybe NULL raw_tp arguments to
+ * kfuncs for compatibility. Don't apply this to
+ * arguments with ref_obj_id > 0.
+ */
+ mask = mask_raw_tp_reg(env, reg);
if (!is_trusted_reg(reg)) {
if (!is_kfunc_rcu(meta)) {
verbose(env, "R%d must be referenced or trusted\n", regno);
+ unmask_raw_tp_reg(reg, mask);
return -EINVAL;
}
if (!is_rcu_reg(reg)) {
verbose(env, "R%d must be a rcu pointer\n", regno);
+ unmask_raw_tp_reg(reg, mask);
return -EINVAL;
}
}
+ unmask_raw_tp_reg(reg, mask);
fallthrough;
case KF_ARG_PTR_TO_CTX:
case KF_ARG_PTR_TO_DYNPTR:
if (is_kfunc_release(meta) && reg->ref_obj_id)
arg_type |= OBJ_RELEASE;
+ mask = mask_raw_tp_reg(env, reg);
ret = check_func_arg_reg_off(env, reg, regno, arg_type);
+ unmask_raw_tp_reg(reg, mask);
if (ret < 0)
return ret;
ref_tname = btf_name_by_offset(btf, ref_t->name_off);
fallthrough;
case KF_ARG_PTR_TO_BTF_ID:
+ mask = mask_raw_tp_reg(env, reg);
/* Only base_type is checked, further checks are done here */
if ((base_type(reg->type) != PTR_TO_BTF_ID ||
(bpf_type_has_unsafe_modifiers(reg->type) && !is_rcu_reg(reg))) &&
verbose(env, "expected %s or socket\n",
reg_type_str(env, base_type(reg->type) |
(type_flag(reg->type) & BPF_REG_TRUSTED_MODIFIERS)));
+ unmask_raw_tp_reg(reg, mask);
return -EINVAL;
}
ret = process_kf_arg_ptr_to_btf_id(env, reg, ref_t, ref_tname, ref_id, meta, i);
+ unmask_raw_tp_reg(reg, mask);
if (ret < 0)
return ret;
break;
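To illustrate why the mask matters in this case, a short sketch, assuming PTR_MAYBE_NULL is not part of BPF_REG_TRUSTED_MODIFIERS; without the mask an otherwise trusted raw_tp argument would be reported as carrying unsafe modifiers and rejected:

    /* Sketch only; the assumption is that PTR_MAYBE_NULL lies outside
     * BPF_REG_TRUSTED_MODIFIERS, so it counts as an "unsafe" modifier.
     */
    u32 t = PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL;

    bpf_type_has_unsafe_modifiers(t);                    /* true: PTR_MAYBE_NULL remains */
    bpf_type_has_unsafe_modifiers(t & ~PTR_MAYBE_NULL);  /* false: plain trusted PTR_TO_BTF_ID */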
*/
static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
struct bpf_insn *insn,
- const struct bpf_reg_state *ptr_reg,
+ struct bpf_reg_state *ptr_reg,
const struct bpf_reg_state *off_reg)
{
struct bpf_verifier_state *vstate = env->cur_state;
struct bpf_sanitize_info info = {};
u8 opcode = BPF_OP(insn->code);
u32 dst = insn->dst_reg;
+ bool mask;
int ret;
dst_reg = &regs[dst];
return -EACCES;
}
+ mask = mask_raw_tp_reg(env, ptr_reg);
if (ptr_reg->type & PTR_MAYBE_NULL) {
verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n",
dst, reg_type_str(env, ptr_reg->type));
+ unmask_raw_tp_reg(ptr_reg, mask);
return -EACCES;
}
+ unmask_raw_tp_reg(ptr_reg, mask);
switch (base_type(ptr_reg->type)) {
case PTR_TO_CTX:
* for this case.
*/
case PTR_TO_BTF_ID | MEM_ALLOC | PTR_UNTRUSTED:
+ case PTR_TO_BTF_ID | PTR_TRUSTED | PTR_MAYBE_NULL:
if (type == BPF_READ) {
if (BPF_MODE(insn->code) == BPF_MEM)
insn->code = BPF_LDX | BPF_PROBE_MEM |