Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0efbac0..096fdac 100644
@@ -5533,17 +5533,6 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type)
               type == ARG_CONST_SIZE_OR_ZERO;
 }
 
-static bool arg_type_is_alloc_size(enum bpf_arg_type type)
-{
-       return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
-}
-
-static bool arg_type_is_int_ptr(enum bpf_arg_type type)
-{
-       return type == ARG_PTR_TO_INT ||
-              type == ARG_PTR_TO_LONG;
-}
-
 static bool arg_type_is_release(enum bpf_arg_type type)
 {
        return type & OBJ_RELEASE;
@@ -5847,6 +5836,7 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
        struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
        enum bpf_arg_type arg_type = fn->arg_type[arg];
        enum bpf_reg_type type = reg->type;
+       u32 *arg_btf_id = NULL;
        int err = 0;
 
        if (arg_type == ARG_DONTCARE)
@@ -5883,7 +5873,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
                 */
                goto skip_type_check;
 
-       err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg], meta);
+       /* arg_btf_id and arg_size are in a union. */
+       if (base_type(arg_type) == ARG_PTR_TO_BTF_ID)
+               arg_btf_id = fn->arg_btf_id[arg];
+
+       err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
        if (err)
                return err;
 
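
Note: fn->arg_btf_id[arg] is read only when the base type is
ARG_PTR_TO_BTF_ID because, in struct bpf_func_proto, the per-argument
BTF IDs and the new per-argument fixed sizes share one union. A sketch
of that layout (approximate; see include/linux/bpf.h for the exact
shape):

	union {
		u32 *arg_btf_id[5];	/* meaningful for ARG_PTR_TO_BTF_ID args */
		size_t arg_size[5];	/* meaningful for MEM_FIXED_SIZE args */
	};
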
@@ -5924,7 +5918,8 @@ skip_type_check:
                meta->ref_obj_id = reg->ref_obj_id;
        }
 
-       if (arg_type == ARG_CONST_MAP_PTR) {
+       switch (base_type(arg_type)) {
+       case ARG_CONST_MAP_PTR:
                /* bpf_map_xxx(map_ptr) call: remember that map_ptr */
                if (meta->map_ptr) {
                        /* Use map_uid (which is unique id of inner map) to reject:
@@ -5949,7 +5944,8 @@ skip_type_check:
                }
                meta->map_ptr = reg->map_ptr;
                meta->map_uid = reg->map_uid;
-       } else if (arg_type == ARG_PTR_TO_MAP_KEY) {
+               break;
+       case ARG_PTR_TO_MAP_KEY:
                /* bpf_map_xxx(..., map_ptr, ..., key) call:
                 * check that [key, key + map->key_size) are within
                 * stack limits and initialized
@@ -5966,7 +5962,8 @@ skip_type_check:
                err = check_helper_mem_access(env, regno,
                                              meta->map_ptr->key_size, false,
                                              NULL);
-       } else if (base_type(arg_type) == ARG_PTR_TO_MAP_VALUE) {
+               break;
+       case ARG_PTR_TO_MAP_VALUE:
                if (type_may_be_null(arg_type) && register_is_null(reg))
                        return 0;
 
@@ -5982,14 +5979,16 @@ skip_type_check:
                err = check_helper_mem_access(env, regno,
                                              meta->map_ptr->value_size, false,
                                              meta);
-       } else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) {
+               break;
+       case ARG_PTR_TO_PERCPU_BTF_ID:
                if (!reg->btf_id) {
                        verbose(env, "Helper has invalid btf_id in R%d\n", regno);
                        return -EACCES;
                }
                meta->ret_btf = reg->btf;
                meta->ret_btf_id = reg->btf_id;
-       } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
+               break;
+       case ARG_PTR_TO_SPIN_LOCK:
                if (meta->func_id == BPF_FUNC_spin_lock) {
                        if (process_spin_lock(env, regno, true))
                                return -EACCES;
@@ -6000,21 +5999,32 @@ skip_type_check:
                        verbose(env, "verifier internal error\n");
                        return -EFAULT;
                }
-       } else if (arg_type == ARG_PTR_TO_TIMER) {
+               break;
+       case ARG_PTR_TO_TIMER:
                if (process_timer_func(env, regno, meta))
                        return -EACCES;
-       } else if (arg_type == ARG_PTR_TO_FUNC) {
+               break;
+       case ARG_PTR_TO_FUNC:
                meta->subprogno = reg->subprogno;
-       } else if (base_type(arg_type) == ARG_PTR_TO_MEM) {
+               break;
+       case ARG_PTR_TO_MEM:
                /* The access to this pointer is only checked when we hit the
                 * next is_mem_size argument below.
                 */
                meta->raw_mode = arg_type & MEM_UNINIT;
-       } else if (arg_type_is_mem_size(arg_type)) {
-               bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
-
-               err = check_mem_size_reg(env, reg, regno, zero_size_allowed, meta);
-       } else if (arg_type_is_dynptr(arg_type)) {
+               if (arg_type & MEM_FIXED_SIZE) {
+                       err = check_helper_mem_access(env, regno,
+                                                     fn->arg_size[arg], false,
+                                                     meta);
+               }
+               break;
+       case ARG_CONST_SIZE:
+               err = check_mem_size_reg(env, reg, regno, false, meta);
+               break;
+       case ARG_CONST_SIZE_OR_ZERO:
+               err = check_mem_size_reg(env, reg, regno, true, meta);
+               break;
+       case ARG_PTR_TO_DYNPTR:
                if (arg_type & MEM_UNINIT) {
                        if (!is_dynptr_reg_valid_uninit(env, reg)) {
                                verbose(env, "Dynptr has to be an uninitialized dynptr\n");
@@ -6048,21 +6058,28 @@ skip_type_check:
                                err_extra, arg + 1);
                        return -EINVAL;
                }
-       } else if (arg_type_is_alloc_size(arg_type)) {
+               break;
+       case ARG_CONST_ALLOC_SIZE_OR_ZERO:
                if (!tnum_is_const(reg->var_off)) {
                        verbose(env, "R%d is not a known constant'\n",
                                regno);
                        return -EACCES;
                }
                meta->mem_size = reg->var_off.value;
-       } else if (arg_type_is_int_ptr(arg_type)) {
+               break;
+       case ARG_PTR_TO_INT:
+       case ARG_PTR_TO_LONG:
+       {
                int size = int_ptr_type_to_size(arg_type);
 
                err = check_helper_mem_access(env, regno, size, false, meta);
                if (err)
                        return err;
                err = check_ptr_alignment(env, reg, 0, size, true);
-       } else if (arg_type == ARG_PTR_TO_CONST_STR) {
+               break;
+       }
+       case ARG_PTR_TO_CONST_STR:
+       {
                struct bpf_map *map = reg->map_ptr;
                int map_off;
                u64 map_addr;
@@ -6101,9 +6118,12 @@ skip_type_check:
                        verbose(env, "string is not zero-terminated\n");
                        return -EINVAL;
                }
-       } else if (arg_type == ARG_PTR_TO_KPTR) {
+               break;
+       }
+       case ARG_PTR_TO_KPTR:
                if (process_kptr_func(env, regno, meta))
                        return -EACCES;
+               break;
        }
 
        return err;
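
With MEM_FIXED_SIZE the buffer length lives in the proto itself, so the
ARG_PTR_TO_MEM case above can call check_helper_mem_access() with
fn->arg_size[arg] instead of waiting for a following size argument. A
hypothetical proto in the new style (the helper name is invented for
illustration; the fields are the ones the verifier consumes here):

	static const struct bpf_func_proto bpf_example_fill_proto = {
		.func		= bpf_example_fill,	/* hypothetical helper */
		.gpl_only	= false,
		.ret_type	= RET_INTEGER,
		.arg1_type	= ARG_PTR_TO_MEM | MEM_UNINIT | MEM_FIXED_SIZE,
		.arg1_size	= sizeof(u64),
	};
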
@@ -6143,7 +6163,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
 
 static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
 {
-       return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
+       return env->prog->jit_requested &&
+              bpf_jit_supports_subprog_tailcalls();
 }
 
 static int check_map_func_compatibility(struct bpf_verifier_env *env,
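
allow_tail_call_in_subprogs() now asks the JIT instead of hard-coding
x86-64: the core kernel supplies a weak default and an arch JIT that
can mix tail calls with bpf2bpf subprograms overrides it. The default
should look like this (mirroring kernel/bpf/core.c):

	bool __weak bpf_jit_supports_subprog_tailcalls(void)
	{
		return false;
	}
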
@@ -6399,11 +6420,19 @@ static bool check_raw_mode_ok(const struct bpf_func_proto *fn)
        return count <= 1;
 }
 
-static bool check_args_pair_invalid(enum bpf_arg_type arg_curr,
-                                   enum bpf_arg_type arg_next)
+static bool check_args_pair_invalid(const struct bpf_func_proto *fn, int arg)
 {
-       return (base_type(arg_curr) == ARG_PTR_TO_MEM) !=
-               arg_type_is_mem_size(arg_next);
+       bool is_fixed = fn->arg_type[arg] & MEM_FIXED_SIZE;
+       bool has_size = fn->arg_size[arg] != 0;
+       bool is_next_size = false;
+
+       if (arg + 1 < ARRAY_SIZE(fn->arg_type))
+               is_next_size = arg_type_is_mem_size(fn->arg_type[arg + 1]);
+
+       if (base_type(fn->arg_type[arg]) != ARG_PTR_TO_MEM)
+               return is_next_size;
+
+       return has_size == is_next_size || is_next_size == is_fixed;
 }
 
 static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
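
In check_args_pair_invalid() above, a non-ARG_PTR_TO_MEM argument is
invalid only if a mem-size argument follows it with nothing to size.
For ARG_PTR_TO_MEM, exactly two shapes pass:

	/*
	 *   is_fixed  has_size  is_next_size
	 *      0         0           1        ptr + following size argument
	 *      1         1           0        MEM_FIXED_SIZE with arg_size set
	 *
	 * Every other combination makes check_args_pair_invalid() true.
	 */
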
@@ -6414,11 +6443,11 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
         * helper function specification.
         */
        if (arg_type_is_mem_size(fn->arg1_type) ||
-           base_type(fn->arg5_type) == ARG_PTR_TO_MEM ||
-           check_args_pair_invalid(fn->arg1_type, fn->arg2_type) ||
-           check_args_pair_invalid(fn->arg2_type, fn->arg3_type) ||
-           check_args_pair_invalid(fn->arg3_type, fn->arg4_type) ||
-           check_args_pair_invalid(fn->arg4_type, fn->arg5_type))
+           check_args_pair_invalid(fn, 0) ||
+           check_args_pair_invalid(fn, 1) ||
+           check_args_pair_invalid(fn, 2) ||
+           check_args_pair_invalid(fn, 3) ||
+           check_args_pair_invalid(fn, 4))
                return false;
 
        return true;
@@ -6459,7 +6488,10 @@ static bool check_btf_id_ok(const struct bpf_func_proto *fn)
                if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
                        return false;
 
-               if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
+               if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
+                   /* arg_btf_id and arg_size are in a union. */
+                   (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
+                    !(fn->arg_type[i] & MEM_FIXED_SIZE)))
                        return false;
        }
 
@@ -7100,9 +7132,45 @@ static int check_get_func_ip(struct bpf_verifier_env *env)
        return -ENOTSUPP;
 }
 
+static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
+{
+       return &env->insn_aux_data[env->insn_idx];
+}
+
+static bool loop_flag_is_zero(struct bpf_verifier_env *env)
+{
+       struct bpf_reg_state *regs = cur_regs(env);
+       struct bpf_reg_state *reg = &regs[BPF_REG_4];
+       bool reg_is_null = register_is_null(reg);
+
+       if (reg_is_null)
+               mark_chain_precision(env, BPF_REG_4);
+
+       return reg_is_null;
+}
+
+static void update_loop_inline_state(struct bpf_verifier_env *env, u32 subprogno)
+{
+       struct bpf_loop_inline_state *state = &cur_aux(env)->loop_inline_state;
+
+       if (!state->initialized) {
+               state->initialized = 1;
+               state->fit_for_inline = loop_flag_is_zero(env);
+               state->callback_subprogno = subprogno;
+               return;
+       }
+
+       if (!state->fit_for_inline)
+               return;
+
+       state->fit_for_inline = (loop_flag_is_zero(env) &&
+                                state->callback_subprogno == subprogno);
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
                             int *insn_idx_p)
 {
+       enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
        const struct bpf_func_proto *fn = NULL;
        enum bpf_return_type ret_type;
        enum bpf_type_flag ret_flag;
@@ -7252,6 +7320,7 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
                err = check_bpf_snprintf_call(env, regs);
                break;
        case BPF_FUNC_loop:
+               update_loop_inline_state(env, meta.subprogno);
                err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
                                        set_loop_callback_state);
                break;
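
update_loop_inline_state() keeps a bpf_loop call a candidate for
inlining only while every verification path sees a constant-zero flags
register (R4) and the same callback subprog. A qualifying call from the
BPF side looks roughly like this (program shape and section name are
illustrative):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	static int cb(__u32 idx, void *ctx)
	{
		*(long *)ctx += idx;
		return 0;	/* 0 = keep iterating */
	}

	SEC("tc")
	int prog(struct __sk_buff *skb)
	{
		long sum = 0;

		/* flags is the constant 0 and cb is the only callback ever
		 * seen at this call site, so fit_for_inline stays true.
		 */
		bpf_loop(32, cb, &sum, 0);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";
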
@@ -7261,6 +7330,19 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
                                reg_type_str(env, regs[BPF_REG_1].type));
                        return -EACCES;
                }
+               break;
+       case BPF_FUNC_set_retval:
+               if (prog_type == BPF_PROG_TYPE_LSM &&
+                   env->prog->expected_attach_type == BPF_LSM_CGROUP) {
+                       if (!env->prog->aux->attach_func_proto->type) {
+                               /* Make sure programs that attach to void
+                                * hooks don't try to modify return value.
+                                */
+                               verbose(env, "BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
+                               return -EINVAL;
+                       }
+               }
+               break;
        }
 
        if (err)
@@ -7480,6 +7562,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
        int err, insn_idx = *insn_idx_p;
        const struct btf_param *args;
        struct btf *desc_btf;
+       u32 *kfunc_flags;
        bool acq;
 
        /* skip for now, but return error when we find this in fixup_kfunc_call */
@@ -7495,18 +7578,16 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
        func_name = btf_name_by_offset(desc_btf, func->name_off);
        func_proto = btf_type_by_id(desc_btf, func->type);
 
-       if (!btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
-                                     BTF_KFUNC_TYPE_CHECK, func_id)) {
+       kfunc_flags = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog), func_id);
+       if (!kfunc_flags) {
                verbose(env, "calling kernel function %s is not allowed\n",
                        func_name);
                return -EACCES;
        }
-
-       acq = btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
-                                       BTF_KFUNC_TYPE_ACQUIRE, func_id);
+       acq = *kfunc_flags & KF_ACQUIRE;
 
        /* Check the arguments */
-       err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs);
+       err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs, *kfunc_flags);
        if (err < 0)
                return err;
        /* In case of release function, we get register number of refcounted
@@ -7550,8 +7631,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
                regs[BPF_REG_0].btf = desc_btf;
                regs[BPF_REG_0].type = PTR_TO_BTF_ID;
                regs[BPF_REG_0].btf_id = ptr_type_id;
-               if (btf_kfunc_id_set_contains(desc_btf, resolve_prog_type(env->prog),
-                                             BTF_KFUNC_TYPE_RET_NULL, func_id)) {
+               if (*kfunc_flags & KF_RET_NULL) {
                        regs[BPF_REG_0].type |= PTR_MAYBE_NULL;
                        /* For mark_ptr_or_null_reg, see 93c230e3f5bd6 */
                        regs[BPF_REG_0].id = ++env->id_gen;
@@ -7658,11 +7738,6 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
        return true;
 }
 
-static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
-{
-       return &env->insn_aux_data[env->insn_idx];
-}
-
 enum {
        REASON_BOUNDS   = -1,
        REASON_TYPE     = -2,
@@ -9033,7 +9108,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 
        if (opcode == BPF_END || opcode == BPF_NEG) {
                if (opcode == BPF_NEG) {
-                       if (BPF_SRC(insn->code) != 0 ||
+                       if (BPF_SRC(insn->code) != BPF_K ||
                            insn->src_reg != BPF_REG_0 ||
                            insn->off != 0 || insn->imm != 0) {
                                verbose(env, "BPF_NEG uses reserved fields\n");
@@ -10360,11 +10435,21 @@ static int check_return_code(struct bpf_verifier_env *env)
        const bool is_subprog = frame->subprogno;
 
        /* LSM and struct_ops func-ptr's return type could be "void" */
-       if (!is_subprog &&
-           (prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
-            prog_type == BPF_PROG_TYPE_LSM) &&
-           !prog->aux->attach_func_proto->type)
-               return 0;
+       if (!is_subprog) {
+               switch (prog_type) {
+               case BPF_PROG_TYPE_LSM:
+                       if (prog->expected_attach_type == BPF_LSM_CGROUP)
+                               /* See below, can be 0 or 0-1 depending on hook. */
+                               break;
+                       fallthrough;
+               case BPF_PROG_TYPE_STRUCT_OPS:
+                       if (!prog->aux->attach_func_proto->type)
+                               return 0;
+                       break;
+               default:
+                       break;
+               }
+       }
 
        /* eBPF calling convention is such that R0 is used
         * to return the value from eBPF program.
@@ -10455,6 +10540,22 @@ static int check_return_code(struct bpf_verifier_env *env)
        case BPF_PROG_TYPE_SK_LOOKUP:
                range = tnum_range(SK_DROP, SK_PASS);
                break;
+
+       case BPF_PROG_TYPE_LSM:
+               if (env->prog->expected_attach_type != BPF_LSM_CGROUP) {
+                       /* Regular BPF_PROG_TYPE_LSM programs can return
+                        * any value.
+                        */
+                       return 0;
+               }
+               if (!env->prog->aux->attach_func_proto->type) {
+                       /* Make sure programs that attach to void
+                        * hooks don't try to modify return value.
+                        */
+                       range = tnum_range(1, 1);
+               }
+               break;
+
        case BPF_PROG_TYPE_EXT:
                /* freplace program can return anything as its return value
                 * depends on the to-be-replaced kernel func or bpf program.
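
In the BPF_PROG_TYPE_LSM case above, a BPF_LSM_CGROUP program attached
to a void LSM hook may only return 1, while programs on non-void hooks
fall through to the usual range check. A minimal sketch, assuming the
chosen hook really is void and cgroup-attachable:

	SEC("lsm_cgroup/sk_free_security")	/* illustrative void hook */
	int BPF_PROG(noop, struct sock *sk)
	{
		return 1;	/* the only value check_return_code() accepts */
	}
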
@@ -10471,6 +10572,10 @@ static int check_return_code(struct bpf_verifier_env *env)
 
        if (!tnum_in(range, reg->var_off)) {
                verbose_invalid_scalar(env, reg, &range, "program exit", "R0");
+               if (prog->expected_attach_type == BPF_LSM_CGROUP &&
+                   prog_type == BPF_PROG_TYPE_LSM &&
+                   !prog->aux->attach_func_proto->type)
+                       verbose(env, "Note, BPF_LSM_CGROUP that attach to void LSM hooks can't modify return value!\n");
                return -EINVAL;
        }
 
@@ -10882,7 +10987,7 @@ static int check_btf_func(struct bpf_verifier_env *env,
                        goto err_free;
                ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
                scalar_return =
-                       btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
+                       btf_type_is_small_int(ret_type) || btf_is_any_enum(ret_type);
                if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
                        verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
                        goto err_free;
@@ -12467,6 +12572,7 @@ static bool is_tracing_prog_type(enum bpf_prog_type type)
        case BPF_PROG_TYPE_TRACEPOINT:
        case BPF_PROG_TYPE_PERF_EVENT:
        case BPF_PROG_TYPE_RAW_TRACEPOINT:
+       case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
                return true;
        default:
                return false;
@@ -13525,6 +13631,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                /* Below members will be freed only at prog->aux */
                func[i]->aux->btf = prog->aux->btf;
                func[i]->aux->func_info = prog->aux->func_info;
+               func[i]->aux->func_info_cnt = prog->aux->func_info_cnt;
                func[i]->aux->poke_tab = prog->aux->poke_tab;
                func[i]->aux->size_poke_tab = prog->aux->size_poke_tab;
 
@@ -13537,9 +13644,6 @@ static int jit_subprogs(struct bpf_verifier_env *env)
                                poke->aux = func[i]->aux;
                }
 
-               /* Use bpf_prog_F_tag to indicate functions in stack traces.
-                * Long term would need debug info to populate names
-                */
                func[i]->aux->name[0] = 'F';
                func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
                func[i]->jit_requested = 1;
@@ -14275,6 +14379,142 @@ patch_call_imm:
        return 0;
 }
 
+static struct bpf_prog *inline_bpf_loop(struct bpf_verifier_env *env,
+                                       int position,
+                                       s32 stack_base,
+                                       u32 callback_subprogno,
+                                       u32 *cnt)
+{
+       s32 r6_offset = stack_base + 0 * BPF_REG_SIZE;
+       s32 r7_offset = stack_base + 1 * BPF_REG_SIZE;
+       s32 r8_offset = stack_base + 2 * BPF_REG_SIZE;
+       int reg_loop_max = BPF_REG_6;
+       int reg_loop_cnt = BPF_REG_7;
+       int reg_loop_ctx = BPF_REG_8;
+
+       struct bpf_prog *new_prog;
+       u32 callback_start;
+       u32 call_insn_offset;
+       s32 callback_offset;
+
+       /* This represents an inlined version of bpf_iter.c:bpf_loop,
+        * be careful to modify this code in sync.
+        */
+       struct bpf_insn insn_buf[] = {
+               /* Return error and jump to the end of the patch if
+                * expected number of iterations is too big.
+                */
+               BPF_JMP_IMM(BPF_JLE, BPF_REG_1, BPF_MAX_LOOPS, 2),
+               BPF_MOV32_IMM(BPF_REG_0, -E2BIG),
+               BPF_JMP_IMM(BPF_JA, 0, 0, 16),
+               /* spill R6, R7, R8 to use these as loop vars */
+               BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, r6_offset),
+               BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_7, r7_offset),
+               BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_8, r8_offset),
+               /* initialize loop vars */
+               BPF_MOV64_REG(reg_loop_max, BPF_REG_1),
+               BPF_MOV32_IMM(reg_loop_cnt, 0),
+               BPF_MOV64_REG(reg_loop_ctx, BPF_REG_3),
+               /* loop header,
+                * if reg_loop_cnt >= reg_loop_max skip the loop body
+                */
+               BPF_JMP_REG(BPF_JGE, reg_loop_cnt, reg_loop_max, 5),
+               /* callback call,
+                * correct callback offset would be set after patching
+                */
+               BPF_MOV64_REG(BPF_REG_1, reg_loop_cnt),
+               BPF_MOV64_REG(BPF_REG_2, reg_loop_ctx),
+               BPF_CALL_REL(0),
+               /* increment loop counter */
+               BPF_ALU64_IMM(BPF_ADD, reg_loop_cnt, 1),
+               /* jump to loop header if callback returned 0 */
+               BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -6),
+               /* return value of bpf_loop,
+                * set R0 to the number of iterations
+                */
+               BPF_MOV64_REG(BPF_REG_0, reg_loop_cnt),
+               /* restore original values of R6, R7, R8 */
+               BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_10, r6_offset),
+               BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, r7_offset),
+               BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, r8_offset),
+       };
+
+       *cnt = ARRAY_SIZE(insn_buf);
+       new_prog = bpf_patch_insn_data(env, position, insn_buf, *cnt);
+       if (!new_prog)
+               return new_prog;
+
+       /* callback start is known only after patching */
+       callback_start = env->subprog_info[callback_subprogno].start;
+       /* Note: insn_buf[12] is an offset of BPF_CALL_REL instruction */
+       call_insn_offset = position + 12;
+       callback_offset = callback_start - call_insn_offset - 1;
+       new_prog->insnsi[call_insn_offset].imm = callback_offset;
+
+       return new_prog;
+}
+
+static bool is_bpf_loop_call(struct bpf_insn *insn)
+{
+       return insn->code == (BPF_JMP | BPF_CALL) &&
+               insn->src_reg == 0 &&
+               insn->imm == BPF_FUNC_loop;
+}
+
+/* For all sub-programs in the program (including main) check
+ * insn_aux_data to see if there are bpf_loop calls that require
+ * inlining. If such calls are found the calls are replaced with a
+ * sequence of instructions produced by `inline_bpf_loop` function and
+ * subprog stack_depth is increased by the size of 3 registers.
+ * This stack space is used to spill values of the R6, R7, R8.  These
+ * registers are used to store the loop bound, counter and context
+ * variables.
+ */
+static int optimize_bpf_loop(struct bpf_verifier_env *env)
+{
+       struct bpf_subprog_info *subprogs = env->subprog_info;
+       int i, cur_subprog = 0, cnt, delta = 0;
+       struct bpf_insn *insn = env->prog->insnsi;
+       int insn_cnt = env->prog->len;
+       u16 stack_depth = subprogs[cur_subprog].stack_depth;
+       u16 stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
+       u16 stack_depth_extra = 0;
+
+       for (i = 0; i < insn_cnt; i++, insn++) {
+               struct bpf_loop_inline_state *inline_state =
+                       &env->insn_aux_data[i + delta].loop_inline_state;
+
+               if (is_bpf_loop_call(insn) && inline_state->fit_for_inline) {
+                       struct bpf_prog *new_prog;
+
+                       stack_depth_extra = BPF_REG_SIZE * 3 + stack_depth_roundup;
+                       new_prog = inline_bpf_loop(env,
+                                                  i + delta,
+                                                  -(stack_depth + stack_depth_extra),
+                                                  inline_state->callback_subprogno,
+                                                  &cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta     += cnt - 1;
+                       env->prog  = new_prog;
+                       insn       = new_prog->insnsi + i + delta;
+               }
+
+               if (subprogs[cur_subprog + 1].start == i + delta + 1) {
+                       subprogs[cur_subprog].stack_depth += stack_depth_extra;
+                       cur_subprog++;
+                       stack_depth = subprogs[cur_subprog].stack_depth;
+                       stack_depth_roundup = round_up(stack_depth, 8) - stack_depth;
+                       stack_depth_extra = 0;
+               }
+       }
+
+       env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
+
+       return 0;
+}
+
 static void free_states(struct bpf_verifier_env *env)
 {
        struct bpf_verifier_state_list *sl, *sln;
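
A worked example of the spill-area accounting in optimize_bpf_loop()
above, assuming a subprog with stack_depth == 20:

	/*
	 *   stack_depth_roundup = round_up(20, 8) - 20   = 4
	 *   stack_depth_extra   = 3 * BPF_REG_SIZE + 4   = 28
	 *   stack_base          = -(20 + 28)             = -48
	 *
	 * R6/R7/R8 spill to fp-48/fp-40/fp-32, below the subprog's own
	 * stack, and the new depth (48) stays 8-byte aligned.
	 */
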
@@ -14694,6 +14934,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
                fallthrough;
        case BPF_MODIFY_RETURN:
        case BPF_LSM_MAC:
+       case BPF_LSM_CGROUP:
        case BPF_TRACE_FENTRY:
        case BPF_TRACE_FEXIT:
                if (!btf_type_is_func(t)) {
@@ -14810,8 +15051,8 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
        }
 
        if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
-           prog->type != BPF_PROG_TYPE_LSM) {
-               verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
+           prog->type != BPF_PROG_TYPE_LSM && prog->type != BPF_PROG_TYPE_KPROBE) {
+               verbose(env, "Only fentry/fexit/fmod_ret, lsm, and kprobe/uprobe programs can be sleepable\n");
                return -EINVAL;
        }
 
@@ -15012,6 +15253,9 @@ skip_full_check:
                ret = check_max_stack_depth(env);
 
        /* instruction rewrites happen after this point */
+       if (ret == 0)
+               ret = optimize_bpf_loop(env);
+
        if (is_priv) {
                if (ret == 0)
                        opt_hard_wire_dead_code_branches(env);