Merge branch 'for-5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/jlawall...
[linux-2.6-microblaze.git] kernel/bpf/verifier.c
index 53fe6ef..17270b8 100644
@@ -238,7 +238,9 @@ struct bpf_call_arg_meta {
        u64 msize_max_value;
        int ref_obj_id;
        int func_id;
+       struct btf *btf;
        u32 btf_id;
+       struct btf *ret_btf;
        u32 ret_btf_id;
 };
 
@@ -556,10 +558,9 @@ static struct bpf_func_state *func(struct bpf_verifier_env *env,
        return cur->frame[reg->frameno];
 }
 
-const char *kernel_type_name(u32 id)
+static const char *kernel_type_name(const struct btf* btf, u32 id)
 {
-       return btf_name_by_offset(btf_vmlinux,
-                                 btf_type_by_id(btf_vmlinux, id)->name_off);
+       return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
 }
 
 static void print_verifier_state(struct bpf_verifier_env *env,
@@ -589,7 +590,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
                        if (t == PTR_TO_BTF_ID ||
                            t == PTR_TO_BTF_ID_OR_NULL ||
                            t == PTR_TO_PERCPU_BTF_ID)
-                               verbose(env, "%s", kernel_type_name(reg->btf_id));
+                               verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
                        verbose(env, "(id=%d", reg->id);
                        if (reg_type_may_be_refcounted_or_null(t))
                                verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
@@ -1381,7 +1382,8 @@ static void mark_reg_not_init(struct bpf_verifier_env *env,
 
 static void mark_btf_ld_reg(struct bpf_verifier_env *env,
                            struct bpf_reg_state *regs, u32 regno,
-                           enum bpf_reg_type reg_type, u32 btf_id)
+                           enum bpf_reg_type reg_type,
+                           struct btf *btf, u32 btf_id)
 {
        if (reg_type == SCALAR_VALUE) {
                mark_reg_unknown(env, regs, regno);
@@ -1389,6 +1391,7 @@ static void mark_btf_ld_reg(struct bpf_verifier_env *env,
        }
        mark_reg_known_zero(env, regs, regno);
        regs[regno].type = PTR_TO_BTF_ID;
+       regs[regno].btf = btf;
        regs[regno].btf_id = btf_id;
 }
 
@@ -2737,7 +2740,9 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
                        regno);
                return -EACCES;
        }
-       err = __check_mem_access(env, regno, off, size, reg->range,
+
+       err = reg->range < 0 ? -EINVAL :
+             __check_mem_access(env, regno, off, size, reg->range,
                                 zero_size_allowed);
        if (err) {
                verbose(env, "R%d offset is outside of the packet\n", regno);
@@ -2760,7 +2765,7 @@ static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off,
 /* check access to 'struct bpf_context' fields.  Supports fixed offsets only */
 static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size,
                            enum bpf_access_type t, enum bpf_reg_type *reg_type,
-                           u32 *btf_id)
+                           struct btf **btf, u32 *btf_id)
 {
        struct bpf_insn_access_aux info = {
                .reg_type = *reg_type,
@@ -2778,10 +2783,12 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
                 */
                *reg_type = info.reg_type;
 
-               if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL)
+               if (*reg_type == PTR_TO_BTF_ID || *reg_type == PTR_TO_BTF_ID_OR_NULL) {
+                       *btf = info.btf;
                        *btf_id = info.btf_id;
-               else
+               } else {
                        env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
+               }
                /* remember the offset of last byte accessed in ctx */
                if (env->prog->aux->max_ctx_offset < off + size)
                        env->prog->aux->max_ctx_offset = off + size;
@@ -3293,8 +3300,8 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
                                   int value_regno)
 {
        struct bpf_reg_state *reg = regs + regno;
-       const struct btf_type *t = btf_type_by_id(btf_vmlinux, reg->btf_id);
-       const char *tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+       const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
+       const char *tname = btf_name_by_offset(reg->btf, t->name_off);
        u32 btf_id;
        int ret;
 
@@ -3315,23 +3322,23 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
        }
 
        if (env->ops->btf_struct_access) {
-               ret = env->ops->btf_struct_access(&env->log, t, off, size,
-                                                 atype, &btf_id);
+               ret = env->ops->btf_struct_access(&env->log, reg->btf, t,
+                                                 off, size, atype, &btf_id);
        } else {
                if (atype != BPF_READ) {
                        verbose(env, "only read is supported\n");
                        return -EACCES;
                }
 
-               ret = btf_struct_access(&env->log, t, off, size, atype,
-                                       &btf_id);
+               ret = btf_struct_access(&env->log, reg->btf, t, off, size,
+                                       atype, &btf_id);
        }
 
        if (ret < 0)
                return ret;
 
        if (atype == BPF_READ && value_regno >= 0)
-               mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
+               mark_btf_ld_reg(env, regs, value_regno, ret, reg->btf, btf_id);
 
        return 0;
 }
@@ -3381,12 +3388,12 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
                return -EACCES;
        }
 
-       ret = btf_struct_access(&env->log, t, off, size, atype, &btf_id);
+       ret = btf_struct_access(&env->log, btf_vmlinux, t, off, size, atype, &btf_id);
        if (ret < 0)
                return ret;
 
        if (value_regno >= 0)
-               mark_btf_ld_reg(env, regs, value_regno, ret, btf_id);
+               mark_btf_ld_reg(env, regs, value_regno, ret, btf_vmlinux, btf_id);
 
        return 0;
 }
@@ -3462,6 +3469,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                        mark_reg_unknown(env, regs, value_regno);
        } else if (reg->type == PTR_TO_CTX) {
                enum bpf_reg_type reg_type = SCALAR_VALUE;
+               struct btf *btf = NULL;
                u32 btf_id = 0;
 
                if (t == BPF_WRITE && value_regno >= 0 &&
@@ -3474,7 +3482,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                if (err < 0)
                        return err;
 
-               err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf_id);
+               err = check_ctx_access(env, insn_idx, off, size, t, &reg_type, &btf, &btf_id);
                if (err)
                        verbose_linfo(env, insn_idx, "; ");
                if (!err && t == BPF_READ && value_regno >= 0) {
@@ -3496,8 +3504,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
                                 */
                                regs[value_regno].subreg_def = DEF_NOT_SUBREG;
                                if (reg_type == PTR_TO_BTF_ID ||
-                                   reg_type == PTR_TO_BTF_ID_OR_NULL)
+                                   reg_type == PTR_TO_BTF_ID_OR_NULL) {
+                                       regs[value_regno].btf = btf;
                                        regs[value_regno].btf_id = btf_id;
+                               }
                        }
                        regs[value_regno].type = reg_type;
                }
@@ -3757,7 +3767,8 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
                        goto mark;
 
                if (state->stack[spi].slot_type[0] == STACK_SPILL &&
-                   state->stack[spi].spilled_ptr.type == SCALAR_VALUE) {
+                   (state->stack[spi].spilled_ptr.type == SCALAR_VALUE ||
+                    env->allow_ptr_leaks)) {
                        __mark_reg_unknown(env, &state->stack[spi].spilled_ptr);
                        for (j = 0; j < BPF_REG_SIZE; j++)
                                state->stack[spi].slot_type[j] = STACK_MISC;
@@ -4114,11 +4125,11 @@ found:
                        arg_btf_id = compatible->btf_id;
                }
 
-               if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id,
-                                         *arg_btf_id)) {
+               if (!btf_struct_ids_match(&env->log, reg->btf, reg->btf_id, reg->off,
+                                         btf_vmlinux, *arg_btf_id)) {
                        verbose(env, "R%d is of type %s but %s is expected\n",
-                               regno, kernel_type_name(reg->btf_id),
-                               kernel_type_name(*arg_btf_id));
+                               regno, kernel_type_name(reg->btf, reg->btf_id),
+                               kernel_type_name(btf_vmlinux, *arg_btf_id));
                        return -EACCES;
                }
 
@@ -4240,6 +4251,7 @@ skip_type_check:
                        verbose(env, "Helper has invalid btf_id in R%d\n", regno);
                        return -EACCES;
                }
+               meta->ret_btf = reg->btf;
                meta->ret_btf_id = reg->btf_id;
        } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
                if (meta->func_id == BPF_FUNC_spin_lock) {
@@ -4467,6 +4479,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
                    func_id != BPF_FUNC_inode_storage_delete)
                        goto error;
                break;
+       case BPF_MAP_TYPE_TASK_STORAGE:
+               if (func_id != BPF_FUNC_task_storage_get &&
+                   func_id != BPF_FUNC_task_storage_delete)
+                       goto error;
+               break;
        default:
                break;
        }
@@ -4545,6 +4562,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
                if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
                        goto error;
                break;
+       case BPF_FUNC_task_storage_get:
+       case BPF_FUNC_task_storage_delete:
+               if (map->map_type != BPF_MAP_TYPE_TASK_STORAGE)
+                       goto error;
+               break;
        default:
                break;
        }
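
The two hunks above tie BPF_MAP_TYPE_TASK_STORAGE to its dedicated helpers, mirroring the existing sk/inode local-storage checks. As context only, here is a minimal sketch of the program-side pattern this enables; it is not part of this diff, and the map, section, and type names are illustrative:

/* sketch only; assumes the usual vmlinux.h + libbpf helper headers */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct task_val {
        __u64 nr_execs;
};

struct {
        __uint(type, BPF_MAP_TYPE_TASK_STORAGE);
        __uint(map_flags, BPF_F_NO_PREALLOC);
        __type(key, int);
        __type(value, struct task_val);
} task_store SEC(".maps");

SEC("lsm/bprm_committed_creds")
int BPF_PROG(count_execs, struct linux_binprm *bprm)
{
        struct task_struct *task = bpf_get_current_task_btf();
        struct task_val *v;

        /* get-or-create this task's per-task storage slot */
        v = bpf_task_storage_get(&task_store, task, 0,
                                 BPF_LOCAL_STORAGE_GET_F_CREATE);
        if (v)
                v->nr_execs++;
        return 0;
}

char LICENSE[] SEC("license") = "GPL";

bpf_get_current_task_btf() in the sketch relies on the RET_PTR_TO_BTF_ID handling that this diff adds to check_helper_call() further down.
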
@@ -4685,6 +4707,32 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
                __clear_all_pkt_pointers(env, vstate->frame[i]);
 }
 
+enum {
+       AT_PKT_END = -1,
+       BEYOND_PKT_END = -2,
+};
+
+static void mark_pkt_end(struct bpf_verifier_state *vstate, int regn, bool range_open)
+{
+       struct bpf_func_state *state = vstate->frame[vstate->curframe];
+       struct bpf_reg_state *reg = &state->regs[regn];
+
+       if (reg->type != PTR_TO_PACKET)
+               /* PTR_TO_PACKET_META is not supported yet */
+               return;
+
+       /* The 'reg' is pkt > pkt_end or pkt >= pkt_end.
+        * How far beyond pkt_end it goes is unknown.
+        * if (!range_open) it's the case of pkt >= pkt_end
+        * if (range_open) it's the case of pkt > pkt_end
+        * hence this pointer is at least 1 byte bigger than pkt_end
+        */
+       if (range_open)
+               reg->range = BEYOND_PKT_END;
+       else
+               reg->range = AT_PKT_END;
+}
+
 static void release_reg_references(struct bpf_verifier_env *env,
                                   struct bpf_func_state *state,
                                   int ref_obj_id)
@@ -5152,16 +5200,16 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                const struct btf_type *t;
 
                mark_reg_known_zero(env, regs, BPF_REG_0);
-               t = btf_type_skip_modifiers(btf_vmlinux, meta.ret_btf_id, NULL);
+               t = btf_type_skip_modifiers(meta.ret_btf, meta.ret_btf_id, NULL);
                if (!btf_type_is_struct(t)) {
                        u32 tsize;
                        const struct btf_type *ret;
                        const char *tname;
 
                        /* resolve the type size of ksym. */
-                       ret = btf_resolve_size(btf_vmlinux, t, &tsize);
+                       ret = btf_resolve_size(meta.ret_btf, t, &tsize);
                        if (IS_ERR(ret)) {
-                               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+                               tname = btf_name_by_offset(meta.ret_btf, t->name_off);
                                verbose(env, "unable to resolve the size of type '%s': %ld\n",
                                        tname, PTR_ERR(ret));
                                return -EINVAL;
@@ -5174,19 +5222,27 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                        regs[BPF_REG_0].type =
                                fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
                                PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL;
+                       regs[BPF_REG_0].btf = meta.ret_btf;
                        regs[BPF_REG_0].btf_id = meta.ret_btf_id;
                }
-       } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) {
+       } else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL ||
+                  fn->ret_type == RET_PTR_TO_BTF_ID) {
                int ret_btf_id;
 
                mark_reg_known_zero(env, regs, BPF_REG_0);
-               regs[BPF_REG_0].type = PTR_TO_BTF_ID_OR_NULL;
+               regs[BPF_REG_0].type = fn->ret_type == RET_PTR_TO_BTF_ID ?
+                                                    PTR_TO_BTF_ID :
+                                                    PTR_TO_BTF_ID_OR_NULL;
                ret_btf_id = *fn->ret_btf_id;
                if (ret_btf_id == 0) {
                        verbose(env, "invalid return type %d of func %s#%d\n",
                                fn->ret_type, func_id_name(func_id), func_id);
                        return -EINVAL;
                }
+               /* current BPF helper definitions are only coming from
+                * built-in code with type IDs from vmlinux BTF
+                */
+               regs[BPF_REG_0].btf = btf_vmlinux;
                regs[BPF_REG_0].btf_id = ret_btf_id;
        } else {
                verbose(env, "unknown return type %d of func %s#%d\n",
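
The meta.ret_btf plumbing above affects RET_PTR_TO_MEM_OR_BTF_ID helpers such as bpf_per_cpu_ptr()/bpf_this_cpu_ptr(): the returned pointer's type now carries the BTF object that described the ksym argument rather than always btf_vmlinux. A rough sketch of the user-facing pattern, along the lines of the usual typed-ksym selftests (names illustrative, not part of this diff):

/* same vmlinux.h / bpf_helpers.h includes as the earlier sketch */
extern const struct rq runqueues __ksym;        /* typed ksym, resolved via BTF */

SEC("raw_tp/sys_enter")
int read_cpu0_rq(void *ctx)
{
        /* PTR_TO_BTF_ID_OR_NULL: NULL if the cpu argument were out of range */
        struct rq *rq = bpf_per_cpu_ptr(&runqueues, 0);

        if (rq)
                bpf_printk("cpu0 nr_running=%u", rq->nr_running);
        return 0;
}

The new RET_PTR_TO_BTF_ID branch in the same hunk covers helpers whose return type IDs come from vmlinux BTF, such as bpf_get_current_task_btf() used in the earlier sketch.
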
@@ -5586,7 +5642,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                if (reg_is_pkt_pointer(ptr_reg)) {
                        dst_reg->id = ++env->id_gen;
                        /* something was added to pkt_ptr, set range to zero */
-                       dst_reg->raw = 0;
+                       memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
                }
                break;
        case BPF_SUB:
@@ -5651,7 +5707,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
                        dst_reg->id = ++env->id_gen;
                        /* something was added to pkt_ptr, set range to zero */
                        if (smin_val < 0)
-                               dst_reg->raw = 0;
+                               memset(&dst_reg->raw, 0, sizeof(dst_reg->raw));
                }
                break;
        case BPF_AND:
@@ -6695,7 +6751,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 
 static void __find_good_pkt_pointers(struct bpf_func_state *state,
                                     struct bpf_reg_state *dst_reg,
-                                    enum bpf_reg_type type, u16 new_range)
+                                    enum bpf_reg_type type, int new_range)
 {
        struct bpf_reg_state *reg;
        int i;
@@ -6720,8 +6776,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate,
                                   enum bpf_reg_type type,
                                   bool range_right_open)
 {
-       u16 new_range;
-       int i;
+       int new_range, i;
 
        if (dst_reg->off < 0 ||
            (dst_reg->off == 0 && range_right_open))
@@ -6972,6 +7027,67 @@ static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode,
        return is_branch64_taken(reg, val, opcode);
 }
 
+static int flip_opcode(u32 opcode)
+{
+       /* How can we transform "a <op> b" into "b <op> a"? */
+       static const u8 opcode_flip[16] = {
+               /* these stay the same */
+               [BPF_JEQ  >> 4] = BPF_JEQ,
+               [BPF_JNE  >> 4] = BPF_JNE,
+               [BPF_JSET >> 4] = BPF_JSET,
+               /* these swap "lesser" and "greater" (L and G in the opcodes) */
+               [BPF_JGE  >> 4] = BPF_JLE,
+               [BPF_JGT  >> 4] = BPF_JLT,
+               [BPF_JLE  >> 4] = BPF_JGE,
+               [BPF_JLT  >> 4] = BPF_JGT,
+               [BPF_JSGE >> 4] = BPF_JSLE,
+               [BPF_JSGT >> 4] = BPF_JSLT,
+               [BPF_JSLE >> 4] = BPF_JSGE,
+               [BPF_JSLT >> 4] = BPF_JSGT
+       };
+       return opcode_flip[opcode >> 4];
+}
+
+static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg,
+                                  struct bpf_reg_state *src_reg,
+                                  u8 opcode)
+{
+       struct bpf_reg_state *pkt;
+
+       if (src_reg->type == PTR_TO_PACKET_END) {
+               pkt = dst_reg;
+       } else if (dst_reg->type == PTR_TO_PACKET_END) {
+               pkt = src_reg;
+               opcode = flip_opcode(opcode);
+       } else {
+               return -1;
+       }
+
+       if (pkt->range >= 0)
+               return -1;
+
+       switch (opcode) {
+       case BPF_JLE:
+               /* pkt <= pkt_end */
+               fallthrough;
+       case BPF_JGT:
+               /* pkt > pkt_end */
+               if (pkt->range == BEYOND_PKT_END)
+                       /* pkt has at least one extra byte beyond pkt_end */
+                       return opcode == BPF_JGT;
+               break;
+       case BPF_JLT:
+               /* pkt < pkt_end */
+               fallthrough;
+       case BPF_JGE:
+               /* pkt >= pkt_end */
+               if (pkt->range == BEYOND_PKT_END || pkt->range == AT_PKT_END)
+                       return opcode == BPF_JGE;
+               break;
+       }
+       return -1;
+}
+
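
Taken together, mark_pkt_end() and is_pkt_ptr_branch_taken() let the verifier resolve a packet-pointer vs. pkt_end comparison statically when an earlier branch has already pinned the pointer relative to pkt_end. An illustrative, hand-written shape of such code (not from this diff; it assumes an XDP context xdp, and in practice the redundant compare usually comes from compiler-generated branches):

void *data     = (void *)(long)xdp->data;
void *data_end = (void *)(long)xdp->data_end;

if (data + 8 > data_end) {
        /* on this path the verifier marks 'data + 8' as BEYOND_PKT_END */
        if (data + 8 > data_end)        /* is_pkt_ptr_branch_taken() proves this */
                return XDP_DROP;        /* branch always taken, so the dead      */
                                        /* fall-through state is never explored  */
}
return XDP_PASS;
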
 /* Adjusts the register min/max values in the case that the dst_reg is the
  * variable register that we are working on, and src_reg is a constant or we're
  * simply doing a BPF_K check.
@@ -7135,23 +7251,7 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
                                u64 val, u32 val32,
                                u8 opcode, bool is_jmp32)
 {
-       /* How can we transform "a <op> b" into "b <op> a"? */
-       static const u8 opcode_flip[16] = {
-               /* these stay the same */
-               [BPF_JEQ  >> 4] = BPF_JEQ,
-               [BPF_JNE  >> 4] = BPF_JNE,
-               [BPF_JSET >> 4] = BPF_JSET,
-               /* these swap "lesser" and "greater" (L and G in the opcodes) */
-               [BPF_JGE  >> 4] = BPF_JLE,
-               [BPF_JGT  >> 4] = BPF_JLT,
-               [BPF_JLE  >> 4] = BPF_JGE,
-               [BPF_JLT  >> 4] = BPF_JGT,
-               [BPF_JSGE >> 4] = BPF_JSLE,
-               [BPF_JSGT >> 4] = BPF_JSLT,
-               [BPF_JSLE >> 4] = BPF_JSGE,
-               [BPF_JSLT >> 4] = BPF_JSGT
-       };
-       opcode = opcode_flip[opcode >> 4];
+       opcode = flip_opcode(opcode);
        /* This uses zero as "not present in table"; luckily the zero opcode,
         * BPF_JA, can't get here.
         */
@@ -7333,6 +7433,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
                        /* pkt_data' > pkt_end, pkt_meta' > pkt_data */
                        find_good_pkt_pointers(this_branch, dst_reg,
                                               dst_reg->type, false);
+                       mark_pkt_end(other_branch, insn->dst_reg, true);
                } else if ((dst_reg->type == PTR_TO_PACKET_END &&
                            src_reg->type == PTR_TO_PACKET) ||
                           (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7340,6 +7441,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
                        /* pkt_end > pkt_data', pkt_data > pkt_meta' */
                        find_good_pkt_pointers(other_branch, src_reg,
                                               src_reg->type, true);
+                       mark_pkt_end(this_branch, insn->src_reg, false);
                } else {
                        return false;
                }
@@ -7352,6 +7454,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
                        /* pkt_data' < pkt_end, pkt_meta' < pkt_data */
                        find_good_pkt_pointers(other_branch, dst_reg,
                                               dst_reg->type, true);
+                       mark_pkt_end(this_branch, insn->dst_reg, false);
                } else if ((dst_reg->type == PTR_TO_PACKET_END &&
                            src_reg->type == PTR_TO_PACKET) ||
                           (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7359,6 +7462,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
                        /* pkt_end < pkt_data', pkt_data > pkt_meta' */
                        find_good_pkt_pointers(this_branch, src_reg,
                                               src_reg->type, false);
+                       mark_pkt_end(other_branch, insn->src_reg, true);
                } else {
                        return false;
                }
@@ -7371,6 +7475,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
                        /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */
                        find_good_pkt_pointers(this_branch, dst_reg,
                                               dst_reg->type, true);
+                       mark_pkt_end(other_branch, insn->dst_reg, false);
                } else if ((dst_reg->type == PTR_TO_PACKET_END &&
                            src_reg->type == PTR_TO_PACKET) ||
                           (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7378,6 +7483,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
                        /* pkt_end >= pkt_data', pkt_data >= pkt_meta' */
                        find_good_pkt_pointers(other_branch, src_reg,
                                               src_reg->type, false);
+                       mark_pkt_end(this_branch, insn->src_reg, true);
                } else {
                        return false;
                }
@@ -7390,6 +7496,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
                        /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */
                        find_good_pkt_pointers(other_branch, dst_reg,
                                               dst_reg->type, false);
+                       mark_pkt_end(this_branch, insn->dst_reg, true);
                } else if ((dst_reg->type == PTR_TO_PACKET_END &&
                            src_reg->type == PTR_TO_PACKET) ||
                           (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) &&
@@ -7397,6 +7504,7 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
                        /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */
                        find_good_pkt_pointers(this_branch, src_reg,
                                               src_reg->type, true);
+                       mark_pkt_end(other_branch, insn->src_reg, false);
                } else {
                        return false;
                }
@@ -7496,6 +7604,10 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
                                       src_reg->var_off.value,
                                       opcode,
                                       is_jmp32);
+       } else if (reg_is_pkt_pointer_any(dst_reg) &&
+                  reg_is_pkt_pointer_any(src_reg) &&
+                  !is_jmp32) {
+               pred = is_pkt_ptr_branch_taken(dst_reg, src_reg, opcode);
        }
 
        if (pred >= 0) {
@@ -7504,7 +7616,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
                 */
                if (!__is_pointer_value(false, dst_reg))
                        err = mark_chain_precision(env, insn->dst_reg);
-               if (BPF_SRC(insn->code) == BPF_X && !err)
+               if (BPF_SRC(insn->code) == BPF_X && !err &&
+                   !__is_pointer_value(false, src_reg))
                        err = mark_chain_precision(env, insn->src_reg);
                if (err)
                        return err;
@@ -7646,6 +7759,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
                        break;
                case PTR_TO_BTF_ID:
                case PTR_TO_PERCPU_BTF_ID:
+                       dst_reg->btf = aux->btf_var.btf;
                        dst_reg->btf_id = aux->btf_var.btf_id;
                        break;
                default:
@@ -7960,6 +8074,11 @@ static void init_explored_state(struct bpf_verifier_env *env, int idx)
        env->insn_aux_data[idx].prune_point = true;
 }
 
+enum {
+       DONE_EXPLORING = 0,
+       KEEP_EXPLORING = 1,
+};
+
 /* t, w, e - match pseudo-code above:
  * t - index of current instruction
  * w - next instruction
@@ -7972,10 +8091,10 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
        int *insn_state = env->cfg.insn_state;
 
        if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH))
-               return 0;
+               return DONE_EXPLORING;
 
        if (e == BRANCH && insn_state[t] >= (DISCOVERED | BRANCH))
-               return 0;
+               return DONE_EXPLORING;
 
        if (w < 0 || w >= env->prog->len) {
                verbose_linfo(env, t, "%d: ", t);
@@ -7994,10 +8113,10 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
                if (env->cfg.cur_stack >= env->prog->len)
                        return -E2BIG;
                insn_stack[env->cfg.cur_stack++] = w;
-               return 1;
+               return KEEP_EXPLORING;
        } else if ((insn_state[w] & 0xF0) == DISCOVERED) {
                if (loop_ok && env->bpf_capable)
-                       return 0;
+                       return DONE_EXPLORING;
                verbose_linfo(env, t, "%d: ", t);
                verbose_linfo(env, w, "%d: ", w);
                verbose(env, "back-edge from insn %d to %d\n", t, w);
@@ -8009,7 +8128,74 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
                verbose(env, "insn state internal bug\n");
                return -EFAULT;
        }
-       return 0;
+       return DONE_EXPLORING;
+}
+
+/* Visits the instruction at index t and returns one of the following:
+ *  < 0 - an error occurred
+ *  DONE_EXPLORING - the instruction was fully explored
+ *  KEEP_EXPLORING - there is still work to be done before it is fully explored
+ */
+static int visit_insn(int t, int insn_cnt, struct bpf_verifier_env *env)
+{
+       struct bpf_insn *insns = env->prog->insnsi;
+       int ret;
+
+       /* All non-branch instructions have a single fall-through edge. */
+       if (BPF_CLASS(insns[t].code) != BPF_JMP &&
+           BPF_CLASS(insns[t].code) != BPF_JMP32)
+               return push_insn(t, t + 1, FALLTHROUGH, env, false);
+
+       switch (BPF_OP(insns[t].code)) {
+       case BPF_EXIT:
+               return DONE_EXPLORING;
+
+       case BPF_CALL:
+               ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
+               if (ret)
+                       return ret;
+
+               if (t + 1 < insn_cnt)
+                       init_explored_state(env, t + 1);
+               if (insns[t].src_reg == BPF_PSEUDO_CALL) {
+                       init_explored_state(env, t);
+                       ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
+                                       env, false);
+               }
+               return ret;
+
+       case BPF_JA:
+               if (BPF_SRC(insns[t].code) != BPF_K)
+                       return -EINVAL;
+
+               /* unconditional jump with single edge */
+               ret = push_insn(t, t + insns[t].off + 1, FALLTHROUGH, env,
+                               true);
+               if (ret)
+                       return ret;
+
+               /* unconditional jmp is not a good pruning point,
+                * but it's marked, since backtracking needs
+                * to record jmp history in is_state_visited().
+                */
+               init_explored_state(env, t + insns[t].off + 1);
+               /* tell verifier to check for equivalent states
+                * after every call and jump
+                */
+               if (t + 1 < insn_cnt)
+                       init_explored_state(env, t + 1);
+
+               return ret;
+
+       default:
+               /* conditional jump with two edges */
+               init_explored_state(env, t);
+               ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
+               if (ret)
+                       return ret;
+
+               return push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
+       }
 }
 
 /* non-recursive depth-first-search to detect loops in BPF program
@@ -8017,11 +8203,10 @@ static int push_insn(int t, int w, int e, struct bpf_verifier_env *env,
  */
 static int check_cfg(struct bpf_verifier_env *env)
 {
-       struct bpf_insn *insns = env->prog->insnsi;
        int insn_cnt = env->prog->len;
        int *insn_stack, *insn_state;
        int ret = 0;
-       int i, t;
+       int i;
 
        insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
        if (!insn_state)
@@ -8037,92 +8222,32 @@ static int check_cfg(struct bpf_verifier_env *env)
        insn_stack[0] = 0; /* 0 is the first instruction */
        env->cfg.cur_stack = 1;
 
-peek_stack:
-       if (env->cfg.cur_stack == 0)
-               goto check_state;
-       t = insn_stack[env->cfg.cur_stack - 1];
-
-       if (BPF_CLASS(insns[t].code) == BPF_JMP ||
-           BPF_CLASS(insns[t].code) == BPF_JMP32) {
-               u8 opcode = BPF_OP(insns[t].code);
-
-               if (opcode == BPF_EXIT) {
-                       goto mark_explored;
-               } else if (opcode == BPF_CALL) {
-                       ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
-                       if (ret == 1)
-                               goto peek_stack;
-                       else if (ret < 0)
-                               goto err_free;
-                       if (t + 1 < insn_cnt)
-                               init_explored_state(env, t + 1);
-                       if (insns[t].src_reg == BPF_PSEUDO_CALL) {
-                               init_explored_state(env, t);
-                               ret = push_insn(t, t + insns[t].imm + 1, BRANCH,
-                                               env, false);
-                               if (ret == 1)
-                                       goto peek_stack;
-                               else if (ret < 0)
-                                       goto err_free;
-                       }
-               } else if (opcode == BPF_JA) {
-                       if (BPF_SRC(insns[t].code) != BPF_K) {
-                               ret = -EINVAL;
-                               goto err_free;
-                       }
-                       /* unconditional jump with single edge */
-                       ret = push_insn(t, t + insns[t].off + 1,
-                                       FALLTHROUGH, env, true);
-                       if (ret == 1)
-                               goto peek_stack;
-                       else if (ret < 0)
-                               goto err_free;
-                       /* unconditional jmp is not a good pruning point,
-                        * but it's marked, since backtracking needs
-                        * to record jmp history in is_state_visited().
-                        */
-                       init_explored_state(env, t + insns[t].off + 1);
-                       /* tell verifier to check for equivalent states
-                        * after every call and jump
-                        */
-                       if (t + 1 < insn_cnt)
-                               init_explored_state(env, t + 1);
-               } else {
-                       /* conditional jump with two edges */
-                       init_explored_state(env, t);
-                       ret = push_insn(t, t + 1, FALLTHROUGH, env, true);
-                       if (ret == 1)
-                               goto peek_stack;
-                       else if (ret < 0)
-                               goto err_free;
+       while (env->cfg.cur_stack > 0) {
+               int t = insn_stack[env->cfg.cur_stack - 1];
 
-                       ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true);
-                       if (ret == 1)
-                               goto peek_stack;
-                       else if (ret < 0)
-                               goto err_free;
-               }
-       } else {
-               /* all other non-branch instructions with single
-                * fall-through edge
-                */
-               ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
-               if (ret == 1)
-                       goto peek_stack;
-               else if (ret < 0)
+               ret = visit_insn(t, insn_cnt, env);
+               switch (ret) {
+               case DONE_EXPLORING:
+                       insn_state[t] = EXPLORED;
+                       env->cfg.cur_stack--;
+                       break;
+               case KEEP_EXPLORING:
+                       break;
+               default:
+                       if (ret > 0) {
+                               verbose(env, "visit_insn internal bug\n");
+                               ret = -EFAULT;
+                       }
                        goto err_free;
+               }
        }
 
-mark_explored:
-       insn_state[t] = EXPLORED;
-       if (env->cfg.cur_stack-- <= 0) {
+       if (env->cfg.cur_stack < 0) {
                verbose(env, "pop stack internal bug\n");
                ret = -EFAULT;
                goto err_free;
        }
-       goto peek_stack;
 
-check_state:
        for (i = 0; i < insn_cnt; i++) {
                if (insn_state[i] != EXPLORED) {
                        verbose(env, "unreachable insn %d\n", i);
@@ -9642,6 +9767,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
        t = btf_type_skip_modifiers(btf_vmlinux, type, NULL);
        if (percpu) {
                aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID;
+               aux->btf_var.btf = btf_vmlinux;
                aux->btf_var.btf_id = type;
        } else if (!btf_type_is_struct(t)) {
                const struct btf_type *ret;
@@ -9660,6 +9786,7 @@ static int check_pseudo_btf_id(struct bpf_verifier_env *env,
                aux->btf_var.mem_size = tsize;
        } else {
                aux->btf_var.reg_type = PTR_TO_BTF_ID;
+               aux->btf_var.btf = btf_vmlinux;
                aux->btf_var.btf_id = type;
        }
        return 0;
@@ -9731,11 +9858,21 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
                verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
        }
 
-       if ((is_tracing_prog_type(prog_type) ||
-            prog_type == BPF_PROG_TYPE_SOCKET_FILTER) &&
-           map_value_has_spin_lock(map)) {
-               verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
-               return -EINVAL;
+       if (map_value_has_spin_lock(map)) {
+               if (prog_type == BPF_PROG_TYPE_SOCKET_FILTER) {
+                       verbose(env, "socket filter progs cannot use bpf_spin_lock yet\n");
+                       return -EINVAL;
+               }
+
+               if (is_tracing_prog_type(prog_type)) {
+                       verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
+                       return -EINVAL;
+               }
+
+               if (prog->aux->sleepable) {
+                       verbose(env, "sleepable progs cannot use bpf_spin_lock yet\n");
+                       return -EINVAL;
+               }
        }
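
map_value_has_spin_lock() is true when the map's value type embeds a struct bpf_spin_lock, as in the generic pattern below (not from this diff); the rewritten checks now reject such maps separately for socket filters, tracing programs, and sleepable programs:

struct val {
        struct bpf_spin_lock lock;      /* makes map_value_has_spin_lock() true */
        int cnt;
};

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, struct val);
} locked_map SEC(".maps");

SEC("tc")                               /* an eligible, non-tracing program type */
int bump(struct __sk_buff *skb)
{
        int key = 0;
        struct val *v = bpf_map_lookup_elem(&locked_map, &key);

        if (v) {
                bpf_spin_lock(&v->lock);
                v->cnt++;
                bpf_spin_unlock(&v->lock);
        }
        return 0;
}
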
 
        if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
@@ -11466,20 +11603,6 @@ static int check_attach_modify_return(unsigned long addr, const char *func_name)
        return -EINVAL;
 }
 
-/* non exhaustive list of sleepable bpf_lsm_*() functions */
-BTF_SET_START(btf_sleepable_lsm_hooks)
-#ifdef CONFIG_BPF_LSM
-BTF_ID(func, bpf_lsm_bprm_committed_creds)
-#else
-BTF_ID_UNUSED
-#endif
-BTF_SET_END(btf_sleepable_lsm_hooks)
-
-static int check_sleepable_lsm_hook(u32 btf_id)
-{
-       return btf_id_set_contains(&btf_sleepable_lsm_hooks, btf_id);
-}
-
 /* list of non-sleepable functions that are otherwise on
  * ALLOW_ERROR_INJECTION list
  */
@@ -11516,7 +11639,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
                bpf_log(log, "Tracing programs must provide btf_id\n");
                return -EINVAL;
        }
-       btf = tgt_prog ? tgt_prog->aux->btf : btf_vmlinux;
+       btf = tgt_prog ? tgt_prog->aux->btf : prog->aux->attach_btf;
        if (!btf) {
                bpf_log(log,
                        "FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
@@ -11701,7 +11824,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
                                /* LSM progs check that they are attached to bpf_lsm_*() funcs.
                                 * Only some of them are sleepable.
                                 */
-                               if (check_sleepable_lsm_hook(btf_id))
+                               if (bpf_lsm_is_sleepable_hook(btf_id))
                                        ret = 0;
                                break;
                        default:
@@ -11792,7 +11915,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
                        return ret;
        }
 
-       key = bpf_trampoline_compute_key(tgt_prog, btf_id);
+       key = bpf_trampoline_compute_key(tgt_prog, prog->aux->attach_btf, btf_id);
        tr = bpf_trampoline_get(key, &tgt_info);
        if (!tr)
                return -ENOMEM;