diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ba03ec3..9f14937 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -31,6 +31,7 @@
 #include <linux/rbtree_latch.h>
 #include <linux/kallsyms.h>
 #include <linux/rcupdate.h>
+#include <linux/perf_event.h>
 
 #include <asm/unaligned.h>
 
@@ -218,47 +219,84 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
        return 0;
 }
 
-static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta)
+static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
+                               u32 curr, const bool probe_pass)
 {
+       const s64 imm_min = S32_MIN, imm_max = S32_MAX;
+       s64 imm = insn->imm;
+
+       if (curr < pos && curr + imm + 1 > pos)
+               imm += delta;
+       else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
+               imm -= delta;
+       if (imm < imm_min || imm > imm_max)
+               return -ERANGE;
+       if (!probe_pass)
+               insn->imm = imm;
+       return 0;
+}
+
+static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
+                               u32 curr, const bool probe_pass)
+{
+       const s32 off_min = S16_MIN, off_max = S16_MAX;
+       s32 off = insn->off;
+
+       if (curr < pos && curr + off + 1 > pos)
+               off += delta;
+       else if (curr > pos + delta && curr + off + 1 <= pos + delta)
+               off -= delta;
+       if (off < off_min || off > off_max)
+               return -ERANGE;
+       if (!probe_pass)
+               insn->off = off;
+       return 0;
+}
+
+static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
+                           const bool probe_pass)
+{
+       u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
        struct bpf_insn *insn = prog->insnsi;
-       u32 i, insn_cnt = prog->len;
-       bool pseudo_call;
-       u8 code;
-       int off;
+       int ret = 0;
 
        for (i = 0; i < insn_cnt; i++, insn++) {
+               u8 code;
+
+               /* In the probing pass we still operate on the original,
+                * unpatched image in order to check overflows before we
+                * do any other adjustments. Therefore skip the patchlet.
+                */
+               if (probe_pass && i == pos) {
+                       i += delta + 1;
+                       insn++;
+               }
                code = insn->code;
-               if (BPF_CLASS(code) != BPF_JMP)
-                       continue;
-               if (BPF_OP(code) == BPF_EXIT)
+               if (BPF_CLASS(code) != BPF_JMP ||
+                   BPF_OP(code) == BPF_EXIT)
                        continue;
+               /* Adjust offset of jmps if we cross patch boundaries. */
                if (BPF_OP(code) == BPF_CALL) {
-                       if (insn->src_reg == BPF_PSEUDO_CALL)
-                               pseudo_call = true;
-                       else
+                       if (insn->src_reg != BPF_PSEUDO_CALL)
                                continue;
+                       ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
+                                                  probe_pass);
                } else {
-                       pseudo_call = false;
+                       ret = bpf_adj_delta_to_off(insn, pos, delta, i,
+                                                  probe_pass);
                }
-               off = pseudo_call ? insn->imm : insn->off;
-
-               /* Adjust offset of jmps if we cross boundaries. */
-               if (i < pos && i + off + 1 > pos)
-                       off += delta;
-               else if (i > pos + delta && i + off + 1 <= pos + delta)
-                       off -= delta;
-
-               if (pseudo_call)
-                       insn->imm = off;
-               else
-                       insn->off = off;
+               if (ret)
+                       break;
        }
+
+       return ret;
 }
 
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len)
 {
        u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
+       const u32 cnt_max = S16_MAX;
        struct bpf_prog *prog_adj;
 
        /* Since our patchlet doesn't expand the image, we're done. */
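
The two bpf_adj_delta_to_*() helpers added above share one rule: a jump that sits before the patch point and targets an instruction beyond it must grow by delta, a jump that sits after the expanded patchlet and targets an instruction before it must shrink by delta, and a result that no longer fits the field is reported as -ERANGE instead of written back (that is what the probe pass is for). A minimal user-space model of the s16 off variant, with illustrative names and simplified types, not the kernel code:

/* Simplified sketch of the adjustment plus bounds check; `pos` is where
 * the patch starts, `delta` is how many instructions were inserted, and
 * `curr` is the index of the jump being looked at.
 */
#include <stdbool.h>
#include <stdint.h>

static int adjust_jump_off(int16_t *off, int pos, int delta, int curr,
			   bool probe_only)
{
	int64_t v = *off;
	int64_t tgt = curr + v + 1;		/* index the jump lands on */

	if (curr < pos && tgt > pos)
		v += delta;			/* forward jump spans the patch  */
	else if (curr > pos + delta && tgt <= pos + delta)
		v -= delta;			/* backward jump spans the patch */
	if (v < INT16_MIN || v > INT16_MAX)
		return -1;			/* would be truncated: reject    */
	if (!probe_only)
		*off = (int16_t)v;		/* second pass: commit the value */
	return 0;
}
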
@@ -269,6 +307,15 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 
        insn_adj_cnt = prog->len + insn_delta;
 
+       /* Reject anything that would potentially let the insn->off
+        * target overflow when we have excessive program expansions.
+        * We need to probe here before we do any reallocation where
+        * we afterwards may not fail anymore.
+        */
+       if (insn_adj_cnt > cnt_max &&
+           bpf_adj_branches(prog, off, insn_delta, true))
+               return NULL;
+
        /* Several new instructions need to be inserted. Make room
         * for them. Likely, there's no need for a new allocation as
         * last page could have large enough tailroom.
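
To make the new comment concrete with illustrative numbers: insn->off is an s16, so a jump can span at most 32767 instructions, and a sufficiently large expansion inside that span pushes the adjusted offset out of range. The probe has to run before the prog is reallocated, because once the image is patched there is no way to back out. A toy calculation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t off = 30000;	/* original forward jump distance         */
	int32_t delta = 5000;	/* instructions inserted inside that span */
	int64_t adj = (int64_t)off + delta;

	if (adj > INT16_MAX)	/* 32767: adjusted target no longer fits  */
		printf("off %lld overflows s16, reject the patch early\n",
		       (long long)adj);
	return 0;
}
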
@@ -294,7 +341,11 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                sizeof(*patch) * insn_rest);
        memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
 
-       bpf_adj_branches(prog_adj, off, insn_delta);
+       /* We are guaranteed to not fail at this point, otherwise
+        * the ship has sailed to reverse to the original state. An
+        * overflow cannot happen at this point.
+        */
+       BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
 
        return prog_adj;
 }
@@ -633,23 +684,6 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
                *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
                break;
 
-       case BPF_LD | BPF_ABS | BPF_W:
-       case BPF_LD | BPF_ABS | BPF_H:
-       case BPF_LD | BPF_ABS | BPF_B:
-               *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
-               *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
-               *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
-               break;
-
-       case BPF_LD | BPF_IND | BPF_W:
-       case BPF_LD | BPF_IND | BPF_H:
-       case BPF_LD | BPF_IND | BPF_B:
-               *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
-               *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
-               *to++ = BPF_ALU32_REG(BPF_ADD, BPF_REG_AX, from->src_reg);
-               *to++ = BPF_LD_IND(from->code, BPF_REG_AX, 0);
-               break;
-
        case BPF_LD | BPF_IMM | BPF_DW:
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
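
The deleted LD_ABS/LD_IND cases used the same constant-blinding trick the surviving BPF_LD | BPF_IMM | BPF_DW case still relies on: emit imm ^ rnd and then XOR with rnd again, so the attacker-chosen constant never appears verbatim in the image. A tiny stand-alone demonstration with made-up values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t imm = 0xdeadbeef;	/* untrusted immediate from the prog */
	uint32_t rnd = 0x5a5a1234;	/* per-program random blinder        */
	uint32_t ax  = imm ^ rnd;	/* this is what lands in the image   */

	ax ^= rnd;			/* emitted XOR restores the value    */
	assert(ax == imm);		/* (x ^ r) ^ r == x                  */
	return 0;
}
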
@@ -890,14 +924,7 @@ EXPORT_SYMBOL_GPL(__bpf_call_base);
        INSN_3(LDX, MEM, W),                    \
        INSN_3(LDX, MEM, DW),                   \
        /*   Immediate based. */                \
-       INSN_3(LD, IMM, DW),                    \
-       /*   Misc (old cBPF carry-over). */     \
-       INSN_3(LD, ABS, B),                     \
-       INSN_3(LD, ABS, H),                     \
-       INSN_3(LD, ABS, W),                     \
-       INSN_3(LD, IND, B),                     \
-       INSN_3(LD, IND, H),                     \
-       INSN_3(LD, IND, W)
+       INSN_3(LD, IMM, DW)
 
 bool bpf_opcode_in_insntable(u8 code)
 {
@@ -907,6 +934,13 @@ bool bpf_opcode_in_insntable(u8 code)
                [0 ... 255] = false,
                /* Now overwrite non-defaults ... */
                BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL),
+               /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */
+               [BPF_LD | BPF_ABS | BPF_B] = true,
+               [BPF_LD | BPF_ABS | BPF_H] = true,
+               [BPF_LD | BPF_ABS | BPF_W] = true,
+               [BPF_LD | BPF_IND | BPF_B] = true,
+               [BPF_LD | BPF_IND | BPF_H] = true,
+               [BPF_LD | BPF_IND | BPF_W] = true,
        };
 #undef BPF_INSN_3_TBL
 #undef BPF_INSN_2_TBL
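
These opcodes drop out of the interpreter's instruction map but stay valid at load time through the explicit entries above, matching the "UAPI exposed, but rewritten" comment. A rough user-space model of that 256-entry lookup (simplified; the [0 ... 255] range designator is the same GCC extension the kernel table uses):

#include <stdbool.h>
#include <stdint.h>
#include <linux/bpf.h>		/* BPF_LD, BPF_ABS, BPF_IND, BPF_B/H/W */

static const bool opcode_valid[256] = {
	[0 ... 255] = false,
	/* cBPF carry-over: accepted from user space, rewritten later. */
	[BPF_LD | BPF_ABS | BPF_B] = true,
	[BPF_LD | BPF_ABS | BPF_H] = true,
	[BPF_LD | BPF_ABS | BPF_W] = true,
	[BPF_LD | BPF_IND | BPF_B] = true,
	[BPF_LD | BPF_IND | BPF_H] = true,
	[BPF_LD | BPF_IND | BPF_W] = true,
};

bool opcode_in_table(uint8_t code)
{
	return opcode_valid[code];
}
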
@@ -937,8 +971,6 @@ static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 #undef BPF_INSN_3_LBL
 #undef BPF_INSN_2_LBL
        u32 tail_call_cnt = 0;
-       void *ptr;
-       int off;
 
 #define CONT    ({ insn++; goto select_insn; })
 #define CONT_JMP ({ insn++; goto select_insn; })
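
For context on the CONT/CONT_JMP macros in this hunk: the interpreter dispatches through GCC's labels-as-values rather than a switch, so every handler ends by jumping straight to the next handler's label. A minimal, self-contained sketch of that dispatch style, using a toy three-opcode accumulator machine rather than BPF:

#include <stdio.h>

enum { OP_INC, OP_PRINT, OP_HALT };

static long run(const unsigned char *insn)
{
	/* Jump table of label addresses, indexed by opcode. */
	static const void *jumptable[] = {
		[OP_INC]   = &&do_inc,
		[OP_PRINT] = &&do_print,
		[OP_HALT]  = &&do_halt,
	};
	long acc = 0;

#define CONT ({ insn++; goto select_insn; })
select_insn:
	goto *jumptable[*insn];	/* computed goto: no central switch */
do_inc:
	acc++;
	CONT;
do_print:
	printf("acc=%ld\n", acc);
	CONT;
do_halt:
	return acc;
#undef CONT
}

int main(void)
{
	const unsigned char prog[] = { OP_INC, OP_INC, OP_PRINT, OP_HALT };

	return run(prog) == 2 ? 0 : 1;
}
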
@@ -1265,67 +1297,6 @@ out:
                atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
                             (DST + insn->off));
                CONT;
-       LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
-               off = IMM;
-load_word:
-               /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are only
-                * appearing in the programs where ctx == skb
-                * (see may_access_skb() in the verifier). All programs
-                * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6,
-                * bpf_convert_filter() saves it in BPF_R6, internal BPF
-                * verifier will check that BPF_R6 == ctx.
-                *
-                * BPF_ABS and BPF_IND are wrappers of function calls,
-                * so they scratch BPF_R1-BPF_R5 registers, preserve
-                * BPF_R6-BPF_R9, and store return value into BPF_R0.
-                *
-                * Implicit input:
-                *   ctx == skb == BPF_R6 == CTX
-                *
-                * Explicit input:
-                *   SRC == any register
-                *   IMM == 32-bit immediate
-                *
-                * Output:
-                *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
-                */
-
-               ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
-               if (likely(ptr != NULL)) {
-                       BPF_R0 = get_unaligned_be32(ptr);
-                       CONT;
-               }
-
-               return 0;
-       LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
-               off = IMM;
-load_half:
-               ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
-               if (likely(ptr != NULL)) {
-                       BPF_R0 = get_unaligned_be16(ptr);
-                       CONT;
-               }
-
-               return 0;
-       LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
-               off = IMM;
-load_byte:
-               ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
-               if (likely(ptr != NULL)) {
-                       BPF_R0 = *(u8 *)ptr;
-                       CONT;
-               }
-
-               return 0;
-       LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
-               off = IMM + SRC;
-               goto load_word;
-       LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
-               off = IMM + SRC;
-               goto load_half;
-       LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
-               off = IMM + SRC;
-               goto load_byte;
 
        default_label:
                /* If we ever reach this, we have a bug somewhere. Die hard here
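
The removed paths implemented LD_ABS/LD_IND directly against skb data inside the interpreter; as the table entries earlier note, these classic-BPF carry-overs remain UAPI-visible but are rewritten before execution. For a reminder of where the opcodes come from, here is a minimal classic socket filter that uses BPF_LD | BPF_ABS from user space (illustrative only: needs CAP_NET_RAW, error handling trimmed):

#include <arpa/inet.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct sock_filter code[] = {
		/* A = 16-bit EtherType at absolute offset 12 */
		BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),
		/* keep IPv4, drop everything else */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),
		BPF_STMT(BPF_RET | BPF_K, 0),
	};
	struct sock_fprog prog = {
		.len    = sizeof(code) / sizeof(code[0]),
		.filter = code,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 || setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				 &prog, sizeof(prog)) < 0)
		perror("attach classic filter");
	return 0;
}
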
@@ -1645,6 +1616,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
        int new_prog_cnt, carry_prog_cnt = 0;
        struct bpf_prog **existing_prog;
        struct bpf_prog_array *array;
+       bool found_exclude = false;
        int new_prog_idx = 0;
 
        /* Figure out how many existing progs we need to carry over to
@@ -1653,14 +1625,20 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
        if (old_array) {
                existing_prog = old_array->progs;
                for (; *existing_prog; existing_prog++) {
-                       if (*existing_prog != exclude_prog &&
-                           *existing_prog != &dummy_bpf_prog.prog)
+                       if (*existing_prog == exclude_prog) {
+                               found_exclude = true;
+                               continue;
+                       }
+                       if (*existing_prog != &dummy_bpf_prog.prog)
                                carry_prog_cnt++;
                        if (*existing_prog == include_prog)
                                return -EEXIST;
                }
        }
 
+       if (exclude_prog && !found_exclude)
+               return -ENOENT;
+
        /* How many progs (not NULL) will be in the new array? */
        new_prog_cnt = carry_prog_cnt;
        if (include_prog)
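
The counting pass now distinguishes "exclude a program that is attached" from "exclude a program that was never attached", returning -ENOENT for the latter instead of silently copying the array. A condensed user-space model of that pass (plain NULL-terminated pointer array, dummy-prog handling omitted):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct prog;	/* opaque stand-in for struct bpf_prog */

static int count_carry(struct prog **old, const struct prog *exclude,
		       const struct prog *include, int *carry_cnt)
{
	bool found_exclude = false;

	*carry_cnt = 0;
	for (; old && *old; old++) {
		if (*old == exclude) {
			found_exclude = true;
			continue;	/* dropped from the new array */
		}
		(*carry_cnt)++;
		if (*old == include)
			return -EEXIST;	/* already present, refuse to add */
	}
	if (exclude && !found_exclude)
		return -ENOENT;		/* asked to remove something absent */
	return 0;
}
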
@@ -1722,6 +1700,10 @@ static void bpf_prog_free_deferred(struct work_struct *work)
        aux = container_of(work, struct bpf_prog_aux, work);
        if (bpf_prog_is_dev_bound(aux))
                bpf_prog_offload_destroy(aux->prog);
+#ifdef CONFIG_PERF_EVENTS
+       if (aux->prog->has_callchain_buf)
+               put_callchain_buffers();
+#endif
        for (i = 0; i < aux->func_cnt; i++)
                bpf_jit_free(aux->func[i]);
        if (aux->func_cnt) {
@@ -1782,6 +1764,8 @@ const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
 const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
 const struct bpf_func_proto bpf_get_current_comm_proto __weak;
 const struct bpf_func_proto bpf_sock_map_update_proto __weak;
+const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
+const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
 
 const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
 {
@@ -1794,6 +1778,7 @@ bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 {
        return -ENOTSUPP;
 }
+EXPORT_SYMBOL_GPL(bpf_event_output);
 
 /* Always built-in helper functions. */
 const struct bpf_func_proto bpf_tail_call_proto = {
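
The bpf_event_output() stub a few lines up is a weak default that just reports -ENOTSUPP; a stronger definition elsewhere in the kernel replaces it at link time, and the new EXPORT_SYMBOL_GPL makes whichever definition won callable from modules. The general weak-default pattern, sketched with invented names:

/* Invented names, not kernel API: a weak stub that reports "not
 * supported" until a strong definition with the same signature is
 * linked in and silently overrides it.
 */
#define ENOTSUPP 524	/* kernel-internal "operation not supported" */

__attribute__((weak)) long event_output(const void *meta, unsigned long size)
{
	(void)meta;
	(void)size;
	return -ENOTSUPP;	/* fallback when no backend is built in */
}

/* In another translation unit, a strong definition wins at link time:
 *
 *	long event_output(const void *meta, unsigned long size)
 *	{
 *		return deliver_event(meta, size);   // hypothetical backend
 *	}
 */
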
@@ -1840,9 +1825,3 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
 #include <linux/bpf_trace.h>
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
-
-/* These are only used within the BPF_SYSCALL code */
-#ifdef CONFIG_BPF_SYSCALL
-EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
-EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
-#endif