bpf, xdp: Start using the BPF dispatcher for XDP
author: Björn Töpel <bjorn.topel@intel.com>
Fri, 13 Dec 2019 17:51:09 +0000 (18:51 +0100)
committer: Alexei Starovoitov <ast@kernel.org>
Fri, 13 Dec 2019 21:09:32 +0000 (13:09 -0800)
This commit adds a BPF dispatcher for XDP. The dispatcher is updated
from the XDP control-path, dev_xdp_install(), and used when an XDP
program is run via bpf_prog_run_xdp().

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20191213175112.30208-4-bjorn.topel@gmail.com
include/linux/bpf.h
include/linux/filter.h
kernel/bpf/syscall.c
net/core/dev.c
net/core/filter.c

index 53ae4a5..5970989 100644 (file)
@@ -488,6 +488,14 @@ struct bpf_dispatcher {
        u32 image_off;
 };
 
+static __always_inline unsigned int bpf_dispatcher_nopfunc(
+       const void *ctx,
+       const struct bpf_insn *insnsi,
+       unsigned int (*bpf_func)(const void *,
+                                const struct bpf_insn *))
+{
+       return bpf_func(ctx, insnsi);
+}
 #ifdef CONFIG_BPF_JIT
 struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
 int bpf_trampoline_link_prog(struct bpf_prog *prog);
@@ -997,6 +1005,8 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
 
 int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog);
 
+struct bpf_prog *bpf_prog_by_id(u32 id);
+
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
@@ -1128,6 +1138,11 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 static inline void bpf_map_put(struct bpf_map *map)
 {
 }
+
+static inline struct bpf_prog *bpf_prog_by_id(u32 id)
+{
+       return ERR_PTR(-ENOTSUPP);
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
index a141cb0..37ac702 100644 (file)
@@ -559,23 +559,26 @@ struct sk_filter {
 
 DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 
-#define BPF_PROG_RUN(prog, ctx)        ({                              \
-       u32 ret;                                                \
-       cant_sleep();                                           \
-       if (static_branch_unlikely(&bpf_stats_enabled_key)) {   \
-               struct bpf_prog_stats *stats;                   \
-               u64 start = sched_clock();                      \
-               ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
-               stats = this_cpu_ptr(prog->aux->stats);         \
-               u64_stats_update_begin(&stats->syncp);          \
-               stats->cnt++;                                   \
-               stats->nsecs += sched_clock() - start;          \
-               u64_stats_update_end(&stats->syncp);            \
-       } else {                                                \
-               ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
-       }                                                       \
+#define __BPF_PROG_RUN(prog, ctx, dfunc)       ({                      \
+       u32 ret;                                                        \
+       cant_sleep();                                                   \
+       if (static_branch_unlikely(&bpf_stats_enabled_key)) {           \
+               struct bpf_prog_stats *stats;                           \
+               u64 start = sched_clock();                              \
+               ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);     \
+               stats = this_cpu_ptr(prog->aux->stats);                 \
+               u64_stats_update_begin(&stats->syncp);                  \
+               stats->cnt++;                                           \
+               stats->nsecs += sched_clock() - start;                  \
+               u64_stats_update_end(&stats->syncp);                    \
+       } else {                                                        \
+               ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func);     \
+       }                                                               \
        ret; })
 
+#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx,              \
+                                              bpf_dispatcher_nopfunc)
+
 #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
 
 struct bpf_skb_data_end {
@@ -699,6 +702,8 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
        return res;
 }
 
+DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp)
+
 static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
                                            struct xdp_buff *xdp)
 {
@@ -708,9 +713,12 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
         * already takes rcu_read_lock() when fetching the program, so
         * it's not necessary here anymore.
         */
-       return BPF_PROG_RUN(prog, xdp);
+       return __BPF_PROG_RUN(prog, xdp,
+                             BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
 }
 
+void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
+
 static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
 {
        return prog->len * sizeof(struct bpf_insn);
index 66b90ea..b08c362 100644 (file)
@@ -2338,17 +2338,12 @@ static int bpf_obj_get_next_id(const union bpf_attr *attr,
 
 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
 
-static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
+struct bpf_prog *bpf_prog_by_id(u32 id)
 {
        struct bpf_prog *prog;
-       u32 id = attr->prog_id;
-       int fd;
-
-       if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
-               return -EINVAL;
 
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
+       if (!id)
+               return ERR_PTR(-ENOENT);
 
        spin_lock_bh(&prog_idr_lock);
        prog = idr_find(&prog_idr, id);
@@ -2357,7 +2352,22 @@ static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
        else
                prog = ERR_PTR(-ENOENT);
        spin_unlock_bh(&prog_idr_lock);
+       return prog;
+}
+
+static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
+{
+       struct bpf_prog *prog;
+       u32 id = attr->prog_id;
+       int fd;
+
+       if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
+               return -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
 
+       prog = bpf_prog_by_id(id);
        if (IS_ERR(prog))
                return PTR_ERR(prog);
 
index 2c277b8..255d3cf 100644 (file)
@@ -8542,7 +8542,17 @@ static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
                           struct netlink_ext_ack *extack, u32 flags,
                           struct bpf_prog *prog)
 {
+       bool non_hw = !(flags & XDP_FLAGS_HW_MODE);
+       struct bpf_prog *prev_prog = NULL;
        struct netdev_bpf xdp;
+       int err;
+
+       if (non_hw) {
+               prev_prog = bpf_prog_by_id(__dev_xdp_query(dev, bpf_op,
+                                                          XDP_QUERY_PROG));
+               if (IS_ERR(prev_prog))
+                       prev_prog = NULL;
+       }
 
        memset(&xdp, 0, sizeof(xdp));
        if (flags & XDP_FLAGS_HW_MODE)
@@ -8553,7 +8563,14 @@ static int dev_xdp_install(struct net_device *dev, bpf_op_t bpf_op,
        xdp.flags = flags;
        xdp.prog = prog;
 
-       return bpf_op(dev, &xdp);
+       err = bpf_op(dev, &xdp);
+       if (!err && non_hw)
+               bpf_prog_change_xdp(prev_prog, prog);
+
+       if (prev_prog)
+               bpf_prog_put(prev_prog);
+
+       return err;
 }
 
 static void dev_xdp_uninstall(struct net_device *dev)
index f1e703e..a411f78 100644 (file)
@@ -8940,3 +8940,11 @@ const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
 const struct bpf_prog_ops sk_reuseport_prog_ops = {
 };
 #endif /* CONFIG_INET */
+
+DEFINE_BPF_DISPATCHER(bpf_dispatcher_xdp)
+
+void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
+{
+       bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(bpf_dispatcher_xdp),
+                                  prev_prog, prog);
+}