// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */
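
/* Minimal usage sketch from the BPF program side (illustration only, not part
 * of this file; map and program names are made up, libbpf's bpf_helpers.h
 * conventions assumed). An XDP program redirects packets to the AF_XDP socket
 * stored at the slot matching the receive queue:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_XSKMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} xsks_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xsk_redirect(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, XDP_PASS);
 *	}
 */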

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "xsk.h"
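
/* Each XDP socket keeps a list (xs->map_list) of the XSKMAP slots it has been
 * inserted into. An xsk_map_node ties one socket to one map slot and holds a
 * reference on the map, so a socket being torn down can find and clear every
 * slot that still points at it (see xsk_map_try_sock_delete()).
 */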

static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
					       struct xdp_sock __rcu **map_entry)
{
	struct xsk_map_node *node;

	node = bpf_map_kzalloc(&map->map, sizeof(*node),
			       GFP_ATOMIC | __GFP_NOWARN);
	if (!node)
		return ERR_PTR(-ENOMEM);

	bpf_map_inc(&map->map);

	node->map = map;
	node->map_entry = map_entry;
	return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
	bpf_map_put(&node->map->map);
	kfree(node);
}

static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
	spin_lock_bh(&xs->map_list_lock);
	list_add_tail(&node->node, &xs->map_list);
	spin_unlock_bh(&xs->map_list_lock);
}

static void xsk_map_sock_delete(struct xdp_sock *xs,
				struct xdp_sock __rcu **map_entry)
{
	struct xsk_map_node *n, *tmp;

	spin_lock_bh(&xs->map_list_lock);
	list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
		if (map_entry == n->map_entry) {
			list_del(&n->node);
			xsk_map_node_free(n);
		}
	}
	spin_unlock_bh(&xs->map_list_lock);
}
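
/* Map creation from userspace is a plain bpf(BPF_MAP_CREATE) call; a minimal
 * libbpf sketch, assuming libbpf >= 0.7 (names illustrative):
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xsks_map",
 *				    sizeof(__u32), sizeof(__u32), 64, NULL);
 *
 * Key and value sizes must both be 4 bytes and the caller needs CAP_NET_ADMIN,
 * matching the checks in xsk_map_alloc() below.
 */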

static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
	struct xsk_map *m;
	int numa_node;
	u64 size;

	if (!capable(CAP_NET_ADMIN))
		return ERR_PTR(-EPERM);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != 4 ||
	    attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
		return ERR_PTR(-EINVAL);

	numa_node = bpf_map_attr_numa_node(attr);
	size = struct_size(m, xsk_map, attr->max_entries);

	m = bpf_map_area_alloc(size, numa_node);
	if (!m)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&m->map, attr);
	spin_lock_init(&m->lock);

	return &m->map;
}

static void xsk_map_free(struct bpf_map *map)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	synchronize_net();
	bpf_map_area_free(m);
}

static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= m->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == m->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
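
/* bpf_map_lookup_elem() calls on an XSKMAP can be inlined by the verifier via
 * ->map_gen_lookup(). The instruction sequence emitted below is roughly
 * equivalent to this C sketch (illustration only):
 *
 *	u32 index = *(u32 *)key;
 *
 *	if (index >= map->max_entries)
 *		return NULL;
 *	return m->xsk_map[index];
 */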

static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
	struct bpf_insn *insn = insn_buf;

	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *)));
	*insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
	*insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);

	if (key >= map->max_entries)
		return NULL;

	return rcu_dereference_check(m->xsk_map[key], rcu_read_lock_bh_held());
}

static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
	return __xsk_map_lookup_elem(map, *(u32 *)key);
}

static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}
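
/* Userspace populates the map by storing the file descriptor of an AF_XDP
 * socket at a given index; a minimal libbpf sketch (names illustrative,
 * error handling omitted). The key is typically the rx queue index:
 *
 *	__u32 queue_id = 0;
 *	int xsk_fd = socket(AF_XDP, SOCK_RAW, 0);
 *
 *	bpf_map_update_elem(map_fd, &queue_id, &xsk_fd, BPF_ANY);
 *
 * xsk_map_update_elem() below resolves the fd back to its struct xdp_sock.
 */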

static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
			       u64 map_flags)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock __rcu **map_entry;
	struct xdp_sock *xs, *old_xs;
	u32 i = *(u32 *)key, fd = *(u32 *)value;
	struct xsk_map_node *node;
	struct socket *sock;
	int err;

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(i >= m->map.max_entries))
		return -E2BIG;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return err;

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return -EOPNOTSUPP;
	}

	xs = (struct xdp_sock *)sock->sk;

	map_entry = &m->xsk_map[i];
	node = xsk_map_node_alloc(m, map_entry);
	if (IS_ERR(node)) {
		sockfd_put(sock);
		return PTR_ERR(node);
	}

	spin_lock_bh(&m->lock);
	old_xs = rcu_dereference_protected(*map_entry, lockdep_is_held(&m->lock));
	if (old_xs == xs) {
		err = 0;
		goto out;
	} else if (old_xs && map_flags == BPF_NOEXIST) {
		err = -EEXIST;
		goto out;
	} else if (!old_xs && map_flags == BPF_EXIST) {
		err = -ENOENT;
		goto out;
	}
	xsk_map_sock_add(xs, node);
	rcu_assign_pointer(*map_entry, xs);
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	return 0;

out:
	spin_unlock_bh(&m->lock);
	sockfd_put(sock);
	xsk_map_node_free(node);
	return err;
}

static int xsk_map_delete_elem(struct bpf_map *map, void *key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock __rcu **map_entry;
	struct xdp_sock *old_xs;
	u32 k = *(u32 *)key;

	if (k >= map->max_entries)
		return -EINVAL;

	spin_lock_bh(&m->lock);
	map_entry = &m->xsk_map[k];
	old_xs = unrcu_pointer(xchg(map_entry, NULL));
	if (old_xs)
		xsk_map_sock_delete(old_xs, map_entry);
	spin_unlock_bh(&m->lock);

	return 0;
}

static int xsk_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
{
	return __bpf_xdp_redirect_map(map, ifindex, flags, 0,
				      __xsk_map_lookup_elem);
}
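
/* Called from the XDP socket teardown path: the socket walks its map_list and
 * asks each map to drop the slot that still points at it. The
 * rcu_access_pointer() check avoids clobbering a slot that has since been
 * repointed at a different socket.
 */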
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
			     struct xdp_sock __rcu **map_entry)
{
	spin_lock_bh(&map->lock);
	if (rcu_access_pointer(*map_entry) == xs) {
		rcu_assign_pointer(*map_entry, NULL);
		xsk_map_sock_delete(xs, map_entry);
	}
	spin_unlock_bh(&map->lock);
}

static bool xsk_map_meta_equal(const struct bpf_map *meta0,
			       const struct bpf_map *meta1)
{
	return meta0->max_entries == meta1->max_entries &&
		bpf_map_meta_equal(meta0, meta1);
}

static int xsk_map_btf_id;
const struct bpf_map_ops xsk_map_ops = {
	.map_meta_equal = xsk_map_meta_equal,
	.map_alloc = xsk_map_alloc,
	.map_free = xsk_map_free,
	.map_get_next_key = xsk_map_get_next_key,
	.map_lookup_elem = xsk_map_lookup_elem,
	.map_gen_lookup = xsk_map_gen_lookup,
	.map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
	.map_update_elem = xsk_map_update_elem,
	.map_delete_elem = xsk_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "xsk_map",
	.map_btf_id = &xsk_map_btf_id,
	.map_redirect = xsk_map_redirect,
};