// SPDX-License-Identifier: GPL-2.0
/* XSKMAP used for AF_XDP sockets
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/capability.h>
#include <net/xdp_sock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/btf_ids.h>

#include "xsk.h"

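/* Each socket inserted into an XSKMAP is tracked by an xsk_map_node on the
 * socket's map_list. The node records which map slot points at the socket,
 * so the socket can clear any entries that still reference it when it is
 * released (see xsk_map_try_sock_delete()). Each node takes a reference on
 * the map via bpf_map_inc() and is accounted in map->count, which
 * xsk_map_mem_usage() reports.
 */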
static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map,
                                               struct xdp_sock __rcu **map_entry)
{
        struct xsk_map_node *node;

        node = bpf_map_kzalloc(&map->map, sizeof(*node),
                               GFP_ATOMIC | __GFP_NOWARN);
        if (!node)
                return ERR_PTR(-ENOMEM);

        bpf_map_inc(&map->map);
        atomic_inc(&map->count);

        node->map = map;
        node->map_entry = map_entry;
        return node;
}

static void xsk_map_node_free(struct xsk_map_node *node)
{
        struct xsk_map *map = node->map;

        bpf_map_put(&node->map->map);
        kfree(node);
        atomic_dec(&map->count);
}

static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node)
{
        spin_lock_bh(&xs->map_list_lock);
        list_add_tail(&node->node, &xs->map_list);
        spin_unlock_bh(&xs->map_list_lock);
}

static void xsk_map_sock_delete(struct xdp_sock *xs,
                                struct xdp_sock __rcu **map_entry)
{
        struct xsk_map_node *n, *tmp;

        spin_lock_bh(&xs->map_list_lock);
        list_for_each_entry_safe(n, tmp, &xs->map_list, node) {
                if (map_entry == n->map_entry) {
                        list_del(&n->node);
                        xsk_map_node_free(n);
                }
        }
        spin_unlock_bh(&xs->map_list_lock);
}

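/* Creating an XSKMAP requires CAP_NET_ADMIN. Keys are 32-bit array indices
 * and values are 32-bit XSK socket file descriptors, so both key_size and
 * value_size must be 4.
 */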
static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
{
        struct xsk_map *m;
        int numa_node;
        u64 size;

        if (!capable(CAP_NET_ADMIN))
                return ERR_PTR(-EPERM);

        if (attr->max_entries == 0 || attr->key_size != 4 ||
            attr->value_size != 4 ||
            attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY))
                return ERR_PTR(-EINVAL);

        numa_node = bpf_map_attr_numa_node(attr);
        size = struct_size(m, xsk_map, attr->max_entries);

        m = bpf_map_area_alloc(size, numa_node);
        if (!m)
                return ERR_PTR(-ENOMEM);

        bpf_map_init_from_attr(&m->map, attr);
        spin_lock_init(&m->lock);

        return &m->map;
}
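
/* A minimal userspace sketch (not part of this file; assumes libbpf's
 * <bpf/bpf.h> and a hypothetical max_entries of 64) of creating a map that
 * passes the checks in xsk_map_alloc() above:
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_XSKMAP, "xsks_map",
 *				    sizeof(__u32), sizeof(__u32), 64, NULL);
 *
 * A zero max_entries, any other key/value size, or an unsupported map_flags
 * bit makes the kernel return -EINVAL instead.
 */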

static u64 xsk_map_mem_usage(const struct bpf_map *map)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);

        return struct_size(m, xsk_map, map->max_entries) +
                   (u64)atomic_read(&m->count) * sizeof(struct xsk_map_node);
}

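/* Freeing waits for in-flight RCU readers (both syscall and NAPI datapath,
 * see the comment above __xsk_map_lookup_elem()) via synchronize_net()
 * before releasing the backing memory.
 */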
static void xsk_map_free(struct bpf_map *map)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);

        synchronize_net();
        bpf_map_area_free(m);
}

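/* Map iteration: a missing or out-of-range key restarts the walk at index 0;
 * reaching the last index ends it with -ENOENT.
 */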
static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = next_key;

        if (index >= m->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == m->map.max_entries - 1)
                return -ENOENT;
        *next = index + 1;
        return 0;
}

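/* The verifier uses this to inline lookups from BPF programs instead of
 * emitting a helper call: bounds-check the index, scale it by the pointer
 * size, and load the socket pointer from the xsk_map[] flexible array, or
 * produce NULL when the index is out of range.
 */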
static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
        const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
        struct bpf_insn *insn = insn_buf;

        *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
        *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
        *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xdp_sock *)));
        *insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map));
        *insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp);
        *insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xdp_sock *), ret, ret, 0);
        *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
        *insn++ = BPF_MOV64_IMM(ret, 0);
        return insn - insn_buf;
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);

        if (key >= map->max_entries)
                return NULL;

        return rcu_dereference_check(m->xsk_map[key], rcu_read_lock_bh_held());
}

static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
{
        return __xsk_map_lookup_elem(map, *(u32 *)key);
}

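/* Lookups from the bpf() syscall are rejected: the stored values are kernel
 * socket pointers, which must not be exposed to userspace.
 */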
static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
{
        return ERR_PTR(-EOPNOTSUPP);
}

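/* Update from the bpf() syscall: *value is an AF_XDP socket fd to store at
 * index *key. BPF_NOEXIST fails if the slot is occupied, BPF_EXIST fails if
 * it is empty; re-inserting the same socket is a no-op.
 */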
static long xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
                                u64 map_flags)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        struct xdp_sock __rcu **map_entry;
        struct xdp_sock *xs, *old_xs;
        u32 i = *(u32 *)key, fd = *(u32 *)value;
        struct xsk_map_node *node;
        struct socket *sock;
        int err;

        if (unlikely(map_flags > BPF_EXIST))
                return -EINVAL;
        if (unlikely(i >= m->map.max_entries))
                return -E2BIG;

        sock = sockfd_lookup(fd, &err);
        if (!sock)
                return err;

        if (sock->sk->sk_family != PF_XDP) {
                sockfd_put(sock);
                return -EOPNOTSUPP;
        }

        xs = (struct xdp_sock *)sock->sk;

        map_entry = &m->xsk_map[i];
        node = xsk_map_node_alloc(m, map_entry);
        if (IS_ERR(node)) {
                sockfd_put(sock);
                return PTR_ERR(node);
        }

        spin_lock_bh(&m->lock);
        old_xs = rcu_dereference_protected(*map_entry, lockdep_is_held(&m->lock));
        if (old_xs == xs) {
                err = 0;
                goto out;
        } else if (old_xs && map_flags == BPF_NOEXIST) {
                err = -EEXIST;
                goto out;
        } else if (!old_xs && map_flags == BPF_EXIST) {
                err = -ENOENT;
                goto out;
        }
        xsk_map_sock_add(xs, node);
        rcu_assign_pointer(*map_entry, xs);
        if (old_xs)
                xsk_map_sock_delete(old_xs, map_entry);
        spin_unlock_bh(&m->lock);
        sockfd_put(sock);
        return 0;

out:
        spin_unlock_bh(&m->lock);
        sockfd_put(sock);
        xsk_map_node_free(node);
        return err;
}
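
/* A matching userspace sketch (again assuming libbpf; xsk_fd would be a
 * bound AF_XDP socket, idx a queue index below max_entries):
 *
 *	__u32 idx = 0, xsk_fd = ...;	(elided: socket setup)
 *	int err = bpf_map_update_elem(map_fd, &idx, &xsk_fd, 0);
 *
 * Non-AF_XDP fds are rejected with -EOPNOTSUPP by the function above.
 */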

static long xsk_map_delete_elem(struct bpf_map *map, void *key)
{
        struct xsk_map *m = container_of(map, struct xsk_map, map);
        struct xdp_sock __rcu **map_entry;
        struct xdp_sock *old_xs;
        int k = *(u32 *)key;

        if (k >= map->max_entries)
                return -EINVAL;

        spin_lock_bh(&m->lock);
        map_entry = &m->xsk_map[k];
        old_xs = unrcu_pointer(xchg(map_entry, NULL));
        if (old_xs)
                xsk_map_sock_delete(old_xs, map_entry);
        spin_unlock_bh(&m->lock);

        return 0;
}

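/* Backs the bpf_redirect_map() helper for this map type: the generic
 * __bpf_xdp_redirect_map() resolves the target socket with
 * __xsk_map_lookup_elem() and falls back to the XDP action encoded in the
 * lower bits of @flags when the slot is empty.
 */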
static long xsk_map_redirect(struct bpf_map *map, u64 index, u64 flags)
{
        return __bpf_xdp_redirect_map(map, index, flags, 0,
                                      __xsk_map_lookup_elem);
}

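/* A minimal XDP program sketch (not part of this file; "xsks_map" is a
 * hypothetical BPF_MAP_TYPE_XSKMAP defined by the program) that redirects
 * each packet to the socket stored at its RX queue index, passing the
 * packet up the stack when the slot is empty:
 *
 *	SEC("xdp")
 *	int xsk_redir_prog(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&xsks_map, ctx->rx_queue_index,
 *					XDP_PASS);
 *	}
 */

/* Called from the XSK socket teardown path: clear @map_entry only if it
 * still points at @xs, and drop the tracking node taken at insert time.
 */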
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
                             struct xdp_sock __rcu **map_entry)
{
        spin_lock_bh(&map->lock);
        if (rcu_access_pointer(*map_entry) == xs) {
                rcu_assign_pointer(*map_entry, NULL);
                xsk_map_sock_delete(xs, map_entry);
        }
        spin_unlock_bh(&map->lock);
}

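/* For use as an inner map (map-in-map): inner XSKMAPs must also match in
 * max_entries, since xsk_map_gen_lookup() bakes that bound into the inlined
 * instructions.
 */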
static bool xsk_map_meta_equal(const struct bpf_map *meta0,
                               const struct bpf_map *meta1)
{
        return meta0->max_entries == meta1->max_entries &&
                bpf_map_meta_equal(meta0, meta1);
}

BTF_ID_LIST_SINGLE(xsk_map_btf_ids, struct, xsk_map)
const struct bpf_map_ops xsk_map_ops = {
        .map_meta_equal = xsk_map_meta_equal,
        .map_alloc = xsk_map_alloc,
        .map_free = xsk_map_free,
        .map_get_next_key = xsk_map_get_next_key,
        .map_lookup_elem = xsk_map_lookup_elem,
        .map_gen_lookup = xsk_map_gen_lookup,
        .map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only,
        .map_update_elem = xsk_map_update_elem,
        .map_delete_elem = xsk_map_delete_elem,
        .map_check_btf = map_check_no_btf,
        .map_mem_usage = xsk_map_mem_usage,
        .map_btf_id = &xsk_map_btf_ids[0],
        .map_redirect = xsk_map_redirect,
};