/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/pkt_cls.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#include "fw.h"
#include "main.h"

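/* The "neutral" maps hashtable is keyed by the kernel's struct bpf_map
 * pointer, so that programs sharing a map can be resolved to the same
 * offloaded map record (an assumption based on the key choice below).
 */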
const struct rhashtable_params nfp_bpf_maps_neutral_params = {
        .nelem_hint             = 4,
        .key_len                = FIELD_SIZEOF(struct nfp_bpf_neutral_map, ptr),
        .key_offset             = offsetof(struct nfp_bpf_neutral_map, ptr),
        .head_offset            = offsetof(struct nfp_bpf_neutral_map, l),
        .automatic_shrinking    = true,
};

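/* eBPF offload requires a little-endian host and firmware which both
 * advertises NFP_NET_CFG_CTRL_BPF and reports the ABI version this
 * driver was built against.
 */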
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
#ifdef __LITTLE_ENDIAN
        if (nn->cap & NFP_NET_CFG_CTRL_BPF &&
            nn_readb(nn, NFP_NET_CFG_BPF_ABI) == NFP_NET_BPF_ABI)
                return true;
#endif
        return false;
}

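/* Install or remove an offloaded XDP program.  If a program is already
 * running but was not attached via XDP (i.e. it belongs to TC), refuse
 * with -EBUSY rather than silently replacing it.
 */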
static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
                    struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
        bool running, xdp_running;
        int ret;

        if (!nfp_net_ebpf_capable(nn))
                return -EINVAL;

        running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
        xdp_running = running && nn->dp.bpf_offload_xdp;

        if (!prog && !xdp_running)
                return 0;
        if (prog && running && !xdp_running)
                return -EBUSY;

        ret = nfp_net_bpf_offload(nn, prog, running, extack);
        /* Stop offload if replace not possible */
        if (ret)
                return ret;

        nn->dp.bpf_offload_xdp = !!prog;
        return ret;
}

static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
        return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}

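/* Allocate per-vNIC BPF state and cache the instruction start offset
 * and "done" jump target the firmware exposes in the config BAR.
 */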
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
        struct nfp_pf *pf = app->pf;
        struct nfp_bpf_vnic *bv;
        int err;

        if (!pf->eth_tbl) {
                nfp_err(pf->cpp, "No ETH table\n");
                return -EINVAL;
        }
        if (pf->max_data_vnics != pf->eth_tbl->count) {
                nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
                        pf->max_data_vnics, pf->eth_tbl->count);
                return -EINVAL;
        }

        bv = kzalloc(sizeof(*bv), GFP_KERNEL);
        if (!bv)
                return -ENOMEM;
        nn->app_priv = bv;

        err = nfp_app_nic_vnic_alloc(app, nn, id);
        if (err)
                goto err_free_priv;

        bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
        bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

        return 0;
err_free_priv:
        kfree(nn->app_priv);
        return err;
}

static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
        struct nfp_bpf_vnic *bv = nn->app_priv;

        WARN_ON(bv->tc_prog);
        kfree(bv);
}

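/* TC block callback: offload cls_bpf classifiers only, and only in
 * direct-action mode, on chain 0, with protocol ETH_P_ALL; anything
 * else is rejected with an extack message where possible.
 */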
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
                                     void *type_data, void *cb_priv)
{
        struct tc_cls_bpf_offload *cls_bpf = type_data;
        struct nfp_net *nn = cb_priv;
        struct bpf_prog *oldprog;
        struct nfp_bpf_vnic *bv;
        int err;

        if (type != TC_SETUP_CLSBPF) {
                NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
                                   "only offload of BPF classifiers supported");
                return -EOPNOTSUPP;
        }
        if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
                return -EOPNOTSUPP;
        if (!nfp_net_ebpf_capable(nn)) {
                NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
                                   "NFP firmware does not support eBPF offload");
                return -EOPNOTSUPP;
        }
        if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
                NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
                                   "only ETH_P_ALL supported as filter protocol");
                return -EOPNOTSUPP;
        }

        /* Only support TC direct action */
        if (!cls_bpf->exts_integrated ||
            tcf_exts_has_actions(cls_bpf->exts)) {
                NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
                                   "only direct action with no legacy actions supported");
                return -EOPNOTSUPP;
        }

        if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
                return -EOPNOTSUPP;

        bv = nn->app_priv;
        oldprog = cls_bpf->oldprog;

        /* Don't remove if oldprog doesn't match driver's state */
        if (bv->tc_prog != oldprog) {
                oldprog = NULL;
                if (!cls_bpf->prog)
                        return 0;
        }

        err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
                                  cls_bpf->common.extack);
        if (err)
                return err;

        bv->tc_prog = cls_bpf->prog;
        nn->port->tc_offload_cnt = !!bv->tc_prog;
        return 0;
}

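/* Bind/unbind the cls_bpf callback on a clsact ingress block.  Shared
 * blocks are rejected since the callback is bound to a single vNIC.
 */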
static int nfp_bpf_setup_tc_block(struct net_device *netdev,
                                  struct tc_block_offload *f)
{
        struct nfp_net *nn = netdev_priv(netdev);

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        if (tcf_block_shared(f->block))
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
                                             nfp_bpf_setup_tc_block_cb,
                                             nn, nn);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block,
                                        nfp_bpf_setup_tc_block_cb,
                                        nn);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
                            enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return nfp_bpf_setup_tc_block(netdev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

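/* While BPF offload is active the MTU is capped by the firmware's
 * inline buffer size.  NFP_NET_CFG_BPF_INL_MTU appears to be expressed
 * in 64-byte chunks, with 32 bytes assumed reserved by the firmware.
 */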
static int
nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
        struct nfp_net *nn = netdev_priv(netdev);
        unsigned int max_mtu;

        if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                return 0;

        max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
        if (new_mtu > max_mtu) {
                nn_info(nn, "BPF offload active, MTU over %u not supported\n",
                        max_mtu);
                return -EBUSY;
        }
        return 0;
}

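/* Parse the adjust_head capability TLV: record the supported offset
 * range and disable the feature entirely if min/max don't fit in
 * unrestricted-register immediates, which the JIT relies on.
 */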
static int
nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
                              u32 length)
{
        struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
        struct nfp_cpp *cpp = bpf->app->pf->cpp;

        if (length < sizeof(*cap)) {
                nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
                return -EINVAL;
        }

        bpf->adjust_head.flags = readl(&cap->flags);
        bpf->adjust_head.off_min = readl(&cap->off_min);
        bpf->adjust_head.off_max = readl(&cap->off_max);
        bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
        bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);

        if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
                nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
                return -EINVAL;
        }
        if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
            !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
                nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
                memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
                return 0;
        }

        return 0;
}

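/* Record the firmware call addresses of the BPF helpers advertised in
 * function TLVs; unrecognized helper IDs are silently ignored.
 */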
static int
nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
        struct nfp_bpf_cap_tlv_func __iomem *cap = value;

        if (length < sizeof(*cap)) {
                nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
                return -EINVAL;
        }

        switch (readl(&cap->func_id)) {
        case BPF_FUNC_map_lookup_elem:
                bpf->helpers.map_lookup = readl(&cap->func_addr);
                break;
        case BPF_FUNC_map_update_elem:
                bpf->helpers.map_update = readl(&cap->func_addr);
                break;
        case BPF_FUNC_map_delete_elem:
                bpf->helpers.map_delete = readl(&cap->func_addr);
                break;
        case BPF_FUNC_perf_event_output:
                bpf->helpers.perf_event_output = readl(&cap->func_addr);
                break;
        }

        return 0;
}

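/* Record the map limits the firmware advertises: supported map types,
 * maximum map and element counts, and key/value/element sizes.
 */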
static int
nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
        struct nfp_bpf_cap_tlv_maps __iomem *cap = value;

        if (length < sizeof(*cap)) {
                nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
                return -EINVAL;
        }

        bpf->maps.types = readl(&cap->types);
        bpf->maps.max_maps = readl(&cap->max_maps);
        bpf->maps.max_elems = readl(&cap->max_elems);
        bpf->maps.max_key_sz = readl(&cap->max_key_sz);
        bpf->maps.max_val_sz = readl(&cap->max_val_sz);
        bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);

        return 0;
}

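/* Presence of the RANDOM TLV indicates the firmware can back the
 * bpf_get_prandom_u32() helper; the TLV carries no payload we use.
 */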
static int
nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value,
                         u32 length)
{
        bpf->pseudo_random = true;
        return 0;
}

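/* Presence of the QUEUE_SELECT TLV indicates the firmware lets the
 * offloaded program select the RX queue (value unused here).
 */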
static int
nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
        bpf->queue_select = true;
        return 0;
}

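/* Capabilities live in the "_abi_bpf_capabilities" rtsym as a stream of
 * TLVs: a 32-bit type, a 32-bit length, then length bytes of value.
 * Walk the whole area, rejecting truncated or over-running entries.
 */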
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
        struct nfp_cpp *cpp = app->pf->cpp;
        struct nfp_cpp_area *area;
        u8 __iomem *mem, *start;

        mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
                            8, &area);
        if (IS_ERR(mem))
                return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);

        start = mem;
        while (mem - start + 8 <= nfp_cpp_area_size(area)) {
                u8 __iomem *value;
                u32 type, length;

                type = readl(mem);
                length = readl(mem + 4);
                value = mem + 8;

                mem += 8 + length;
                if (mem - start > nfp_cpp_area_size(area))
                        goto err_release_free;

                switch (type) {
                case NFP_BPF_CAP_TYPE_FUNC:
                        if (nfp_bpf_parse_cap_func(app->priv, value, length))
                                goto err_release_free;
                        break;
                case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
                        if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
                                                          length))
                                goto err_release_free;
                        break;
                case NFP_BPF_CAP_TYPE_MAPS:
                        if (nfp_bpf_parse_cap_maps(app->priv, value, length))
                                goto err_release_free;
                        break;
                case NFP_BPF_CAP_TYPE_RANDOM:
                        if (nfp_bpf_parse_cap_random(app->priv, value, length))
                                goto err_release_free;
                        break;
                case NFP_BPF_CAP_TYPE_QUEUE_SELECT:
                        if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
                                goto err_release_free;
                        break;
                default:
                        nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
                        break;
                }
        }
        if (mem - start != nfp_cpp_area_size(area)) {
                nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
                        mem - start, nfp_cpp_area_size(area));
                goto err_release_free;
        }

        nfp_cpp_area_release_free(area);

        return 0;

err_release_free:
        nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
        nfp_cpp_area_release_free(area);
        return -EINVAL;
}

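/* App init: allocate per-app BPF state, set up the control message
 * reply queue and neutral maps hashtable, then parse capabilities.
 */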
static int nfp_bpf_init(struct nfp_app *app)
{
        struct nfp_app_bpf *bpf;
        int err;

        bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
        if (!bpf)
                return -ENOMEM;
        bpf->app = app;
        app->priv = bpf;

        skb_queue_head_init(&bpf->cmsg_replies);
        init_waitqueue_head(&bpf->cmsg_wq);
        INIT_LIST_HEAD(&bpf->map_list);

        err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
        if (err)
                goto err_free_bpf;

        err = nfp_bpf_parse_capabilities(app);
        if (err)
                goto err_free_neutral_maps;

        return 0;

err_free_neutral_maps:
        rhashtable_destroy(&bpf->maps_neutral);
err_free_bpf:
        kfree(bpf);
        return err;
}

static void nfp_check_rhashtable_empty(void *ptr, void *arg)
{
        WARN_ON_ONCE(1);
}

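/* App clean-up: all maps and outstanding control message replies should
 * be gone by now; warn loudly if anything leaked.
 */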
static void nfp_bpf_clean(struct nfp_app *app)
{
        struct nfp_app_bpf *bpf = app->priv;

        WARN_ON(!skb_queue_empty(&bpf->cmsg_replies));
        WARN_ON(!list_empty(&bpf->map_list));
        WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
        rhashtable_free_and_destroy(&bpf->maps_neutral,
                                    nfp_check_rhashtable_empty, NULL);
        kfree(bpf);
}

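/* Callbacks through which the core NFP driver drives the BPF app. */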
const struct nfp_app_type app_bpf = {
        .id             = NFP_APP_BPF_NIC,
        .name           = "ebpf",

        .ctrl_cap_mask  = 0,

        .init           = nfp_bpf_init,
        .clean          = nfp_bpf_clean,

        .check_mtu      = nfp_bpf_check_mtu,

        .extra_cap      = nfp_bpf_extra_cap,

        .vnic_alloc     = nfp_bpf_vnic_alloc,
        .vnic_free      = nfp_bpf_vnic_free,

        .ctrl_msg_rx    = nfp_bpf_ctrl_msg_rx,

        .setup_tc       = nfp_bpf_setup_tc,
        .bpf            = nfp_ndo_bpf,
        .xdp_offload    = nfp_bpf_xdp_offload,
};