// SPDX-License-Identifier: GPL-2.0-only
/* net/core/lwt_bpf.c
 *
 * Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>
#include <net/gre.h>
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>

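/* One BPF program attached to a single LWT hook, plus the user-supplied
 * name used for netlink dumps and state comparison. struct bpf_lwt below
 * bundles the programs for all three hooks (input, output, xmit).
 */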
struct bpf_lwt_prog {
        struct bpf_prog *prog;
        char *name;
};

struct bpf_lwt {
        struct bpf_lwt_prog in;
        struct bpf_lwt_prog out;
        struct bpf_lwt_prog xmit;
        int family;
};

#define MAX_PROG_NAME 256

static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
        return (struct bpf_lwt *)lwt->data;
}

#define NO_REDIRECT false
#define CAN_REDIRECT true

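/* Run @lwt->prog on @skb. Returns a BPF_* verdict (BPF_OK, BPF_REDIRECT,
 * BPF_LWT_REROUTE) or a negative errno; on BPF_DROP or an unknown return
 * code the skb is freed here and -EPERM/-EINVAL is returned. Redirection
 * is honoured only when @can_redirect is true (the xmit hook).
 */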
static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
                       struct dst_entry *dst, bool can_redirect)
{
        int ret;

        /* Disabling preemption is needed to protect the per-CPU
         * redirect_info between the BPF prog and skb_do_redirect().
         * The call_rcu() in bpf_prog_put() and map accesses strictly
         * require rcu_read_lock() for protection; mixing it with the
         * BH RCU lock doesn't work.
         */
        preempt_disable();
        bpf_compute_data_pointers(skb);
        ret = bpf_prog_run_save_cb(lwt->prog, skb);

        switch (ret) {
        case BPF_OK:
        case BPF_LWT_REROUTE:
                break;

        case BPF_REDIRECT:
                if (unlikely(!can_redirect)) {
                        pr_warn_once("Illegal redirect return code in prog %s\n",
                                     lwt->name ? : "<unknown>");
                        ret = BPF_OK;
                } else {
                        skb_reset_mac_header(skb);
                        ret = skb_do_redirect(skb);
                        if (ret == 0)
                                ret = BPF_REDIRECT;
                }
                break;

        case BPF_DROP:
                kfree_skb(skb);
                ret = -EPERM;
                break;

        default:
                pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
                kfree_skb(skb);
                ret = -EINVAL;
                break;
        }

        preempt_enable();

        return ret;
}

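/* Called when the input program returned BPF_LWT_REROUTE: the program may
 * have rewritten the headers, so drop the old dst and repeat the input
 * route lookup before handing the skb to dst_input().
 */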
static int bpf_lwt_input_reroute(struct sk_buff *skb)
{
        int err = -EINVAL;

        if (skb->protocol == htons(ETH_P_IP)) {
                struct net_device *dev = skb_dst(skb)->dev;
                struct iphdr *iph = ip_hdr(skb);

                dev_hold(dev);
                skb_dst_drop(skb);
                err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
                                           iph->tos, dev);
                dev_put(dev);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                skb_dst_drop(skb);
                err = ipv6_stub->ipv6_route_input(skb);
        } else {
                err = -EAFNOSUPPORT;
        }

        if (err)
                goto err;
        return dst_input(skb);

err:
        kfree_skb(skb);
        return err;
}

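/* lwtunnel input hook: run the BPF_PROG_TYPE_LWT_IN program, reroute if
 * requested, then continue with the dst's original input handler.
 */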
static int bpf_input(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct bpf_lwt *bpf;
        int ret;

        bpf = bpf_lwt_lwtunnel(dst->lwtstate);
        if (bpf->in.prog) {
                ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
                if (ret < 0)
                        return ret;
                if (ret == BPF_LWT_REROUTE)
                        return bpf_lwt_input_reroute(skb);
        }

        if (unlikely(!dst->lwtstate->orig_input)) {
                kfree_skb(skb);
                return -EINVAL;
        }

        return dst->lwtstate->orig_input(skb);
}

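/* lwtunnel output hook: run the BPF_PROG_TYPE_LWT_OUT program (no
 * redirects allowed here), then continue with the original output handler.
 */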
static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct bpf_lwt *bpf;
        int ret;

        bpf = bpf_lwt_lwtunnel(dst->lwtstate);
        if (bpf->out.prog) {
                ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
                if (ret < 0)
                        return ret;
        }

        if (unlikely(!dst->lwtstate->orig_output)) {
                pr_warn_once("orig_output not set on dst for prog %s\n",
                             bpf->out.name);
                kfree_skb(skb);
                return -EINVAL;
        }

        return dst->lwtstate->orig_output(net, sk, skb);
}

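/* Make sure there is enough headroom for the L2 header of the outgoing
 * device, expanding the skb head if necessary.
 */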
static int xmit_check_hhlen(struct sk_buff *skb)
{
        int hh_len = skb_dst(skb)->dev->hard_header_len;

        if (skb_headroom(skb) < hh_len) {
                int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

                if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
                        return -ENOMEM;
        }

        return 0;
}

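/* Called when the xmit program returned BPF_LWT_REROUTE, typically after
 * it pushed a new IP header via bpf_lwt_push_encap(): perform a fresh
 * output route lookup based on the (possibly new) outer header, attach
 * the resulting dst and transmit the packet.
 */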
static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
{
        struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
        int oif = l3mdev ? l3mdev->ifindex : 0;
        struct dst_entry *dst = NULL;
        int err = -EAFNOSUPPORT;
        struct sock *sk;
        struct net *net;
        bool ipv4;

        if (skb->protocol == htons(ETH_P_IP))
                ipv4 = true;
        else if (skb->protocol == htons(ETH_P_IPV6))
                ipv4 = false;
        else
                goto err;

        sk = sk_to_full_sk(skb->sk);
        if (sk) {
                if (sk->sk_bound_dev_if)
                        oif = sk->sk_bound_dev_if;
                net = sock_net(sk);
        } else {
                net = dev_net(skb_dst(skb)->dev);
        }

        if (ipv4) {
                struct iphdr *iph = ip_hdr(skb);
                struct flowi4 fl4 = {};
                struct rtable *rt;

                fl4.flowi4_oif = oif;
                fl4.flowi4_mark = skb->mark;
                fl4.flowi4_uid = sock_net_uid(net, sk);
                fl4.flowi4_tos = RT_TOS(iph->tos);
                fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
                fl4.flowi4_proto = iph->protocol;
                fl4.daddr = iph->daddr;
                fl4.saddr = iph->saddr;

                rt = ip_route_output_key(net, &fl4);
                if (IS_ERR(rt)) {
                        err = PTR_ERR(rt);
                        goto err;
                }
                dst = &rt->dst;
        } else {
                struct ipv6hdr *iph6 = ipv6_hdr(skb);
                struct flowi6 fl6 = {};

                fl6.flowi6_oif = oif;
                fl6.flowi6_mark = skb->mark;
                fl6.flowi6_uid = sock_net_uid(net, sk);
                fl6.flowlabel = ip6_flowinfo(iph6);
                fl6.flowi6_proto = iph6->nexthdr;
                fl6.daddr = iph6->daddr;
                fl6.saddr = iph6->saddr;

                err = ipv6_stub->ipv6_dst_lookup(net, skb->sk, &dst, &fl6);
                if (unlikely(err))
                        goto err;
                if (IS_ERR(dst)) {
                        err = PTR_ERR(dst);
                        goto err;
                }
        }
        if (unlikely(dst->error)) {
                err = dst->error;
                dst_release(dst);
                goto err;
        }

        /* Although skb headroom was reserved in bpf_lwt_push_ip_encap(),
         * it was reserved for the previous dst, so reserve it again here
         * in case the new dst needs more space. The call below is a no-op
         * if the skb already has enough headroom.
         */
        err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
        if (unlikely(err))
                goto err;

        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

        err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
        if (unlikely(err))
                return err;

        /* ip[6]_finish_output2 understands LWTUNNEL_XMIT_DONE */
        return LWTUNNEL_XMIT_DONE;

err:
        kfree_skb(skb);
        return err;
}

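/* lwtunnel xmit hook: run the BPF_PROG_TYPE_LWT_XMIT program and map its
 * verdict onto the LWTUNNEL_XMIT_* return codes expected by the IP layer.
 */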
static int bpf_xmit(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct bpf_lwt *bpf;

        bpf = bpf_lwt_lwtunnel(dst->lwtstate);
        if (bpf->xmit.prog) {
                __be16 proto = skb->protocol;
                int ret;

                ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
                switch (ret) {
                case BPF_OK:
                        /* If the header changed, e.g. via bpf_lwt_push_encap(),
                         * and the protocol changed with it, the program should
                         * have returned BPF_LWT_REROUTE instead.
                         */
                        if (skb->protocol != proto) {
                                kfree_skb(skb);
                                return -EINVAL;
                        }
                        /* If the header was expanded, the headroom might now
                         * be too small for the L2 header that follows; expand
                         * it as needed.
                         */
                        ret = xmit_check_hhlen(skb);
                        if (unlikely(ret))
                                return ret;

                        return LWTUNNEL_XMIT_CONTINUE;
                case BPF_REDIRECT:
                        return LWTUNNEL_XMIT_DONE;
                case BPF_LWT_REROUTE:
                        return bpf_lwt_xmit_reroute(skb);
                default:
                        return ret;
                }
        }

        return LWTUNNEL_XMIT_CONTINUE;
}

static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
{
        if (prog->prog)
                bpf_prog_put(prog->prog);

        kfree(prog->name);
}

static void bpf_destroy_state(struct lwtunnel_state *lwt)
{
        struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

        bpf_lwt_prog_destroy(&bpf->in);
        bpf_lwt_prog_destroy(&bpf->out);
        bpf_lwt_prog_destroy(&bpf->xmit);
}

static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = {
        [LWT_BPF_PROG_FD]   = { .type = NLA_U32, },
        [LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
                                .len = MAX_PROG_NAME },
};

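/* Parse one nested LWT_BPF_{IN,OUT,XMIT} attribute: both the program fd
 * and its name are mandatory, and the fd must refer to a program of the
 * expected type.
 */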
static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
                          enum bpf_prog_type type)
{
        struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
        struct bpf_prog *p;
        int ret;
        u32 fd;

        ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr,
                                          bpf_prog_policy, NULL);
        if (ret < 0)
                return ret;

        if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
                return -EINVAL;

        prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
        if (!prog->name)
                return -ENOMEM;

        fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
        p = bpf_prog_get_type(fd, type);
        if (IS_ERR(p))
                return PTR_ERR(p);

        prog->prog = p;

        return 0;
}

static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
        [LWT_BPF_IN]            = { .type = NLA_NESTED, },
        [LWT_BPF_OUT]           = { .type = NLA_NESTED, },
        [LWT_BPF_XMIT]          = { .type = NLA_NESTED, },
        [LWT_BPF_XMIT_HEADROOM] = { .type = NLA_U32 },
};

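/* Build the lwtunnel state from the LWT_BPF_* netlink attributes. At
 * least one of the three hook programs must be given; an optional xmit
 * headroom is validated against LWT_BPF_MAX_HEADROOM.
 */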
static int bpf_build_state(struct nlattr *nla,
                           unsigned int family, const void *cfg,
                           struct lwtunnel_state **ts,
                           struct netlink_ext_ack *extack)
{
        struct nlattr *tb[LWT_BPF_MAX + 1];
        struct lwtunnel_state *newts;
        struct bpf_lwt *bpf;
        int ret;

        if (family != AF_INET && family != AF_INET6)
                return -EAFNOSUPPORT;

        ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy,
                                          extack);
        if (ret < 0)
                return ret;

        if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
                return -EINVAL;

        newts = lwtunnel_state_alloc(sizeof(*bpf));
        if (!newts)
                return -ENOMEM;

        newts->type = LWTUNNEL_ENCAP_BPF;
        bpf = bpf_lwt_lwtunnel(newts);

        if (tb[LWT_BPF_IN]) {
                newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
                ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
                                     BPF_PROG_TYPE_LWT_IN);
                if (ret < 0)
                        goto errout;
        }

        if (tb[LWT_BPF_OUT]) {
                newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
                ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
                                     BPF_PROG_TYPE_LWT_OUT);
                if (ret < 0)
                        goto errout;
        }

        if (tb[LWT_BPF_XMIT]) {
                newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
                ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
                                     BPF_PROG_TYPE_LWT_XMIT);
                if (ret < 0)
                        goto errout;
        }

        if (tb[LWT_BPF_XMIT_HEADROOM]) {
                u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);

                if (headroom > LWT_BPF_MAX_HEADROOM) {
                        ret = -ERANGE;
                        goto errout;
                }

                newts->headroom = headroom;
        }

        bpf->family = family;
        *ts = newts;

        return 0;

errout:
        bpf_destroy_state(newts);
        kfree(newts);
        return ret;
}

static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
                             struct bpf_lwt_prog *prog)
{
        struct nlattr *nest;

        if (!prog->prog)
                return 0;

        nest = nla_nest_start_noflag(skb, attr);
        if (!nest)
                return -EMSGSIZE;

        if (prog->name &&
            nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
                return -EMSGSIZE;

        return nla_nest_end(skb, nest);
}

static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
{
        struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

        if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
            bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
            bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
                return -EMSGSIZE;

        return 0;
}

static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
{
        int nest_len = nla_total_size(sizeof(struct nlattr)) +
                       nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */
                       0;

        return nest_len + /* LWT_BPF_IN */
               nest_len + /* LWT_BPF_OUT */
               nest_len + /* LWT_BPF_XMIT */
               0;
}

static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
        /* FIXME:
         * The LWT state is currently rebuilt for delete requests, which
         * results in a new bpf_prog instance. Compare names for now.
         */
        if (!a->name && !b->name)
                return 0;

        if (!a->name || !b->name)
                return 1;

        return strcmp(a->name, b->name);
}

static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
        struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a);
        struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b);

        return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) ||
               bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) ||
               bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit);
}

static const struct lwtunnel_encap_ops bpf_encap_ops = {
        .build_state    = bpf_build_state,
        .destroy_state  = bpf_destroy_state,
        .input          = bpf_input,
        .output         = bpf_output,
        .xmit           = bpf_xmit,
        .fill_encap     = bpf_fill_encap_info,
        .get_encap_size = bpf_encap_nlsize,
        .cmp_encap      = bpf_encap_cmp,
        .owner          = THIS_MODULE,
};

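/* After encapsulation the inner GSO segments must shrink by the pushed
 * header length; SKB_GSO_DODGY forces revalidation of the GSO parameters
 * and clearing gso_segs makes the stack recompute the segment count.
 */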
static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
                           int encap_len)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        gso_type |= SKB_GSO_DODGY;
        shinfo->gso_type |= gso_type;
        skb_decrease_gso_size(shinfo, encap_len);
        shinfo->gso_segs = 0;
        return 0;
}

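/* Fix up the GSO type of a GSO skb after an outer IP header was pushed,
 * based on the encapsulating protocol (GRE, UDP or plain IP-in-IP).
 */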
static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
{
        int next_hdr_offset;
        void *next_hdr;
        __u8 protocol;

        /* SCTP and UDP_L4 GSO need more nuanced handling than what
         * handle_gso_type() does above: skb_decrease_gso_size() is not
         * enough. So, at the moment, only TCP GSO packets are let through.
         */
        if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
                return -ENOTSUPP;

        if (ipv4) {
                protocol = ip_hdr(skb)->protocol;
                next_hdr_offset = sizeof(struct iphdr);
                next_hdr = skb_network_header(skb) + next_hdr_offset;
        } else {
                protocol = ipv6_hdr(skb)->nexthdr;
                next_hdr_offset = sizeof(struct ipv6hdr);
                next_hdr = skb_network_header(skb) + next_hdr_offset;
        }

        switch (protocol) {
        case IPPROTO_GRE:
                next_hdr_offset += sizeof(struct gre_base_hdr);
                if (next_hdr_offset > encap_len)
                        return -EINVAL;

                if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
                        return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
                                               encap_len);
                return handle_gso_type(skb, SKB_GSO_GRE, encap_len);

        case IPPROTO_UDP:
                next_hdr_offset += sizeof(struct udphdr);
                if (next_hdr_offset > encap_len)
                        return -EINVAL;

                if (((struct udphdr *)next_hdr)->check)
                        return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
                                               encap_len);
                return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);

        case IPPROTO_IP:
        case IPPROTO_IPV6:
                if (ipv4)
                        return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
                else
                        return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);

        default:
                return -EPROTONOSUPPORT;
        }
}

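/* Helper backing bpf_lwt_push_encap() with BPF_LWT_ENCAP_IP: validate
 * @hdr as an IPv4/IPv6 header of @len bytes, push it in front of the
 * packet, fix up skb metadata and checksums, and adjust GSO state if
 * needed.
 */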
int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
{
        struct iphdr *iph;
        bool ipv4;
        int err;

        if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
                return -EINVAL;

        /* validate protocol and length */
        iph = (struct iphdr *)hdr;
        if (iph->version == 4) {
                ipv4 = true;
                if (unlikely(len < iph->ihl * 4))
                        return -EINVAL;
        } else if (iph->version == 6) {
                ipv4 = false;
                if (unlikely(len < sizeof(struct ipv6hdr)))
                        return -EINVAL;
        } else {
                return -EINVAL;
        }

        if (ingress)
                err = skb_cow_head(skb, len + skb->mac_len);
        else
                err = skb_cow_head(skb,
                                   len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
        if (unlikely(err))
                return err;

        /* push the encap headers and fix pointers */
        skb_reset_inner_headers(skb);
        skb_reset_inner_mac_header(skb);  /* mac header is not yet set */
        skb_set_inner_protocol(skb, skb->protocol);
        skb->encapsulation = 1;
        skb_push(skb, len);
        if (ingress)
                skb_postpush_rcsum(skb, iph, len);
        skb_reset_network_header(skb);
        memcpy(skb_network_header(skb), hdr, len);
        bpf_compute_data_pointers(skb);
        skb_clear_hash(skb);

        if (ipv4) {
                skb->protocol = htons(ETH_P_IP);
                iph = ip_hdr(skb);

                if (!iph->check)
                        iph->check = ip_fast_csum((unsigned char *)iph,
                                                  iph->ihl);
        } else {
                skb->protocol = htons(ETH_P_IPV6);
        }

        if (skb_is_gso(skb))
                return handle_gso_encap(skb, ipv4, len);

        return 0;
}

static int __init bpf_lwt_init(void)
{
        return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
}

subsys_initcall(bpf_lwt_init)