net/core/lwt_bpf.c
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Thomas Graf <tgraf@tgraf.ch>
 */

#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <net/lwtunnel.h>
#include <net/gre.h>
#include <net/ip6_route.h>
#include <net/ipv6_stubs.h>

struct bpf_lwt_prog {
        struct bpf_prog *prog;
        char *name;
};

struct bpf_lwt {
        struct bpf_lwt_prog in;
        struct bpf_lwt_prog out;
        struct bpf_lwt_prog xmit;
        int family;
};

#define MAX_PROG_NAME 256

static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt)
{
        return (struct bpf_lwt *)lwt->data;
}

#define NO_REDIRECT false
#define CAN_REDIRECT true

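/* Run a BPF program on @skb and translate its verdict for the LWT layer.
 * BPF_REDIRECT is only honoured when @can_redirect is true (i.e. from the
 * xmit hook); elsewhere it is demoted to BPF_OK with a one-time warning.
 */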
static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt,
                       struct dst_entry *dst, bool can_redirect)
{
        int ret;

        /* Migration disable and BH disable are needed to protect per-cpu
         * redirect_info between BPF prog and skb_do_redirect().
         */
        migrate_disable();
        local_bh_disable();
        bpf_compute_data_pointers(skb);
        ret = bpf_prog_run_save_cb(lwt->prog, skb);

        switch (ret) {
        case BPF_OK:
        case BPF_LWT_REROUTE:
                break;

        case BPF_REDIRECT:
                if (unlikely(!can_redirect)) {
                        pr_warn_once("Illegal redirect return code in prog %s\n",
                                     lwt->name ? : "<unknown>");
                        ret = BPF_OK;
                } else {
                        skb_reset_mac_header(skb);
                        ret = skb_do_redirect(skb);
                        if (ret == 0)
                                ret = BPF_REDIRECT;
                }
                break;

        case BPF_DROP:
                kfree_skb(skb);
                ret = -EPERM;
                break;

        default:
                pr_warn_once("bpf-lwt: Illegal return value %u, expect packet loss\n", ret);
                kfree_skb(skb);
                ret = -EINVAL;
                break;
        }

        local_bh_enable();
        migrate_enable();

        return ret;
}

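/* Redo the input route lookup after a program returned BPF_LWT_REROUTE,
 * e.g. because it rewrote or encapsulated the packet headers.
 */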
static int bpf_lwt_input_reroute(struct sk_buff *skb)
{
        int err = -EINVAL;

        if (skb->protocol == htons(ETH_P_IP)) {
                struct net_device *dev = skb_dst(skb)->dev;
                struct iphdr *iph = ip_hdr(skb);

                dev_hold(dev);
                skb_dst_drop(skb);
                err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
                                           iph->tos, dev);
                dev_put(dev);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
                skb_dst_drop(skb);
                err = ipv6_stub->ipv6_route_input(skb);
        } else {
                err = -EAFNOSUPPORT;
        }

        if (err)
                goto err;
        return dst_input(skb);

err:
        kfree_skb(skb);
        return err;
}

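/* lwtunnel input hook: run the "in" program, then hand the packet back to
 * the original input handler (or reroute it if the program requested it).
 */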
static int bpf_input(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct bpf_lwt *bpf;
        int ret;

        bpf = bpf_lwt_lwtunnel(dst->lwtstate);
        if (bpf->in.prog) {
                ret = run_lwt_bpf(skb, &bpf->in, dst, NO_REDIRECT);
                if (ret < 0)
                        return ret;
                if (ret == BPF_LWT_REROUTE)
                        return bpf_lwt_input_reroute(skb);
        }

        if (unlikely(!dst->lwtstate->orig_input)) {
                kfree_skb(skb);
                return -EINVAL;
        }

        return dst->lwtstate->orig_input(skb);
}

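/* lwtunnel output hook: run the "out" program before the original dst
 * output handler.  Redirects are not allowed at this stage.
 */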
static int bpf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct bpf_lwt *bpf;
        int ret;

        bpf = bpf_lwt_lwtunnel(dst->lwtstate);
        if (bpf->out.prog) {
                ret = run_lwt_bpf(skb, &bpf->out, dst, NO_REDIRECT);
                if (ret < 0)
                        return ret;
        }

        if (unlikely(!dst->lwtstate->orig_output)) {
                pr_warn_once("orig_output not set on dst for prog %s\n",
                             bpf->out.name);
                kfree_skb(skb);
                return -EINVAL;
        }

        return dst->lwtstate->orig_output(net, sk, skb);
}

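/* Make sure the skb has enough headroom for the link-layer header of the
 * outgoing device, expanding the head if necessary.
 */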
static int xmit_check_hhlen(struct sk_buff *skb)
{
        int hh_len = skb_dst(skb)->dev->hard_header_len;

        if (skb_headroom(skb) < hh_len) {
                int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

                if (pskb_expand_head(skb, nhead, 0, GFP_ATOMIC))
                        return -ENOMEM;
        }

        return 0;
}

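/* Perform a fresh route lookup for a packet whose headers were changed by
 * the xmit program, install the new dst and transmit the packet.
 */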
static int bpf_lwt_xmit_reroute(struct sk_buff *skb)
{
        struct net_device *l3mdev = l3mdev_master_dev_rcu(skb_dst(skb)->dev);
        int oif = l3mdev ? l3mdev->ifindex : 0;
        struct dst_entry *dst = NULL;
        int err = -EAFNOSUPPORT;
        struct sock *sk;
        struct net *net;
        bool ipv4;

        if (skb->protocol == htons(ETH_P_IP))
                ipv4 = true;
        else if (skb->protocol == htons(ETH_P_IPV6))
                ipv4 = false;
        else
                goto err;

        sk = sk_to_full_sk(skb->sk);
        if (sk) {
                if (sk->sk_bound_dev_if)
                        oif = sk->sk_bound_dev_if;
                net = sock_net(sk);
        } else {
                net = dev_net(skb_dst(skb)->dev);
        }

        if (ipv4) {
                struct iphdr *iph = ip_hdr(skb);
                struct flowi4 fl4 = {};
                struct rtable *rt;

                fl4.flowi4_oif = oif;
                fl4.flowi4_mark = skb->mark;
                fl4.flowi4_uid = sock_net_uid(net, sk);
                fl4.flowi4_tos = RT_TOS(iph->tos);
                fl4.flowi4_flags = FLOWI_FLAG_ANYSRC;
                fl4.flowi4_proto = iph->protocol;
                fl4.daddr = iph->daddr;
                fl4.saddr = iph->saddr;

                rt = ip_route_output_key(net, &fl4);
                if (IS_ERR(rt)) {
                        err = PTR_ERR(rt);
                        goto err;
                }
                dst = &rt->dst;
        } else {
                struct ipv6hdr *iph6 = ipv6_hdr(skb);
                struct flowi6 fl6 = {};

                fl6.flowi6_oif = oif;
                fl6.flowi6_mark = skb->mark;
                fl6.flowi6_uid = sock_net_uid(net, sk);
                fl6.flowlabel = ip6_flowinfo(iph6);
                fl6.flowi6_proto = iph6->nexthdr;
                fl6.daddr = iph6->daddr;
                fl6.saddr = iph6->saddr;

                dst = ipv6_stub->ipv6_dst_lookup_flow(net, skb->sk, &fl6, NULL);
                if (IS_ERR(dst)) {
                        err = PTR_ERR(dst);
                        goto err;
                }
        }
        if (unlikely(dst->error)) {
                err = dst->error;
                dst_release(dst);
                goto err;
        }

        /* Although skb header was reserved in bpf_lwt_push_ip_encap(), it
         * was done for the previous dst, so we are doing it here again, in
         * case the new dst needs much more space. The call below is a noop
         * if there is enough header space in skb.
         */
        err = skb_cow_head(skb, LL_RESERVED_SPACE(dst->dev));
        if (unlikely(err))
                goto err;

        skb_dst_drop(skb);
        skb_dst_set(skb, dst);

        err = dst_output(dev_net(skb_dst(skb)->dev), skb->sk, skb);
        if (unlikely(err))
                return err;
        /* ip[6]_finish_output2 understands LWTUNNEL_XMIT_DONE */
        return LWTUNNEL_XMIT_DONE;

err:
        kfree_skb(skb);
        return err;
}

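/* lwtunnel xmit hook: the only stage at which the program may redirect the
 * packet (CAN_REDIRECT) or request a reroute after changing its headers.
 */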
static int bpf_xmit(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct bpf_lwt *bpf;

        bpf = bpf_lwt_lwtunnel(dst->lwtstate);
        if (bpf->xmit.prog) {
                __be16 proto = skb->protocol;
                int ret;

                ret = run_lwt_bpf(skb, &bpf->xmit, dst, CAN_REDIRECT);
                switch (ret) {
                case BPF_OK:
                        /* If the header changed, e.g. via bpf_lwt_push_encap,
                         * BPF_LWT_REROUTE below should have been used if the
                         * protocol was also changed.
                         */
                        if (skb->protocol != proto) {
                                kfree_skb(skb);
                                return -EINVAL;
                        }
                        /* If the header was expanded, headroom might be too
                         * small for the L2 header that is yet to come;
                         * expand as needed.
                         */
                        ret = xmit_check_hhlen(skb);
                        if (unlikely(ret))
                                return ret;

                        return LWTUNNEL_XMIT_CONTINUE;
                case BPF_REDIRECT:
                        return LWTUNNEL_XMIT_DONE;
                case BPF_LWT_REROUTE:
                        return bpf_lwt_xmit_reroute(skb);
                default:
                        return ret;
                }
        }

        return LWTUNNEL_XMIT_CONTINUE;
}

static void bpf_lwt_prog_destroy(struct bpf_lwt_prog *prog)
{
        if (prog->prog)
                bpf_prog_put(prog->prog);

        kfree(prog->name);
}

static void bpf_destroy_state(struct lwtunnel_state *lwt)
{
        struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

        bpf_lwt_prog_destroy(&bpf->in);
        bpf_lwt_prog_destroy(&bpf->out);
        bpf_lwt_prog_destroy(&bpf->xmit);
}

static const struct nla_policy bpf_prog_policy[LWT_BPF_PROG_MAX + 1] = {
        [LWT_BPF_PROG_FD]   = { .type = NLA_U32, },
        [LWT_BPF_PROG_NAME] = { .type = NLA_NUL_STRING,
                                .len = MAX_PROG_NAME },
};

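/* Parse one nested LWT_BPF_PROG_* attribute: duplicate the user-supplied
 * program name and take a reference on the BPF program behind the fd.
 */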
static int bpf_parse_prog(struct nlattr *attr, struct bpf_lwt_prog *prog,
                          enum bpf_prog_type type)
{
        struct nlattr *tb[LWT_BPF_PROG_MAX + 1];
        struct bpf_prog *p;
        int ret;
        u32 fd;

        ret = nla_parse_nested_deprecated(tb, LWT_BPF_PROG_MAX, attr,
                                          bpf_prog_policy, NULL);
        if (ret < 0)
                return ret;

        if (!tb[LWT_BPF_PROG_FD] || !tb[LWT_BPF_PROG_NAME])
                return -EINVAL;

        prog->name = nla_memdup(tb[LWT_BPF_PROG_NAME], GFP_ATOMIC);
        if (!prog->name)
                return -ENOMEM;

        fd = nla_get_u32(tb[LWT_BPF_PROG_FD]);
        p = bpf_prog_get_type(fd, type);
        if (IS_ERR(p))
                return PTR_ERR(p);

        prog->prog = p;

        return 0;
}

static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
        [LWT_BPF_IN]            = { .type = NLA_NESTED, },
        [LWT_BPF_OUT]           = { .type = NLA_NESTED, },
        [LWT_BPF_XMIT]          = { .type = NLA_NESTED, },
        [LWT_BPF_XMIT_HEADROOM] = { .type = NLA_U32 },
};

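/* Parse the netlink configuration and build the lwtunnel state holding up
 * to three programs (in, out, xmit) and an optional xmit headroom.
 */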
static int bpf_build_state(struct net *net, struct nlattr *nla,
                           unsigned int family, const void *cfg,
                           struct lwtunnel_state **ts,
                           struct netlink_ext_ack *extack)
{
        struct nlattr *tb[LWT_BPF_MAX + 1];
        struct lwtunnel_state *newts;
        struct bpf_lwt *bpf;
        int ret;

        if (family != AF_INET && family != AF_INET6)
                return -EAFNOSUPPORT;

        ret = nla_parse_nested_deprecated(tb, LWT_BPF_MAX, nla, bpf_nl_policy,
                                          extack);
        if (ret < 0)
                return ret;

        if (!tb[LWT_BPF_IN] && !tb[LWT_BPF_OUT] && !tb[LWT_BPF_XMIT])
                return -EINVAL;

        newts = lwtunnel_state_alloc(sizeof(*bpf));
        if (!newts)
                return -ENOMEM;

        newts->type = LWTUNNEL_ENCAP_BPF;
        bpf = bpf_lwt_lwtunnel(newts);

        if (tb[LWT_BPF_IN]) {
                newts->flags |= LWTUNNEL_STATE_INPUT_REDIRECT;
                ret = bpf_parse_prog(tb[LWT_BPF_IN], &bpf->in,
                                     BPF_PROG_TYPE_LWT_IN);
                if (ret < 0)
                        goto errout;
        }

        if (tb[LWT_BPF_OUT]) {
                newts->flags |= LWTUNNEL_STATE_OUTPUT_REDIRECT;
                ret = bpf_parse_prog(tb[LWT_BPF_OUT], &bpf->out,
                                     BPF_PROG_TYPE_LWT_OUT);
                if (ret < 0)
                        goto errout;
        }

        if (tb[LWT_BPF_XMIT]) {
                newts->flags |= LWTUNNEL_STATE_XMIT_REDIRECT;
                ret = bpf_parse_prog(tb[LWT_BPF_XMIT], &bpf->xmit,
                                     BPF_PROG_TYPE_LWT_XMIT);
                if (ret < 0)
                        goto errout;
        }

        if (tb[LWT_BPF_XMIT_HEADROOM]) {
                u32 headroom = nla_get_u32(tb[LWT_BPF_XMIT_HEADROOM]);

                if (headroom > LWT_BPF_MAX_HEADROOM) {
                        ret = -ERANGE;
                        goto errout;
                }

                newts->headroom = headroom;
        }

        bpf->family = family;
        *ts = newts;

        return 0;

errout:
        bpf_destroy_state(newts);
        kfree(newts);
        return ret;
}

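/* Dump one program section to netlink; only the user-supplied program name
 * is reported.
 */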
static int bpf_fill_lwt_prog(struct sk_buff *skb, int attr,
                             struct bpf_lwt_prog *prog)
{
        struct nlattr *nest;

        if (!prog->prog)
                return 0;

        nest = nla_nest_start_noflag(skb, attr);
        if (!nest)
                return -EMSGSIZE;

        if (prog->name &&
            nla_put_string(skb, LWT_BPF_PROG_NAME, prog->name))
                return -EMSGSIZE;

        return nla_nest_end(skb, nest);
}

static int bpf_fill_encap_info(struct sk_buff *skb, struct lwtunnel_state *lwt)
{
        struct bpf_lwt *bpf = bpf_lwt_lwtunnel(lwt);

        if (bpf_fill_lwt_prog(skb, LWT_BPF_IN, &bpf->in) < 0 ||
            bpf_fill_lwt_prog(skb, LWT_BPF_OUT, &bpf->out) < 0 ||
            bpf_fill_lwt_prog(skb, LWT_BPF_XMIT, &bpf->xmit) < 0)
                return -EMSGSIZE;

        return 0;
}

static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
{
        int nest_len = nla_total_size(sizeof(struct nlattr)) +
                       nla_total_size(MAX_PROG_NAME) + /* LWT_BPF_PROG_NAME */
                       0;

        return nest_len + /* LWT_BPF_IN */
               nest_len + /* LWT_BPF_OUT */
               nest_len + /* LWT_BPF_XMIT */
               0;
}

static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
        /* FIXME:
         * The LWT state is currently rebuilt for delete requests which
         * results in a new bpf_prog instance. Comparing names for now.
         */
        if (!a->name && !b->name)
                return 0;

        if (!a->name || !b->name)
                return 1;

        return strcmp(a->name, b->name);
}

static int bpf_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
{
        struct bpf_lwt *a_bpf = bpf_lwt_lwtunnel(a);
        struct bpf_lwt *b_bpf = bpf_lwt_lwtunnel(b);

        return bpf_lwt_prog_cmp(&a_bpf->in, &b_bpf->in) ||
               bpf_lwt_prog_cmp(&a_bpf->out, &b_bpf->out) ||
               bpf_lwt_prog_cmp(&a_bpf->xmit, &b_bpf->xmit);
}

static const struct lwtunnel_encap_ops bpf_encap_ops = {
        .build_state    = bpf_build_state,
        .destroy_state  = bpf_destroy_state,
        .input          = bpf_input,
        .output         = bpf_output,
        .xmit           = bpf_xmit,
        .fill_encap     = bpf_fill_encap_info,
        .get_encap_size = bpf_encap_nlsize,
        .cmp_encap      = bpf_encap_cmp,
        .owner          = THIS_MODULE,
};

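/* Adjust GSO metadata after encapsulation: mark the skb as tunneled and
 * DODGY, shrink the GSO size by the encap overhead and clear gso_segs so
 * the segment count is recomputed.
 */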
static int handle_gso_type(struct sk_buff *skb, unsigned int gso_type,
                           int encap_len)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        gso_type |= SKB_GSO_DODGY;
        shinfo->gso_type |= gso_type;
        skb_decrease_gso_size(shinfo, encap_len);
        shinfo->gso_segs = 0;
        return 0;
}

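/* Pick the GSO tunnel type matching the encap header that was pushed in
 * front of the packet (GRE, UDP or plain IP-in-IP).
 */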
static int handle_gso_encap(struct sk_buff *skb, bool ipv4, int encap_len)
{
        int next_hdr_offset;
        void *next_hdr;
        __u8 protocol;

        /* SCTP and UDP_L4 gso need more nuanced handling than what
         * handle_gso_type() does above: skb_decrease_gso_size() is not enough.
         * So at the moment only TCP GSO packets are let through.
         */
        if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
                return -ENOTSUPP;

        if (ipv4) {
                protocol = ip_hdr(skb)->protocol;
                next_hdr_offset = sizeof(struct iphdr);
                next_hdr = skb_network_header(skb) + next_hdr_offset;
        } else {
                protocol = ipv6_hdr(skb)->nexthdr;
                next_hdr_offset = sizeof(struct ipv6hdr);
                next_hdr = skb_network_header(skb) + next_hdr_offset;
        }

        switch (protocol) {
        case IPPROTO_GRE:
                next_hdr_offset += sizeof(struct gre_base_hdr);
                if (next_hdr_offset > encap_len)
                        return -EINVAL;

                if (((struct gre_base_hdr *)next_hdr)->flags & GRE_CSUM)
                        return handle_gso_type(skb, SKB_GSO_GRE_CSUM,
                                               encap_len);
                return handle_gso_type(skb, SKB_GSO_GRE, encap_len);

        case IPPROTO_UDP:
                next_hdr_offset += sizeof(struct udphdr);
                if (next_hdr_offset > encap_len)
                        return -EINVAL;

                if (((struct udphdr *)next_hdr)->check)
                        return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL_CSUM,
                                               encap_len);
                return handle_gso_type(skb, SKB_GSO_UDP_TUNNEL, encap_len);

        case IPPROTO_IP:
        case IPPROTO_IPV6:
                if (ipv4)
                        return handle_gso_type(skb, SKB_GSO_IPXIP4, encap_len);
                else
                        return handle_gso_type(skb, SKB_GSO_IPXIP6, encap_len);

        default:
                return -EPROTONOSUPPORT;
        }
}

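/* Push an IPv4 or IPv6 encapsulation header provided by a BPF program in
 * front of the packet and fix up the skb metadata, checksums and, for GSO
 * packets, the segmentation parameters.  This backs the
 * bpf_lwt_push_encap() helper.
 */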
int bpf_lwt_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress)
{
        struct iphdr *iph;
        bool ipv4;
        int err;

        if (unlikely(len < sizeof(struct iphdr) || len > LWT_BPF_MAX_HEADROOM))
                return -EINVAL;

        /* validate protocol and length */
        iph = (struct iphdr *)hdr;
        if (iph->version == 4) {
                ipv4 = true;
                if (unlikely(len < iph->ihl * 4))
                        return -EINVAL;
        } else if (iph->version == 6) {
                ipv4 = false;
                if (unlikely(len < sizeof(struct ipv6hdr)))
                        return -EINVAL;
        } else {
                return -EINVAL;
        }

        if (ingress)
                err = skb_cow_head(skb, len + skb->mac_len);
        else
                err = skb_cow_head(skb,
                                   len + LL_RESERVED_SPACE(skb_dst(skb)->dev));
        if (unlikely(err))
                return err;

        /* push the encap headers and fix pointers */
        skb_reset_inner_headers(skb);
        skb_reset_inner_mac_header(skb);  /* mac header is not yet set */
        skb_set_inner_protocol(skb, skb->protocol);
        skb->encapsulation = 1;
        skb_push(skb, len);
        if (ingress)
                skb_postpush_rcsum(skb, iph, len);
        skb_reset_network_header(skb);
        memcpy(skb_network_header(skb), hdr, len);
        bpf_compute_data_pointers(skb);
        skb_clear_hash(skb);

        if (ipv4) {
                skb->protocol = htons(ETH_P_IP);
                iph = ip_hdr(skb);

                if (!iph->check)
                        iph->check = ip_fast_csum((unsigned char *)iph,
                                                  iph->ihl);
        } else {
                skb->protocol = htons(ETH_P_IPV6);
        }

        if (skb_is_gso(skb))
                return handle_gso_encap(skb, ipv4, len);

        return 0;
}

static int __init bpf_lwt_init(void)
{
        return lwtunnel_encap_add_ops(&bpf_encap_ops, LWTUNNEL_ENCAP_BPF);
}

subsys_initcall(bpf_lwt_init)