netfilter: nft_flow_offload: use direct xmit if hardware offload is enabled
net/netfilter/nft_flow_offload.c
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_tables.h>
#include <net/ip.h> /* for ipv4 options. */
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_flow_table.h>

struct nft_flow_offload {
	struct nft_flowtable	*flowtable;
};

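/* Packets with an xfrm policy attached to their dst must take the xfrm
 * transmit path; everything else goes out via the neighbour layer.
 */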
static enum flow_offload_xmit_type nft_xmit_type(struct dst_entry *dst)
{
	if (dst_xfrm(dst))
		return FLOW_OFFLOAD_XMIT_XFRM;

	return FLOW_OFFLOAD_XMIT_NEIGH;
}

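/* The output device of one direction is the input device of the reply
 * direction: record its ifindex in the reply tuple, and cache the dst
 * and transmit type in this direction's tuple.
 */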
static void nft_default_forward_path(struct nf_flow_route *route,
				     struct dst_entry *dst_cache,
				     enum ip_conntrack_dir dir)
{
	route->tuple[!dir].in.ifindex	= dst_cache->dev->ifindex;
	route->tuple[dir].dst		= dst_cache;
	route->tuple[dir].xmit_type	= nft_xmit_type(dst_cache);
}

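/* Resolve the next-hop hardware address through the neighbour cache,
 * bail out if the entry is not (yet) valid, then ask the device stack
 * to describe the full transmit path (vlan, pppoe, bridge, dsa).
 */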
static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
				     const struct dst_entry *dst_cache,
				     const struct nf_conn *ct,
				     enum ip_conntrack_dir dir, u8 *ha,
				     struct net_device_path_stack *stack)
{
	const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
	struct net_device *dev = dst_cache->dev;
	struct neighbour *n;
	u8 nud_state;

	n = dst_neigh_lookup(dst_cache, daddr);
	if (!n)
		return -1;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(ha, n->ha);
	read_unlock_bh(&n->lock);
	neigh_release(n);

	if (!(nud_state & NUD_VALID))
		return -1;

	return dev_fill_forward_path(dev, ha, stack);
}

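/* Scratchpad filled in while walking the device path stack: the real
 * input/output devices, up to NF_FLOW_TABLE_ENCAP_MAX encapsulation
 * tags and the ethernet addresses needed for direct transmission.
 */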
struct nft_forward_info {
	const struct net_device *indev;
	const struct net_device *outdev;
	const struct net_device *hw_outdev;
	struct id {
		__u16	id;
		__be16	proto;
	} encap[NF_FLOW_TABLE_ENCAP_MAX];
	u8 num_encaps;
	u8 h_source[ETH_ALEN];
	u8 h_dest[ETH_ALEN];
	enum flow_offload_xmit_type xmit_type;
};

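/* Direct transmission needs a real ethernet device with a valid
 * unicast address; loopback and non-ethernet devices do not qualify.
 */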
static bool nft_is_valid_ether_device(const struct net_device *dev)
{
	if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
	    dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
		return false;

	return true;
}

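/* Flatten the path stack collected by dev_fill_forward_path() into
 * nft_forward_info: record the real input device, accumulate vlan and
 * pppoe encapsulation tags, and pick up the ethernet addresses for
 * direct transmission. A DSA entry terminates the walk. If the
 * flowtable has hardware offload enabled and the input device is a
 * valid ethernet device, use the direct transmit path.
 */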
static void nft_dev_path_info(const struct net_device_path_stack *stack,
			      struct nft_forward_info *info,
			      unsigned char *ha, struct nf_flowtable *flowtable)
{
	const struct net_device_path *path;
	int i;

	memcpy(info->h_dest, ha, ETH_ALEN);

	for (i = 0; i < stack->num_paths; i++) {
		path = &stack->path[i];
		switch (path->type) {
		case DEV_PATH_ETHERNET:
		case DEV_PATH_DSA:
		case DEV_PATH_VLAN:
		case DEV_PATH_PPPOE:
			info->indev = path->dev;
			if (is_zero_ether_addr(info->h_source))
				memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);

			if (path->type == DEV_PATH_ETHERNET)
				break;
			if (path->type == DEV_PATH_DSA) {
				i = stack->num_paths;
				break;
			}

			/* DEV_PATH_VLAN and DEV_PATH_PPPOE */
			if (info->num_encaps >= NF_FLOW_TABLE_ENCAP_MAX) {
				info->indev = NULL;
				break;
			}
			info->outdev = path->dev;
			info->encap[info->num_encaps].id = path->encap.id;
			info->encap[info->num_encaps].proto = path->encap.proto;
			info->num_encaps++;
			if (path->type == DEV_PATH_PPPOE)
				memcpy(info->h_dest, path->encap.h_dest, ETH_ALEN);
			break;
		case DEV_PATH_BRIDGE:
			if (is_zero_ether_addr(info->h_source))
				memcpy(info->h_source, path->dev->dev_addr, ETH_ALEN);

			switch (path->bridge.vlan_mode) {
			case DEV_PATH_BR_VLAN_TAG:
				info->encap[info->num_encaps].id = path->bridge.vlan_id;
				info->encap[info->num_encaps].proto = path->bridge.vlan_proto;
				info->num_encaps++;
				break;
			case DEV_PATH_BR_VLAN_UNTAG:
				info->num_encaps--;
				break;
			case DEV_PATH_BR_VLAN_KEEP:
				break;
			}
			info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
			break;
		default:
			info->indev = NULL;
			break;
		}
	}
	if (!info->outdev)
		info->outdev = info->indev;

	info->hw_outdev = info->indev;

	if (nf_flowtable_hw_offload(flowtable) &&
	    nft_is_valid_ether_device(info->indev))
		info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
}

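/* Check whether the given device is one of the flowtable's hook
 * devices.
 */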
static bool nft_flowtable_find_dev(const struct net_device *dev,
				   struct nft_flowtable *ft)
{
	struct nft_hook *hook;
	bool found = false;

	list_for_each_entry_rcu(hook, &ft->hook_list, list) {
		if (hook->ops.dev != dev)
			continue;

		found = true;
		break;
	}

	return found;
}

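/* Refine the default forward path for one direction: only if the
 * resolved input device is attached to the flowtable, store its
 * ifindex and encapsulation tags in the reply tuple, and for direct
 * transmission also the ethernet addresses and output devices.
 */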
static void nft_dev_forward_path(struct nf_flow_route *route,
				 const struct nf_conn *ct,
				 enum ip_conntrack_dir dir,
				 struct nft_flowtable *ft)
{
	const struct dst_entry *dst = route->tuple[dir].dst;
	struct net_device_path_stack stack;
	struct nft_forward_info info = {};
	unsigned char ha[ETH_ALEN];
	int i;

	if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
		nft_dev_path_info(&stack, &info, ha, &ft->data);

	if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
		return;

	route->tuple[!dir].in.ifindex = info.indev->ifindex;
	for (i = 0; i < info.num_encaps; i++) {
		route->tuple[!dir].in.encap[i].id = info.encap[i].id;
		route->tuple[!dir].in.encap[i].proto = info.encap[i].proto;
	}
	route->tuple[!dir].in.num_encaps = info.num_encaps;

	if (info.xmit_type == FLOW_OFFLOAD_XMIT_DIRECT) {
		memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
		memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
		route->tuple[dir].out.ifindex = info.outdev->ifindex;
		route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex;
		route->tuple[dir].xmit_type = info.xmit_type;
	}
}

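/* Build the routes for both directions: the packet's own dst covers
 * its direction, a route lookup back towards the sender covers the
 * reply direction. Only if both directions would use the neighbour
 * transmit path, try to upgrade them via the device forward path.
 */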
static int nft_flow_route(const struct nft_pktinfo *pkt,
			  const struct nf_conn *ct,
			  struct nf_flow_route *route,
			  enum ip_conntrack_dir dir,
			  struct nft_flowtable *ft)
{
	struct dst_entry *this_dst = skb_dst(pkt->skb);
	struct dst_entry *other_dst = NULL;
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	switch (nft_pf(pkt)) {
	case NFPROTO_IPV4:
		fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
		fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
		break;
	case NFPROTO_IPV6:
		fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
		fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
		break;
	}

	nf_route(nft_net(pkt), &other_dst, &fl, false, nft_pf(pkt));
	if (!other_dst)
		return -ENOENT;

	nft_default_forward_path(route, this_dst, dir);
	nft_default_forward_path(route, other_dst, !dir);

	if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH &&
	    route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) {
		nft_dev_forward_path(route, ct, dir, ft);
		nft_dev_forward_path(route, ct, !dir, ft);
	}

	return 0;
}

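/* The flowtable fast path cannot handle packets with an IPsec secpath
 * or IPv4 options, so leave those to the classic forwarding path.
 */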
static bool nft_flow_offload_skip(struct sk_buff *skb, int family)
{
	if (skb_sec_path(skb))
		return true;

	if (family == NFPROTO_IPV4) {
		const struct ip_options *opt;

		opt = &(IPCB(skb)->opt);

		if (unlikely(opt->optlen))
			return true;
	}

	return false;
}

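/* Expression evaluation: move eligible flows into the flowtable. Only
 * confirmed TCP (not closing) and UDP conntrack entries without
 * helpers, sequence adjustment or NAT clashes qualify. IPS_OFFLOAD_BIT
 * guards against concurrent insertion; TCP window tracking is set to
 * liberal mode since packets bypass conntrack while offloaded.
 */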
static void nft_flow_offload_eval(const struct nft_expr *expr,
				  struct nft_regs *regs,
				  const struct nft_pktinfo *pkt)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);
	struct nf_flowtable *flowtable = &priv->flowtable->data;
	struct tcphdr _tcph, *tcph = NULL;
	struct nf_flow_route route = {};
	enum ip_conntrack_info ctinfo;
	struct flow_offload *flow;
	enum ip_conntrack_dir dir;
	struct nf_conn *ct;
	int ret;

	if (nft_flow_offload_skip(pkt->skb, nft_pf(pkt)))
		goto out;

	ct = nf_ct_get(pkt->skb, &ctinfo);
	if (!ct)
		goto out;

	switch (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum) {
	case IPPROTO_TCP:
		tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff,
					  sizeof(_tcph), &_tcph);
		if (unlikely(!tcph || tcph->fin || tcph->rst))
			goto out;
		break;
	case IPPROTO_UDP:
		break;
	default:
		goto out;
	}

	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & (IPS_SEQ_ADJUST | IPS_NAT_CLASH))
		goto out;

	if (!nf_ct_is_confirmed(ct))
		goto out;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		goto out;

	dir = CTINFO2DIR(ctinfo);
	if (nft_flow_route(pkt, ct, &route, dir, priv->flowtable) < 0)
		goto err_flow_route;

	flow = flow_offload_alloc(ct);
	if (!flow)
		goto err_flow_alloc;

	if (flow_offload_route_init(flow, &route) < 0)
		goto err_flow_add;

	if (tcph) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	ret = flow_offload_add(flowtable, flow);
	if (ret < 0)
		goto err_flow_add;

	dst_release(route.tuple[!dir].dst);
	return;

err_flow_add:
	flow_offload_free(flow);
err_flow_alloc:
	dst_release(route.tuple[!dir].dst);
err_flow_route:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
out:
	regs->verdict.code = NFT_BREAK;
}

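/* This expression is only usable from the forward hook. */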
static int nft_flow_offload_validate(const struct nft_ctx *ctx,
				     const struct nft_expr *expr,
				     const struct nft_data **data)
{
	unsigned int hook_mask = (1 << NF_INET_FORWARD);

	return nft_chain_validate_hooks(ctx->chain, hook_mask);
}

static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = {
	[NFTA_FLOW_TABLE_NAME]	= { .type = NLA_STRING,
				    .len = NFT_NAME_MAXLEN - 1 },
};

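/* Bind the expression to its flowtable: bump the flowtable's use count
 * and take a conntrack netns reference.
 */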
static int nft_flow_offload_init(const struct nft_ctx *ctx,
				 const struct nft_expr *expr,
				 const struct nlattr * const tb[])
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);
	u8 genmask = nft_genmask_next(ctx->net);
	struct nft_flowtable *flowtable;

	if (!tb[NFTA_FLOW_TABLE_NAME])
		return -EINVAL;

	flowtable = nft_flowtable_lookup(ctx->table, tb[NFTA_FLOW_TABLE_NAME],
					 genmask);
	if (IS_ERR(flowtable))
		return PTR_ERR(flowtable);

	priv->flowtable = flowtable;
	flowtable->use++;

	return nf_ct_netns_get(ctx->net, ctx->family);
}

static void nft_flow_offload_deactivate(const struct nft_ctx *ctx,
					const struct nft_expr *expr,
					enum nft_trans_phase phase)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);

	nf_tables_deactivate_flowtable(ctx, priv->flowtable, phase);
}

static void nft_flow_offload_activate(const struct nft_ctx *ctx,
				      const struct nft_expr *expr)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);

	priv->flowtable->use++;
}

static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
				     const struct nft_expr *expr)
{
	nf_ct_netns_put(ctx->net, ctx->family);
}

static int nft_flow_offload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	struct nft_flow_offload *priv = nft_expr_priv(expr);

	if (nla_put_string(skb, NFTA_FLOW_TABLE_NAME, priv->flowtable->name))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static struct nft_expr_type nft_flow_offload_type;
static const struct nft_expr_ops nft_flow_offload_ops = {
	.type		= &nft_flow_offload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_flow_offload)),
	.eval		= nft_flow_offload_eval,
	.init		= nft_flow_offload_init,
	.activate	= nft_flow_offload_activate,
	.deactivate	= nft_flow_offload_deactivate,
	.destroy	= nft_flow_offload_destroy,
	.validate	= nft_flow_offload_validate,
	.dump		= nft_flow_offload_dump,
};

static struct nft_expr_type nft_flow_offload_type __read_mostly = {
	.name		= "flow_offload",
	.ops		= &nft_flow_offload_ops,
	.policy		= nft_flow_offload_policy,
	.maxattr	= NFTA_FLOW_MAX,
	.owner		= THIS_MODULE,
};

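/* Flush flowtable entries that use a netdevice which is going down. */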
static int flow_offload_netdev_event(struct notifier_block *this,
				     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event != NETDEV_DOWN)
		return NOTIFY_DONE;

	nf_flow_table_cleanup(dev);

	return NOTIFY_DONE;
}

static struct notifier_block flow_offload_netdev_notifier = {
	.notifier_call	= flow_offload_netdev_event,
};

static int __init nft_flow_offload_module_init(void)
{
	int err;

	err = register_netdevice_notifier(&flow_offload_netdev_notifier);
	if (err)
		goto err;

	err = nft_register_expr(&nft_flow_offload_type);
	if (err < 0)
		goto register_expr;

	return 0;

register_expr:
	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
err:
	return err;
}

static void __exit nft_flow_offload_module_exit(void)
{
	nft_unregister_expr(&nft_flow_offload_type);
	unregister_netdevice_notifier(&flow_offload_netdev_notifier);
}

module_init(nft_flow_offload_module_init);
module_exit(nft_flow_offload_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
MODULE_ALIAS_NFT_EXPR("flow_offload");
MODULE_DESCRIPTION("nftables hardware flow offload module");