1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/kernel.h>
3 #include <linux/init.h>
4 #include <linux/module.h>
5 #include <linux/seqlock.h>
6 #include <linux/netlink.h>
7 #include <linux/netfilter.h>
8 #include <linux/netfilter/nf_tables.h>
9 #include <net/netfilter/nf_tables.h>
10 #include <net/dst_metadata.h>
11 #include <net/ip_tunnels.h>
12 #include <net/vxlan.h>
13 #include <net/erspan.h>
14 #include <net/geneve.h>
/* Bit-field members of what is presumably struct nft_tunnel, the private
 * data of the "tunnel" expression.  The declaration head and the
 * dreg/len members (referenced later as priv->dreg / priv->len) are not
 * visible in this excerpt — the embedded original-file numbering has
 * gaps here (15-16, 18, 20-22 missing).
 */
17 enum nft_tunnel_keys key:8;
19 enum nft_tunnel_mode mode:8;
/* Expression eval: load the selected tunnel attribute of pkt->skb into
 * the destination register, or set NFT_BREAK when no matching tunnel
 * metadata exists.  Several lines are missing from this excerpt
 * (opening brace, the switch over priv->key with its case labels, and
 * early returns — original-file numbering gaps), so the control flow
 * shown below is partial.
 */
23 static void nft_tunnel_get_eval(const struct nft_expr *expr,
24 struct nft_regs *regs,
25 const struct nft_pktinfo *pkt)
27 const struct nft_tunnel *priv = nft_expr_priv(expr);
/* NOTE(review): "®s" appears to be mojibake for "&regs" (the '&' and
 * 'r' fused into the registered-trademark glyph during extraction) —
 * as written this line cannot compile.  Confirm against upstream.
 */
28 u32 *dest = ®s->data[priv->dreg];
29 struct ip_tunnel_info *tun_info;
31 tun_info = skb_tunnel_info(pkt->skb);
/* Presumably the "tunnel existence" case: false when no tunnel info,
 * true when priv->mode matches the metadata direction (RX = no TX bit,
 * TX = TX bit set, NONE = either).
 */
36 nft_reg_store8(dest, false);
39 if (priv->mode == NFT_TUNNEL_MODE_NONE ||
40 (priv->mode == NFT_TUNNEL_MODE_RX &&
41 !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
42 (priv->mode == NFT_TUNNEL_MODE_TX &&
43 (tun_info->mode & IP_TUNNEL_INFO_TX)))
44 nft_reg_store8(dest, true);
46 nft_reg_store8(dest, false);
50 regs->verdict.code = NFT_BREAK;
/* Presumably the tunnel-id case: same mode filter, then store the
 * 32-bit id converted to host byte order.
 */
53 if (priv->mode == NFT_TUNNEL_MODE_NONE ||
54 (priv->mode == NFT_TUNNEL_MODE_RX &&
55 !(tun_info->mode & IP_TUNNEL_INFO_TX)) ||
56 (priv->mode == NFT_TUNNEL_MODE_TX &&
57 (tun_info->mode & IP_TUNNEL_INFO_TX)))
58 *dest = ntohl(tunnel_id_to_key32(tun_info->key.tun_id));
60 regs->verdict.code = NFT_BREAK;
/* Default / unsupported key: break rule evaluation. */
64 regs->verdict.code = NFT_BREAK;
/* Netlink attribute policy for the "tunnel" expression: all three
 * attributes are 32-bit.  The closing "};" is on a line missing from
 * this excerpt.
 */
68 static const struct nla_policy nft_tunnel_policy[NFTA_TUNNEL_MAX + 1] = {
69 [NFTA_TUNNEL_KEY] = { .type = NLA_U32 },
70 [NFTA_TUNNEL_DREG] = { .type = NLA_U32 },
71 [NFTA_TUNNEL_MODE] = { .type = NLA_U32 },
/* Expression init: parse KEY/DREG/MODE into priv and register the
 * destination register.  Error returns, the code that derives "len"
 * from priv->key (lines 86-96 of the original), and the closing brace
 * are missing from this excerpt.
 */
74 static int nft_tunnel_get_init(const struct nft_ctx *ctx,
75 const struct nft_expr *expr,
76 const struct nlattr * const tb[])
78 struct nft_tunnel *priv = nft_expr_priv(expr);
/* KEY and DREG are mandatory; the error return itself is not visible. */
81 if (!tb[NFTA_TUNNEL_KEY] ||
82 !tb[NFTA_TUNNEL_DREG])
85 priv->key = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY]));
/* MODE is optional and range-checked; defaults to NONE when absent. */
97 if (tb[NFTA_TUNNEL_MODE]) {
98 priv->mode = ntohl(nla_get_be32(tb[NFTA_TUNNEL_MODE]));
99 if (priv->mode > NFT_TUNNEL_MODE_MAX)
102 priv->mode = NFT_TUNNEL_MODE_NONE;
/* "len" is computed on lines not shown — presumably per priv->key. */
106 return nft_parse_register_store(ctx, tb[NFTA_TUNNEL_DREG], &priv->dreg,
107 NULL, NFT_DATA_VALUE, len);
/* Expression dump: emit KEY, DREG and MODE back to user space, the
 * mirror of nft_tunnel_get_init (KEY/MODE converted back to network
 * byte order).  The success return and the nla_put_failure label are on
 * lines missing from this excerpt.
 */
110 static int nft_tunnel_get_dump(struct sk_buff *skb,
111 const struct nft_expr *expr)
113 const struct nft_tunnel *priv = nft_expr_priv(expr);
115 if (nla_put_be32(skb, NFTA_TUNNEL_KEY, htonl(priv->key)))
116 goto nla_put_failure;
117 if (nft_dump_register(skb, NFTA_TUNNEL_DREG, priv->dreg))
118 goto nla_put_failure;
119 if (nla_put_be32(skb, NFTA_TUNNEL_MODE, htonl(priv->mode)))
120 goto nla_put_failure;
/* Register-tracking reduction: when an identical tunnel expression
 * already feeds priv->dreg, this one can presumably be elided.  The
 * actual return statements and closing brace are on lines missing from
 * this excerpt.
 */
127 static bool nft_tunnel_get_reduce(struct nft_regs_track *track,
128 const struct nft_expr *expr)
130 const struct nft_tunnel *priv = nft_expr_priv(expr);
131 const struct nft_tunnel *tunnel;
/* Register tracked by a different selector: record ourselves instead. */
133 if (!nft_reg_track_cmp(track, expr, priv->dreg)) {
134 nft_reg_track_update(track, expr, priv->dreg, priv->len);
138 tunnel = nft_expr_priv(track->regs[priv->dreg].selector);
/* Same selector type but different key/dreg/mode: update tracking. */
139 if (priv->key != tunnel->key ||
140 priv->dreg != tunnel->dreg ||
141 priv->mode != tunnel->mode) {
142 nft_reg_track_update(track, expr, priv->dreg, priv->len);
/* Presumably reducible only if no bitwise op has mangled the register. */
146 if (!track->regs[priv->dreg].bitwise)
/* Forward declaration of the expression type so the ops table can
 * reference it, followed by the ops table itself (closing "};" not
 * visible in this excerpt).
 */
152 static struct nft_expr_type nft_tunnel_type;
153 static const struct nft_expr_ops nft_tunnel_get_ops = {
154 .type = &nft_tunnel_type,
155 .size = NFT_EXPR_SIZE(sizeof(struct nft_tunnel)),
156 .eval = nft_tunnel_get_eval,
157 .init = nft_tunnel_get_init,
158 .dump = nft_tunnel_get_dump,
159 .reduce = nft_tunnel_get_reduce,
/* Expression type definition.  The .name initializer (line 163 of the
 * original, presumably "tunnel" per the MODULE_ALIAS below) and the
 * closing "};" are not visible in this excerpt.
 */
162 static struct nft_expr_type nft_tunnel_type __read_mostly = {
164 .ops = &nft_tunnel_get_ops,
165 .policy = nft_tunnel_policy,
166 .maxattr = NFTA_TUNNEL_MAX,
167 .owner = THIS_MODULE,
/* Per-object tunnel encapsulation options.  Later code accesses these
 * members via "opts->u.vxlan", "opts->u.erspan", "opts->u.data" and
 * uses opts->len / opts->flags, so the anonymous-union wrapper and the
 * len/flags members are presumably on the lines missing from this
 * excerpt (171, 175-178).
 */
170 struct nft_tunnel_opts {
172 struct vxlan_metadata vxlan;
173 struct erspan_metadata erspan;
174 u8 data[IP_TUNNEL_OPTS_MAX];
/* Private data of the tunnel object: the preallocated metadata dst
 * attached to packets at eval time, plus the parsed encapsulation
 * options (closing "};" not visible in this excerpt).
 */
180 struct nft_tunnel_obj {
181 struct metadata_dst *md;
182 struct nft_tunnel_opts opts;
/* Netlink policy for the IPv4 source/destination nest (closing "};"
 * not visible in this excerpt).
 */
185 static const struct nla_policy nft_tunnel_ip_policy[NFTA_TUNNEL_KEY_IP_MAX + 1] = {
186 [NFTA_TUNNEL_KEY_IP_SRC] = { .type = NLA_U32 },
187 [NFTA_TUNNEL_KEY_IP_DST] = { .type = NLA_U32 },
/* Parse the IPv4 address nest into info->key.u.ipv4.  Addresses stay
 * in network byte order (nla_get_be32, no swap).  Error returns (parse
 * failure, missing DST), the success return and the braces are on
 * lines missing from this excerpt.
 */
190 static int nft_tunnel_obj_ip_init(const struct nft_ctx *ctx,
191 const struct nlattr *attr,
192 struct ip_tunnel_info *info)
194 struct nlattr *tb[NFTA_TUNNEL_KEY_IP_MAX + 1];
197 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP_MAX, attr,
198 nft_tunnel_ip_policy, NULL);
/* Destination address is mandatory; source is optional. */
202 if (!tb[NFTA_TUNNEL_KEY_IP_DST])
205 if (tb[NFTA_TUNNEL_KEY_IP_SRC])
206 info->key.u.ipv4.src = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_SRC]);
207 if (tb[NFTA_TUNNEL_KEY_IP_DST])
208 info->key.u.ipv4.dst = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP_DST]);
/* Netlink policy for the IPv6 nest: fixed-length in6_addr payloads for
 * src/dst plus a 32-bit flow label (closing "};" not visible here).
 */
213 static const struct nla_policy nft_tunnel_ip6_policy[NFTA_TUNNEL_KEY_IP6_MAX + 1] = {
214 [NFTA_TUNNEL_KEY_IP6_SRC] = { .len = sizeof(struct in6_addr), },
215 [NFTA_TUNNEL_KEY_IP6_DST] = { .len = sizeof(struct in6_addr), },
216 [NFTA_TUNNEL_KEY_IP6_FLOWLABEL] = { .type = NLA_U32, }
/* Parse the IPv6 address/flowlabel nest into info->key and flag the
 * tunnel as IPv6.  Error returns, the success return and several
 * closing braces are on lines missing from this excerpt.
 */
219 static int nft_tunnel_obj_ip6_init(const struct nft_ctx *ctx,
220 const struct nlattr *attr,
221 struct ip_tunnel_info *info)
223 struct nlattr *tb[NFTA_TUNNEL_KEY_IP6_MAX + 1];
226 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_IP6_MAX, attr,
227 nft_tunnel_ip6_policy, NULL);
/* Destination address is mandatory; source and flowlabel optional. */
231 if (!tb[NFTA_TUNNEL_KEY_IP6_DST])
234 if (tb[NFTA_TUNNEL_KEY_IP6_SRC]) {
235 memcpy(&info->key.u.ipv6.src,
236 nla_data(tb[NFTA_TUNNEL_KEY_IP6_SRC]),
237 sizeof(struct in6_addr));
239 if (tb[NFTA_TUNNEL_KEY_IP6_DST]) {
240 memcpy(&info->key.u.ipv6.dst,
241 nla_data(tb[NFTA_TUNNEL_KEY_IP6_DST]),
242 sizeof(struct in6_addr));
/* Flow label is kept in network byte order (nla_get_be32, no swap). */
244 if (tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL])
245 info->key.label = nla_get_be32(tb[NFTA_TUNNEL_KEY_IP6_FLOWLABEL]);
247 info->mode |= IP_TUNNEL_INFO_IPV6;
/* Netlink policy for the VXLAN options nest: only the 32-bit group
 * policy (GBP) attribute (closing "};" not visible here).
 */
252 static const struct nla_policy nft_tunnel_opts_vxlan_policy[NFTA_TUNNEL_KEY_VXLAN_MAX + 1] = {
253 [NFTA_TUNNEL_KEY_VXLAN_GBP] = { .type = NLA_U32 },
/* Parse VXLAN options (GBP) into opts and record length/flag.  Error
 * returns and the success return are on lines missing from this
 * excerpt.
 */
256 static int nft_tunnel_obj_vxlan_init(const struct nlattr *attr,
257 struct nft_tunnel_opts *opts)
259 struct nlattr *tb[NFTA_TUNNEL_KEY_VXLAN_MAX + 1];
262 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_VXLAN_MAX, attr,
263 nft_tunnel_opts_vxlan_policy, NULL);
/* GBP is mandatory for this nest; stored host-order after ntohl. */
267 if (!tb[NFTA_TUNNEL_KEY_VXLAN_GBP])
270 opts->u.vxlan.gbp = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_VXLAN_GBP]));
272 opts->len = sizeof(struct vxlan_metadata);
273 opts->flags = TUNNEL_VXLAN_OPT;
/* Netlink policy for the ERSPAN options nest: version selector, v1
 * index, and v2 direction/hardware-id (closing "};" not visible here).
 */
278 static const struct nla_policy nft_tunnel_opts_erspan_policy[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1] = {
279 [NFTA_TUNNEL_KEY_ERSPAN_VERSION] = { .type = NLA_U32 },
280 [NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX] = { .type = NLA_U32 },
281 [NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] = { .type = NLA_U8 },
282 [NFTA_TUNNEL_KEY_ERSPAN_V2_HWID] = { .type = NLA_U8 },
/* Parse ERSPAN v1 (index) or v2 (dir/hwid) options into opts.  The
 * local declarations, the switch header on "version", its break
 * statements, error returns and the success return are on lines
 * missing from this excerpt.
 */
285 static int nft_tunnel_obj_erspan_init(const struct nlattr *attr,
286 struct nft_tunnel_opts *opts)
288 struct nlattr *tb[NFTA_TUNNEL_KEY_ERSPAN_MAX + 1];
292 err = nla_parse_nested_deprecated(tb, NFTA_TUNNEL_KEY_ERSPAN_MAX,
293 attr, nft_tunnel_opts_erspan_policy,
298 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION])
301 version = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_VERSION]));
/* Presumably the v1 case of the (missing) switch: index is mandatory
 * and kept in network byte order (no ntohl, unlike "version" above).
 */
304 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX])
307 opts->u.erspan.u.index =
308 nla_get_be32(tb[NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX]);
310 case ERSPAN_VERSION2:
311 if (!tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR] ||
312 !tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID])
315 hwid = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_HWID]);
316 dir = nla_get_u8(tb[NFTA_TUNNEL_KEY_ERSPAN_V2_DIR]);
318 set_hwid(&opts->u.erspan.u.md2, hwid);
319 opts->u.erspan.u.md2.dir = dir;
324 opts->u.erspan.version = version;
326 opts->len = sizeof(struct erspan_metadata);
327 opts->flags = TUNNEL_ERSPAN_OPT;
/* Netlink policy for the GENEVE options nest; option data is binary,
 * capped at 128 bytes (closing "};" not visible here).
 */
332 static const struct nla_policy nft_tunnel_opts_geneve_policy[NFTA_TUNNEL_KEY_GENEVE_MAX + 1] = {
333 [NFTA_TUNNEL_KEY_GENEVE_CLASS] = { .type = NLA_U16 },
334 [NFTA_TUNNEL_KEY_GENEVE_TYPE] = { .type = NLA_U8 },
335 [NFTA_TUNNEL_KEY_GENEVE_DATA] = { .type = NLA_BINARY, .len = 128 },
/* Append one GENEVE option (class/type/data) at the current end of the
 * options buffer; may be called repeatedly to accumulate options.
 * Local declarations, error returns, the (presumable) data_len % 4
 * validation and the success return are on lines missing from this
 * excerpt.
 */
338 static int nft_tunnel_obj_geneve_init(const struct nlattr *attr,
339 struct nft_tunnel_opts *opts)
/* NOTE(review): the cast binds tighter than "+", so this scales
 * opts->len by sizeof(struct geneve_opt) instead of treating it as a
 * byte offset; the byte-offset intent requires
 * "(struct geneve_opt *)(opts->u.data + opts->len)".  This matches a
 * known upstream netfilter fix — confirm against current mainline.
 */
341 struct geneve_opt *opt = (struct geneve_opt *)opts->u.data + opts->len;
342 struct nlattr *tb[NFTA_TUNNEL_KEY_GENEVE_MAX + 1];
345 err = nla_parse_nested(tb, NFTA_TUNNEL_KEY_GENEVE_MAX, attr,
346 nft_tunnel_opts_geneve_policy, NULL);
/* All three attributes are mandatory for a well-formed option. */
350 if (!tb[NFTA_TUNNEL_KEY_GENEVE_CLASS] ||
351 !tb[NFTA_TUNNEL_KEY_GENEVE_TYPE] ||
352 !tb[NFTA_TUNNEL_KEY_GENEVE_DATA])
355 attr = tb[NFTA_TUNNEL_KEY_GENEVE_DATA];
356 data_len = nla_len(attr);
/* Running total across options; bounded by IP_TUNNEL_OPTS_MAX. */
360 opts->len += sizeof(*opt) + data_len;
361 if (opts->len > IP_TUNNEL_OPTS_MAX)
364 memcpy(opt->opt_data, nla_data(attr), data_len);
/* GENEVE encodes option length in 4-byte units. */
365 opt->length = data_len / 4;
366 opt->opt_class = nla_get_be16(tb[NFTA_TUNNEL_KEY_GENEVE_CLASS]);
367 opt->type = nla_get_u8(tb[NFTA_TUNNEL_KEY_GENEVE_TYPE]);
368 opts->flags = TUNNEL_GENEVE_OPT;
/* Netlink policy for the top-level OPTS nest: one nested container per
 * encapsulation type; strict validation starts at GENEVE (closing "};"
 * not visible here).
 */
373 static const struct nla_policy nft_tunnel_opts_policy[NFTA_TUNNEL_KEY_OPTS_MAX + 1] = {
374 [NFTA_TUNNEL_KEY_OPTS_UNSPEC] = {
375 .strict_start_type = NFTA_TUNNEL_KEY_OPTS_GENEVE },
376 [NFTA_TUNNEL_KEY_OPTS_VXLAN] = { .type = NLA_NESTED, },
377 [NFTA_TUNNEL_KEY_OPTS_ERSPAN] = { .type = NLA_NESTED, },
378 [NFTA_TUNNEL_KEY_OPTS_GENEVE] = { .type = NLA_NESTED, },
/* Walk the OPTS nest and dispatch each option to its type-specific
 * parser, rejecting a mix of option types.  The "nla" declaration, the
 * type-conflict guards for VXLAN/ERSPAN, error-return checks, break
 * statements, a default case and the final return are on lines missing
 * from this excerpt.
 */
381 static int nft_tunnel_obj_opts_init(const struct nft_ctx *ctx,
382 const struct nlattr *attr,
383 struct ip_tunnel_info *info,
384 struct nft_tunnel_opts *opts)
386 int err, rem, type = 0;
389 err = nla_validate_nested_deprecated(attr, NFTA_TUNNEL_KEY_OPTS_MAX,
390 nft_tunnel_opts_policy, NULL);
394 nla_for_each_attr(nla, nla_data(attr), nla_len(attr), rem) {
395 switch (nla_type(nla)) {
396 case NFTA_TUNNEL_KEY_OPTS_VXLAN:
399 err = nft_tunnel_obj_vxlan_init(nla, opts);
402 type = TUNNEL_VXLAN_OPT;
404 case NFTA_TUNNEL_KEY_OPTS_ERSPAN:
407 err = nft_tunnel_obj_erspan_init(nla, opts);
410 type = TUNNEL_ERSPAN_OPT;
412 case NFTA_TUNNEL_KEY_OPTS_GENEVE:
/* GENEVE may repeat (several options accumulate), so only a
 * cross-type mix is rejected here.
 */
413 if (type && type != TUNNEL_GENEVE_OPT)
415 err = nft_tunnel_obj_geneve_init(nla, opts);
418 type = TUNNEL_GENEVE_OPT;
/* Netlink policy for the tunnel object's top-level attributes
 * (closing "};" not visible in this excerpt).
 */
428 static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] = {
429 [NFTA_TUNNEL_KEY_IP] = { .type = NLA_NESTED, },
430 [NFTA_TUNNEL_KEY_IP6] = { .type = NLA_NESTED, },
431 [NFTA_TUNNEL_KEY_ID] = { .type = NLA_U32, },
432 [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
433 [NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, },
434 [NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, },
435 [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
436 [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
437 [NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, },
/* Object init: build an ip_tunnel_info from netlink attributes, then
 * allocate and populate the metadata dst used at eval time.  Numerous
 * error returns, closing braces, the "err"/"tun_flags" declarations
 * and the final success return are on lines missing from this excerpt.
 */
440 static int nft_tunnel_obj_init(const struct nft_ctx *ctx,
441 const struct nlattr * const tb[],
442 struct nft_object *obj)
444 struct nft_tunnel_obj *priv = nft_obj_data(obj);
445 struct ip_tunnel_info info;
446 struct metadata_dst *md;
/* Tunnel id is the only mandatory attribute. */
449 if (!tb[NFTA_TUNNEL_KEY_ID])
452 memset(&info, 0, sizeof(info));
/* TX metadata with keyed, checksummed, uncached defaults. */
453 info.mode = IP_TUNNEL_INFO_TX;
454 info.key.tun_id = key32_to_tunnel_id(nla_get_be32(tb[NFTA_TUNNEL_KEY_ID]));
455 info.key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
/* IPv4 and IPv6 nests are mutually exclusive (else-if chain). */
457 if (tb[NFTA_TUNNEL_KEY_IP]) {
458 err = nft_tunnel_obj_ip_init(ctx, tb[NFTA_TUNNEL_KEY_IP], &info);
461 } else if (tb[NFTA_TUNNEL_KEY_IP6]) {
462 err = nft_tunnel_obj_ip6_init(ctx, tb[NFTA_TUNNEL_KEY_IP6], &info);
/* Transport ports kept in network byte order. */
469 if (tb[NFTA_TUNNEL_KEY_SPORT]) {
470 info.key.tp_src = nla_get_be16(tb[NFTA_TUNNEL_KEY_SPORT]);
472 if (tb[NFTA_TUNNEL_KEY_DPORT]) {
473 info.key.tp_dst = nla_get_be16(tb[NFTA_TUNNEL_KEY_DPORT]);
476 if (tb[NFTA_TUNNEL_KEY_FLAGS]) {
479 tun_flags = ntohl(nla_get_be32(tb[NFTA_TUNNEL_KEY_FLAGS]));
/* Unknown flag bits are rejected (error return not visible). */
480 if (tun_flags & ~NFT_TUNNEL_F_MASK)
/* CSUM is on by default (line 455), so ZERO_CSUM_TX clears it. */
483 if (tun_flags & NFT_TUNNEL_F_ZERO_CSUM_TX)
484 info.key.tun_flags &= ~TUNNEL_CSUM;
485 if (tun_flags & NFT_TUNNEL_F_DONT_FRAGMENT)
486 info.key.tun_flags |= TUNNEL_DONT_FRAGMENT;
487 if (tun_flags & NFT_TUNNEL_F_SEQ_NUMBER)
488 info.key.tun_flags |= TUNNEL_SEQ;
490 if (tb[NFTA_TUNNEL_KEY_TOS])
491 info.key.tos = nla_get_u8(tb[NFTA_TUNNEL_KEY_TOS]);
492 if (tb[NFTA_TUNNEL_KEY_TTL])
493 info.key.ttl = nla_get_u8(tb[NFTA_TUNNEL_KEY_TTL]);
/* Presumably the else branch: default TTL when none was supplied. */
495 info.key.ttl = U8_MAX;
497 if (tb[NFTA_TUNNEL_KEY_OPTS]) {
498 err = nft_tunnel_obj_opts_init(ctx, tb[NFTA_TUNNEL_KEY_OPTS],
/* -ENOMEM check for the allocation is not visible in this excerpt. */
504 md = metadata_dst_alloc(priv->opts.len, METADATA_IP_TUNNEL, GFP_KERNEL);
508 memcpy(&md->u.tun_info, &info, sizeof(info));
509 #ifdef CONFIG_DST_CACHE
/* On dst_cache_init failure md is freed before returning (error path
 * only partly visible here).
 */
510 err = dst_cache_init(&md->u.tun_info.dst_cache, GFP_KERNEL);
512 metadata_dst_free(md);
/* Continuation (flags argument, priv->md assignment) not visible. */
516 ip_tunnel_info_opts_set(&md->u.tun_info, &priv->opts.u, priv->opts.len,
/* Object eval: attach the preallocated tunnel metadata dst to the skb
 * (taking a dst reference first) so the output path performs the
 * encapsulation.  A skb_dst_drop() of any previous dst presumably sits
 * on the missing lines between the locals and dst_hold().
 */
523 static inline void nft_tunnel_obj_eval(struct nft_object *obj,
524 struct nft_regs *regs,
525 const struct nft_pktinfo *pkt)
527 struct nft_tunnel_obj *priv = nft_obj_data(obj);
528 struct sk_buff *skb = pkt->skb;
531 dst_hold((struct dst_entry *) priv->md);
532 skb_dst_set(skb, (struct dst_entry *) priv->md);
/* Dump the IPv4 or IPv6 address nest of a tunnel object, selected by
 * the IPv6 mode bit.  The "nest" declaration, nest-start failure
 * checks, "return -1" paths, the else keyword joining the two branches
 * and the success return are on lines missing from this excerpt.
 */
535 static int nft_tunnel_ip_dump(struct sk_buff *skb, struct ip_tunnel_info *info)
539 if (info->mode & IP_TUNNEL_INFO_IPV6) {
540 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP6);
544 if (nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_SRC,
545 &info->key.u.ipv6.src) < 0 ||
546 nla_put_in6_addr(skb, NFTA_TUNNEL_KEY_IP6_DST,
547 &info->key.u.ipv6.dst) < 0 ||
/* The flowlabel argument (line 549) is not visible here. */
548 nla_put_be32(skb, NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
550 nla_nest_cancel(skb, nest);
554 nla_nest_end(skb, nest);
556 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_IP);
560 if (nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_SRC,
561 info->key.u.ipv4.src) < 0 ||
562 nla_put_in_addr(skb, NFTA_TUNNEL_KEY_IP_DST,
563 info->key.u.ipv4.dst) < 0) {
564 nla_nest_cancel(skb, nest);
568 nla_nest_end(skb, nest);
/* Dump the encapsulation-options nest (VXLAN, ERSPAN or GENEVE —
 * mutually exclusive per the else-if chain).  Nest-start failure
 * checks, break statements, the success return and the bodies of the
 * two failure labels are on lines missing from this excerpt.
 */
574 static int nft_tunnel_opts_dump(struct sk_buff *skb,
575 struct nft_tunnel_obj *priv)
577 struct nft_tunnel_opts *opts = &priv->opts;
578 struct nlattr *nest, *inner;
580 nest = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS);
584 if (opts->flags & TUNNEL_VXLAN_OPT) {
585 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_VXLAN);
/* GBP was stored host-order in init, so swap back here. */
588 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_VXLAN_GBP,
589 htonl(opts->u.vxlan.gbp)))
591 nla_nest_end(skb, inner);
592 } else if (opts->flags & TUNNEL_ERSPAN_OPT) {
593 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_ERSPAN);
596 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_VERSION,
597 htonl(opts->u.erspan.version)))
599 switch (opts->u.erspan.version) {
/* v1 index was stored network-order in init: emitted as-is. */
601 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
602 opts->u.erspan.u.index))
605 case ERSPAN_VERSION2:
606 if (nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
607 get_hwid(&opts->u.erspan.u.md2)) ||
608 nla_put_u8(skb, NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
609 opts->u.erspan.u.md2.dir))
613 nla_nest_end(skb, inner);
614 } else if (opts->flags & TUNNEL_GENEVE_OPT) {
615 struct geneve_opt *opt;
618 inner = nla_nest_start_noflag(skb, NFTA_TUNNEL_KEY_OPTS_GENEVE);
621 while (opts->len > offset) {
/* NOTE(review): same cast-precedence bug as the geneve init path —
 * "offset" is scaled by sizeof(struct geneve_opt) instead of acting
 * as a byte offset; should presumably be
 * "(struct geneve_opt *)(opts->u.data + offset)".  Confirm upstream.
 */
622 opt = (struct geneve_opt *)opts->u.data + offset;
623 if (nla_put_be16(skb, NFTA_TUNNEL_KEY_GENEVE_CLASS,
625 nla_put_u8(skb, NFTA_TUNNEL_KEY_GENEVE_TYPE,
627 nla_put(skb, NFTA_TUNNEL_KEY_GENEVE_DATA,
628 opt->length * 4, opt->opt_data))
630 offset += sizeof(*opt) + opt->length * 4;
632 nla_nest_end(skb, inner);
634 nla_nest_end(skb, nest);
/* Failure labels: cancel inner and/or outer nest before returning. */
638 nla_nest_cancel(skb, inner);
640 nla_nest_cancel(skb, nest);
/* Dump source/destination transport ports (kept in network byte order,
 * hence no swap).  The failure and success returns are on lines
 * missing from this excerpt.
 */
644 static int nft_tunnel_ports_dump(struct sk_buff *skb,
645 struct ip_tunnel_info *info)
647 if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
648 nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
/* Translate kernel tun_flags back into NFT_TUNNEL_F_* user-space
 * flags — the mirror of the init path, including the inverted CSUM
 * test (absence of TUNNEL_CSUM maps to ZERO_CSUM_TX).  The "flags"
 * declaration and the returns are on lines missing from this excerpt.
 */
654 static int nft_tunnel_flags_dump(struct sk_buff *skb,
655 struct ip_tunnel_info *info)
659 if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
660 flags |= NFT_TUNNEL_F_DONT_FRAGMENT;
661 if (!(info->key.tun_flags & TUNNEL_CSUM))
662 flags |= NFT_TUNNEL_F_ZERO_CSUM_TX;
663 if (info->key.tun_flags & TUNNEL_SEQ)
664 flags |= NFT_TUNNEL_F_SEQ_NUMBER;
666 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_FLAGS, htonl(flags)) < 0)
/* Dump the whole tunnel object by delegating to the per-section dump
 * helpers.  The "reset" parameter is unused in the visible body.  The
 * success return and the nla_put_failure label are on lines missing
 * from this excerpt.
 */
672 static int nft_tunnel_obj_dump(struct sk_buff *skb,
673 struct nft_object *obj, bool reset)
675 struct nft_tunnel_obj *priv = nft_obj_data(obj);
676 struct ip_tunnel_info *info = &priv->md->u.tun_info;
678 if (nla_put_be32(skb, NFTA_TUNNEL_KEY_ID,
679 tunnel_id_to_key32(info->key.tun_id)) ||
680 nft_tunnel_ip_dump(skb, info) < 0 ||
681 nft_tunnel_ports_dump(skb, info) < 0 ||
682 nft_tunnel_flags_dump(skb, info) < 0 ||
683 nla_put_u8(skb, NFTA_TUNNEL_KEY_TOS, info->key.tos) ||
684 nla_put_u8(skb, NFTA_TUNNEL_KEY_TTL, info->key.ttl) ||
685 nft_tunnel_opts_dump(skb, priv) < 0)
686 goto nla_put_failure;
/* Object teardown: release the metadata dst allocated by obj_init. */
694 static void nft_tunnel_obj_destroy(const struct nft_ctx *ctx,
695 struct nft_object *obj)
697 struct nft_tunnel_obj *priv = nft_obj_data(obj);
699 metadata_dst_free(priv->md);
/* Forward declaration of the object type plus its ops table (closing
 * "};" not visible in this excerpt).
 */
702 static struct nft_object_type nft_tunnel_obj_type;
703 static const struct nft_object_ops nft_tunnel_obj_ops = {
704 .type = &nft_tunnel_obj_type,
705 .size = sizeof(struct nft_tunnel_obj),
706 .eval = nft_tunnel_obj_eval,
707 .init = nft_tunnel_obj_init,
708 .destroy = nft_tunnel_obj_destroy,
709 .dump = nft_tunnel_obj_dump,
/* Tunnel object type registration data (closing "};" not visible in
 * this excerpt).
 */
712 static struct nft_object_type nft_tunnel_obj_type __read_mostly = {
713 .type = NFT_OBJECT_TUNNEL,
714 .ops = &nft_tunnel_obj_ops,
715 .maxattr = NFTA_TUNNEL_KEY_MAX,
716 .policy = nft_tunnel_key_policy,
717 .owner = THIS_MODULE,
/* Module init: register the expression first, then the object type;
 * the expression is unregistered if object registration fails.  The
 * error checks and return statements are on lines missing from this
 * excerpt.
 */
720 static int __init nft_tunnel_module_init(void)
724 err = nft_register_expr(&nft_tunnel_type);
728 err = nft_register_obj(&nft_tunnel_obj_type);
730 nft_unregister_expr(&nft_tunnel_type);
/* Module exit: unregister in reverse order of registration. */
735 static void __exit nft_tunnel_module_exit(void)
737 nft_unregister_obj(&nft_tunnel_obj_type);
738 nft_unregister_expr(&nft_tunnel_type);
/* Module entry points and metadata; aliases let the kernel autoload
 * this module for the "tunnel" expression and the tunnel object type.
 */
741 module_init(nft_tunnel_module_init);
742 module_exit(nft_tunnel_module_exit);
744 MODULE_LICENSE("GPL");
745 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
746 MODULE_ALIAS_NFT_EXPR("tunnel");
747 MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_TUNNEL);
748 MODULE_DESCRIPTION("nftables tunnel expression support");