1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
8 #include <asm/unaligned.h>
9 #include <linux/kernel.h>
10 #include <linux/netlink.h>
11 #include <linux/netfilter.h>
12 #include <linux/netfilter/nf_tables.h>
13 #include <linux/sctp.h>
14 #include <net/netfilter/nf_tables_core.h>
15 #include <net/netfilter/nf_tables.h>
16 #include <net/sctp/sctp.h>
29 static unsigned int optlen(const u8 *opt, unsigned int offset)
31 /* Beware zero-length options: make finite progress */
32 if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)
35 return opt[offset + 1];
38 static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
39 struct nft_regs *regs,
40 const struct nft_pktinfo *pkt)
42 struct nft_exthdr *priv = nft_expr_priv(expr);
43 u32 *dest = ®s->data[priv->dreg];
44 unsigned int offset = 0;
47 if (pkt->skb->protocol != htons(ETH_P_IPV6))
50 err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
51 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
52 nft_reg_store8(dest, err >= 0);
57 offset += priv->offset;
59 dest[priv->len / NFT_REG32_SIZE] = 0;
60 if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
64 regs->verdict.code = NFT_BREAK;
/* find the offset to specified option.
 *
 * If target header is found, its offset is set in *offset and return option
 * number. Otherwise, return negative error.
 *
 * If the first fragment doesn't contain the End of Options it is considered
 * invalid.
 */
/* Locate IPv4 option 'target' in skb's IP header option space.
 *
 * On success *offset is set to the option's offset (relative to the start
 * of the packet) and the option number is returned; a negative errno is
 * returned otherwise.
 *
 * NOTE(review): several interior lines (declarations, error returns and
 * switch labels) are elided in this excerpt; comments cover only the
 * visible statements.
 */
static int ipv4_find_option(struct net *net, struct sk_buff *skb,
			    unsigned int *offset, int target)
	/* scratch area: struct ip_options plus up to 40 bytes of options
	 * (60-byte maximum IPv4 header minus the fixed 20 bytes)
	 */
	unsigned char optbuf[sizeof(struct ip_options) + 40];
	struct ip_options *opt = (struct ip_options *)optbuf;
	struct iphdr *iph, _iph;
	iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
	start = sizeof(struct iphdr);
	/* option bytes = total header length minus the fixed header */
	optlen = iph->ihl * 4 - (int)sizeof(struct iphdr);
	memset(opt, 0, sizeof(struct ip_options));
	/* Copy the options since __ip_options_compile() modifies the
	 * options in place.
	 */
	if (skb_copy_bits(skb, start, opt->__data, optlen))
	opt->optlen = optlen;
	if (__ip_options_compile(net, opt, NULL, &info))
	/* SSRR matches only strict source routes, LSRR only loose ones */
	found = target == IPOPT_SSRR ? opt->is_strictroute :
	!opt->is_strictroute;
	*offset = opt->srr + start;
	*offset = opt->rr + start;
	if (!opt->router_alert)
	*offset = opt->router_alert + start;
	return found ? target : -ENOENT;
134 static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
135 struct nft_regs *regs,
136 const struct nft_pktinfo *pkt)
138 struct nft_exthdr *priv = nft_expr_priv(expr);
139 u32 *dest = ®s->data[priv->dreg];
140 struct sk_buff *skb = pkt->skb;
144 if (skb->protocol != htons(ETH_P_IP))
147 err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
148 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
149 nft_reg_store8(dest, err >= 0);
151 } else if (err < 0) {
154 offset += priv->offset;
156 dest[priv->len / NFT_REG32_SIZE] = 0;
157 if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
161 regs->verdict.code = NFT_BREAK;
/* Return a pointer to the packet's full TCP header (base header plus
 * options), copied into 'buffer' when the skb data is not linear.
 * *tcphdr_len is set to the header length taken from the TCP data-offset
 * field.
 *
 * NOTE(review): the return-type line, braces and NULL returns are elided
 * in this excerpt.
 */
nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
unsigned int len, void *buffer, unsigned int *tcphdr_len)
	/* only meaningful for TCP packets */
	if (pkt->tprot != IPPROTO_TCP)
	tcph = skb_header_pointer(pkt->skb, nft_thoff(pkt), sizeof(*tcph), buffer);
	*tcphdr_len = __tcp_hdrlen(tcph);
	/* reject a bogus data offset (< 20 bytes) or a header larger than
	 * the caller's buffer
	 */
	if (*tcphdr_len < sizeof(*tcph) || *tcphdr_len > len)
	return skb_header_pointer(pkt->skb, nft_thoff(pkt), *tcphdr_len, buffer);
184 static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
185 struct nft_regs *regs,
186 const struct nft_pktinfo *pkt)
188 u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
189 struct nft_exthdr *priv = nft_expr_priv(expr);
190 unsigned int i, optl, tcphdr_len, offset;
191 u32 *dest = ®s->data[priv->dreg];
195 tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
200 for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
201 optl = optlen(opt, i);
203 if (priv->type != opt[i])
206 if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
209 offset = i + priv->offset;
210 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
213 dest[priv->len / NFT_REG32_SIZE] = 0;
214 memcpy(dest, opt + offset, priv->len);
221 if (priv->flags & NFT_EXTHDR_F_PRESENT)
224 regs->verdict.code = NFT_BREAK;
227 static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
228 struct nft_regs *regs,
229 const struct nft_pktinfo *pkt)
231 u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
232 struct nft_exthdr *priv = nft_expr_priv(expr);
233 unsigned int i, optl, tcphdr_len, offset;
237 tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
242 for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
248 optl = optlen(opt, i);
250 if (priv->type != opt[i])
253 if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
256 if (skb_ensure_writable(pkt->skb,
257 nft_thoff(pkt) + i + priv->len))
260 tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
265 offset = i + priv->offset;
269 old.v16 = get_unaligned((u16 *)(opt + offset));
270 new.v16 = (__force __be16)nft_reg_load16(
271 ®s->data[priv->sreg]);
273 switch (priv->type) {
275 /* increase can cause connection to stall */
276 if (ntohs(old.v16) <= ntohs(new.v16))
281 if (old.v16 == new.v16)
284 put_unaligned(new.v16, (u16*)(opt + offset));
285 inet_proto_csum_replace2(&tcph->check, pkt->skb,
286 old.v16, new.v16, false);
289 new.v32 = regs->data[priv->sreg];
290 old.v32 = get_unaligned((u32 *)(opt + offset));
292 if (old.v32 == new.v32)
295 put_unaligned(new.v32, (u32*)(opt + offset));
296 inet_proto_csum_replace4(&tcph->check, pkt->skb,
297 old.v32, new.v32, false);
308 static void nft_exthdr_sctp_eval(const struct nft_expr *expr,
309 struct nft_regs *regs,
310 const struct nft_pktinfo *pkt)
312 unsigned int offset = nft_thoff(pkt) + sizeof(struct sctphdr);
313 struct nft_exthdr *priv = nft_expr_priv(expr);
314 u32 *dest = ®s->data[priv->dreg];
315 const struct sctp_chunkhdr *sch;
316 struct sctp_chunkhdr _sch;
318 if (pkt->tprot != IPPROTO_SCTP)
322 sch = skb_header_pointer(pkt->skb, offset, sizeof(_sch), &_sch);
323 if (!sch || !sch->length)
326 if (sch->type == priv->type) {
327 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
328 nft_reg_store8(dest, true);
331 if (priv->offset + priv->len > ntohs(sch->length) ||
332 offset + ntohs(sch->length) > pkt->skb->len)
335 dest[priv->len / NFT_REG32_SIZE] = 0;
336 if (skb_copy_bits(pkt->skb, offset + priv->offset,
337 dest, priv->len) < 0)
341 offset += SCTP_PAD4(ntohs(sch->length));
342 } while (offset < pkt->skb->len);
344 if (priv->flags & NFT_EXTHDR_F_PRESENT)
345 nft_reg_store8(dest, false);
347 regs->verdict.code = NFT_BREAK;
/* Netlink attribute policy for the NFTA_EXTHDR_* attributes. */
static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
	[NFTA_EXTHDR_DREG] = { .type = NLA_U32 },
	[NFTA_EXTHDR_TYPE] = { .type = NLA_U8 },
	[NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 },
	[NFTA_EXTHDR_LEN] = { .type = NLA_U32 },
	[NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 },
	[NFTA_EXTHDR_OP] = { .type = NLA_U32 },
	[NFTA_EXTHDR_SREG] = { .type = NLA_U32 },
/* Common init for exthdr load expressions: parse the TYPE/OFFSET/LEN and
 * optional FLAGS/OP attributes, then register the destination register.
 *
 * NOTE(review): interior error-return lines and braces are elided in this
 * excerpt; comments cover only the visible statements.
 */
static int nft_exthdr_init(const struct nft_ctx *ctx,
			   const struct nft_expr *expr,
			   const struct nlattr * const tb[])
	struct nft_exthdr *priv = nft_expr_priv(expr);
	/* op defaults to the IPv6 exthdr matcher when no OP attribute */
	u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
	/* these four attributes are mandatory */
	if (!tb[NFTA_EXTHDR_DREG] ||
	!tb[NFTA_EXTHDR_TYPE] ||
	!tb[NFTA_EXTHDR_OFFSET] ||
	!tb[NFTA_EXTHDR_LEN])
	/* offset and len are restricted to the u8 range */
	err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
	err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
	if (tb[NFTA_EXTHDR_FLAGS]) {
	err = nft_parse_u32_check(tb[NFTA_EXTHDR_FLAGS], U8_MAX, &flags);
	/* PRESENT is the only flag understood here */
	if (flags & ~NFT_EXTHDR_F_PRESENT)
	if (tb[NFTA_EXTHDR_OP]) {
	err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
	priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
	priv->offset = offset;
	return nft_parse_register_store(ctx, tb[NFTA_EXTHDR_DREG],
	&priv->dreg, NULL, NFT_DATA_VALUE,
/* Init for the TCP option set expression: requires SREG/TYPE/OFFSET/LEN,
 * rejects the load-only DREG/FLAGS attributes, and registers the source
 * register.
 *
 * NOTE(review): interior error-return lines and braces are elided in this
 * excerpt; comments cover only the visible statements.
 */
static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
				   const struct nft_expr *expr,
				   const struct nlattr * const tb[])
	struct nft_exthdr *priv = nft_expr_priv(expr);
	u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
	/* these four attributes are mandatory for a set expression */
	if (!tb[NFTA_EXTHDR_SREG] ||
	!tb[NFTA_EXTHDR_TYPE] ||
	!tb[NFTA_EXTHDR_OFFSET] ||
	!tb[NFTA_EXTHDR_LEN])
	/* load-only attributes are invalid when storing */
	if (tb[NFTA_EXTHDR_DREG] || tb[NFTA_EXTHDR_FLAGS])
	err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
	err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
	err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
	priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
	priv->offset = offset;
	return nft_parse_register_load(tb[NFTA_EXTHDR_SREG], &priv->sreg,
/* IPv4-specific init: run the common init, then validate the requested
 * option type.
 *
 * NOTE(review): the switch body and return statements are elided in this
 * excerpt.
 */
static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
	struct nft_exthdr *priv = nft_expr_priv(expr);
	int err = nft_exthdr_init(ctx, expr, tb);
	switch (priv->type) {
479 static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv)
481 if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
482 goto nla_put_failure;
483 if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset)))
484 goto nla_put_failure;
485 if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
486 goto nla_put_failure;
487 if (nla_put_be32(skb, NFTA_EXTHDR_FLAGS, htonl(priv->flags)))
488 goto nla_put_failure;
489 if (nla_put_be32(skb, NFTA_EXTHDR_OP, htonl(priv->op)))
490 goto nla_put_failure;
497 static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
499 const struct nft_exthdr *priv = nft_expr_priv(expr);
501 if (nft_dump_register(skb, NFTA_EXTHDR_DREG, priv->dreg))
504 return nft_exthdr_dump_common(skb, priv);
507 static int nft_exthdr_dump_set(struct sk_buff *skb, const struct nft_expr *expr)
509 const struct nft_exthdr *priv = nft_expr_priv(expr);
511 if (nft_dump_register(skb, NFTA_EXTHDR_SREG, priv->sreg))
514 return nft_exthdr_dump_common(skb, priv);
/* Ops for loading an IPv6 extension header field into a register. */
static const struct nft_expr_ops nft_exthdr_ipv6_ops = {
	.type = &nft_exthdr_type,
	.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
	.eval = nft_exthdr_ipv6_eval,
	.init = nft_exthdr_init,
	.dump = nft_exthdr_dump,
/* Ops for loading an IPv4 option field into a register. */
static const struct nft_expr_ops nft_exthdr_ipv4_ops = {
	.type = &nft_exthdr_type,
	.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
	.eval = nft_exthdr_ipv4_eval,
	.init = nft_exthdr_ipv4_init,
	.dump = nft_exthdr_dump,
/* Ops for loading a TCP option field into a register. */
static const struct nft_expr_ops nft_exthdr_tcp_ops = {
	.type = &nft_exthdr_type,
	.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
	.eval = nft_exthdr_tcp_eval,
	.init = nft_exthdr_init,
	.dump = nft_exthdr_dump,
/* Ops for rewriting a TCP option from a source register. */
static const struct nft_expr_ops nft_exthdr_tcp_set_ops = {
	.type = &nft_exthdr_type,
	.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
	.eval = nft_exthdr_tcp_set_eval,
	.init = nft_exthdr_tcp_set_init,
	.dump = nft_exthdr_dump_set,
/* Ops for loading an SCTP chunk field into a register. */
static const struct nft_expr_ops nft_exthdr_sctp_ops = {
	.type = &nft_exthdr_type,
	.size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
	.eval = nft_exthdr_sctp_eval,
	.init = nft_exthdr_init,
	.dump = nft_exthdr_dump,
/* Select the ops variant from the OP attribute and from whether the
 * expression loads (DREG) or stores (SREG).
 *
 * NOTE(review): switch statement braces, break statements and the default
 * label are elided in this excerpt; comments cover only the visible
 * statements.
 */
static const struct nft_expr_ops *
nft_exthdr_select_ops(const struct nft_ctx *ctx,
		      const struct nlattr * const tb[])
	/* no OP attribute: historic default is the IPv6 exthdr matcher */
	if (!tb[NFTA_EXTHDR_OP])
	return &nft_exthdr_ipv6_ops;
	/* an expression either loads or stores, never both */
	if (tb[NFTA_EXTHDR_SREG] && tb[NFTA_EXTHDR_DREG])
	return ERR_PTR(-EOPNOTSUPP);
	op = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OP]));
	case NFT_EXTHDR_OP_TCPOPT:
	if (tb[NFTA_EXTHDR_SREG])
	return &nft_exthdr_tcp_set_ops;
	if (tb[NFTA_EXTHDR_DREG])
	return &nft_exthdr_tcp_ops;
	case NFT_EXTHDR_OP_IPV6:
	if (tb[NFTA_EXTHDR_DREG])
	return &nft_exthdr_ipv6_ops;
	case NFT_EXTHDR_OP_IPV4:
	/* IPv4 options are not offered in the ip6 family */
	if (ctx->family != NFPROTO_IPV6) {
	if (tb[NFTA_EXTHDR_DREG])
	return &nft_exthdr_ipv4_ops;
	case NFT_EXTHDR_OP_SCTP:
	if (tb[NFTA_EXTHDR_DREG])
	return &nft_exthdr_sctp_ops;
	return ERR_PTR(-EOPNOTSUPP);
/* Expression type registration; the concrete ops are chosen per instance
 * by nft_exthdr_select_ops().
 */
struct nft_expr_type nft_exthdr_type __read_mostly = {
	.select_ops = nft_exthdr_select_ops,
	.policy = nft_exthdr_policy,
	.maxattr = NFTA_EXTHDR_MAX,
	.owner = THIS_MODULE,