// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
8 #include <asm/unaligned.h>
9 #include <linux/kernel.h>
10 #include <linux/netlink.h>
11 #include <linux/netfilter.h>
12 #include <linux/netfilter/nf_tables.h>
13 #include <net/netfilter/nf_tables_core.h>
14 #include <net/netfilter/nf_tables.h>
22 enum nft_registers dreg:8;
23 enum nft_registers sreg:8;
27 static unsigned int optlen(const u8 *opt, unsigned int offset)
29 /* Beware zero-length options: make finite progress */
30 if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)
33 return opt[offset + 1];
36 static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
37 struct nft_regs *regs,
38 const struct nft_pktinfo *pkt)
40 struct nft_exthdr *priv = nft_expr_priv(expr);
41 u32 *dest = ®s->data[priv->dreg];
42 unsigned int offset = 0;
45 err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
46 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
52 offset += priv->offset;
54 dest[priv->len / NFT_REG32_SIZE] = 0;
55 if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
59 regs->verdict.code = NFT_BREAK;
/* find the offset to specified option.
 *
 * If target header is found, its offset is set in *offset and return option
 * number. Otherwise, return negative error.
 *
 * If the first fragment doesn't contain the End of Options it is considered
 * invalid.
 */
70 static int ipv4_find_option(struct net *net, struct sk_buff *skb,
71 unsigned int *offset, int target)
73 unsigned char optbuf[sizeof(struct ip_options) + 40];
74 struct ip_options *opt = (struct ip_options *)optbuf;
75 struct iphdr *iph, _iph;
81 iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
84 start = sizeof(struct iphdr);
86 optlen = iph->ihl * 4 - (int)sizeof(struct iphdr);
90 memset(opt, 0, sizeof(struct ip_options));
91 /* Copy the options since __ip_options_compile() modifies
94 if (skb_copy_bits(skb, start, opt->__data, optlen))
98 if (__ip_options_compile(net, opt, NULL, &info))
106 found = target == IPOPT_SSRR ? opt->is_strictroute :
107 !opt->is_strictroute;
109 *offset = opt->srr + start;
114 *offset = opt->rr + start;
118 if (!opt->router_alert)
120 *offset = opt->router_alert + start;
126 return found ? target : -ENOENT;
129 static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
130 struct nft_regs *regs,
131 const struct nft_pktinfo *pkt)
133 struct nft_exthdr *priv = nft_expr_priv(expr);
134 u32 *dest = ®s->data[priv->dreg];
135 struct sk_buff *skb = pkt->skb;
139 if (skb->protocol != htons(ETH_P_IP))
142 err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
143 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
146 } else if (err < 0) {
149 offset += priv->offset;
151 dest[priv->len / NFT_REG32_SIZE] = 0;
152 if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
156 regs->verdict.code = NFT_BREAK;
160 nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
161 unsigned int len, void *buffer, unsigned int *tcphdr_len)
165 if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP)
168 tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, sizeof(*tcph), buffer);
172 *tcphdr_len = __tcp_hdrlen(tcph);
173 if (*tcphdr_len < sizeof(*tcph) || *tcphdr_len > len)
176 return skb_header_pointer(pkt->skb, pkt->xt.thoff, *tcphdr_len, buffer);
179 static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
180 struct nft_regs *regs,
181 const struct nft_pktinfo *pkt)
183 u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
184 struct nft_exthdr *priv = nft_expr_priv(expr);
185 unsigned int i, optl, tcphdr_len, offset;
186 u32 *dest = ®s->data[priv->dreg];
190 tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
195 for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
196 optl = optlen(opt, i);
198 if (priv->type != opt[i])
201 if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
204 offset = i + priv->offset;
205 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
208 dest[priv->len / NFT_REG32_SIZE] = 0;
209 memcpy(dest, opt + offset, priv->len);
216 if (priv->flags & NFT_EXTHDR_F_PRESENT)
219 regs->verdict.code = NFT_BREAK;
222 static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
223 struct nft_regs *regs,
224 const struct nft_pktinfo *pkt)
226 u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
227 struct nft_exthdr *priv = nft_expr_priv(expr);
228 unsigned int i, optl, tcphdr_len, offset;
233 tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
238 for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
245 optl = optlen(opt, i);
247 if (priv->type != opt[i])
250 if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
253 if (skb_ensure_writable(pkt->skb,
254 pkt->xt.thoff + i + priv->len))
257 tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
262 src = regs->data[priv->sreg];
263 offset = i + priv->offset;
267 old.v16 = get_unaligned((u16 *)(opt + offset));
270 switch (priv->type) {
272 /* increase can cause connection to stall */
273 if (ntohs(old.v16) <= ntohs(new.v16))
278 if (old.v16 == new.v16)
281 put_unaligned(new.v16, (u16*)(opt + offset));
282 inet_proto_csum_replace2(&tcph->check, pkt->skb,
283 old.v16, new.v16, false);
287 old.v32 = get_unaligned((u32 *)(opt + offset));
289 if (old.v32 == new.v32)
292 put_unaligned(new.v32, (u32*)(opt + offset));
293 inet_proto_csum_replace4(&tcph->check, pkt->skb,
294 old.v32, new.v32, false);
305 static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
306 [NFTA_EXTHDR_DREG] = { .type = NLA_U32 },
307 [NFTA_EXTHDR_TYPE] = { .type = NLA_U8 },
308 [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 },
309 [NFTA_EXTHDR_LEN] = { .type = NLA_U32 },
310 [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 },
311 [NFTA_EXTHDR_OP] = { .type = NLA_U32 },
312 [NFTA_EXTHDR_SREG] = { .type = NLA_U32 },
315 static int nft_exthdr_init(const struct nft_ctx *ctx,
316 const struct nft_expr *expr,
317 const struct nlattr * const tb[])
319 struct nft_exthdr *priv = nft_expr_priv(expr);
320 u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
323 if (!tb[NFTA_EXTHDR_DREG] ||
324 !tb[NFTA_EXTHDR_TYPE] ||
325 !tb[NFTA_EXTHDR_OFFSET] ||
326 !tb[NFTA_EXTHDR_LEN])
329 err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
333 err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
337 if (tb[NFTA_EXTHDR_FLAGS]) {
338 err = nft_parse_u32_check(tb[NFTA_EXTHDR_FLAGS], U8_MAX, &flags);
342 if (flags & ~NFT_EXTHDR_F_PRESENT)
346 if (tb[NFTA_EXTHDR_OP]) {
347 err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
352 priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
353 priv->offset = offset;
355 priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
359 return nft_validate_register_store(ctx, priv->dreg, NULL,
360 NFT_DATA_VALUE, priv->len);
363 static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
364 const struct nft_expr *expr,
365 const struct nlattr * const tb[])
367 struct nft_exthdr *priv = nft_expr_priv(expr);
368 u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
371 if (!tb[NFTA_EXTHDR_SREG] ||
372 !tb[NFTA_EXTHDR_TYPE] ||
373 !tb[NFTA_EXTHDR_OFFSET] ||
374 !tb[NFTA_EXTHDR_LEN])
377 if (tb[NFTA_EXTHDR_DREG] || tb[NFTA_EXTHDR_FLAGS])
380 err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
384 err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
398 err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
402 priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
403 priv->offset = offset;
405 priv->sreg = nft_parse_register(tb[NFTA_EXTHDR_SREG]);
409 return nft_validate_register_load(priv->sreg, priv->len);
412 static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
413 const struct nft_expr *expr,
414 const struct nlattr * const tb[])
416 struct nft_exthdr *priv = nft_expr_priv(expr);
417 int err = nft_exthdr_init(ctx, expr, tb);
422 switch (priv->type) {
434 static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv)
436 if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
437 goto nla_put_failure;
438 if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset)))
439 goto nla_put_failure;
440 if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
441 goto nla_put_failure;
442 if (nla_put_be32(skb, NFTA_EXTHDR_FLAGS, htonl(priv->flags)))
443 goto nla_put_failure;
444 if (nla_put_be32(skb, NFTA_EXTHDR_OP, htonl(priv->op)))
445 goto nla_put_failure;
452 static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
454 const struct nft_exthdr *priv = nft_expr_priv(expr);
456 if (nft_dump_register(skb, NFTA_EXTHDR_DREG, priv->dreg))
459 return nft_exthdr_dump_common(skb, priv);
462 static int nft_exthdr_dump_set(struct sk_buff *skb, const struct nft_expr *expr)
464 const struct nft_exthdr *priv = nft_expr_priv(expr);
466 if (nft_dump_register(skb, NFTA_EXTHDR_SREG, priv->sreg))
469 return nft_exthdr_dump_common(skb, priv);
472 static const struct nft_expr_ops nft_exthdr_ipv6_ops = {
473 .type = &nft_exthdr_type,
474 .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
475 .eval = nft_exthdr_ipv6_eval,
476 .init = nft_exthdr_init,
477 .dump = nft_exthdr_dump,
480 static const struct nft_expr_ops nft_exthdr_ipv4_ops = {
481 .type = &nft_exthdr_type,
482 .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
483 .eval = nft_exthdr_ipv4_eval,
484 .init = nft_exthdr_ipv4_init,
485 .dump = nft_exthdr_dump,
488 static const struct nft_expr_ops nft_exthdr_tcp_ops = {
489 .type = &nft_exthdr_type,
490 .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
491 .eval = nft_exthdr_tcp_eval,
492 .init = nft_exthdr_init,
493 .dump = nft_exthdr_dump,
496 static const struct nft_expr_ops nft_exthdr_tcp_set_ops = {
497 .type = &nft_exthdr_type,
498 .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
499 .eval = nft_exthdr_tcp_set_eval,
500 .init = nft_exthdr_tcp_set_init,
501 .dump = nft_exthdr_dump_set,
504 static const struct nft_expr_ops *
505 nft_exthdr_select_ops(const struct nft_ctx *ctx,
506 const struct nlattr * const tb[])
510 if (!tb[NFTA_EXTHDR_OP])
511 return &nft_exthdr_ipv6_ops;
513 if (tb[NFTA_EXTHDR_SREG] && tb[NFTA_EXTHDR_DREG])
514 return ERR_PTR(-EOPNOTSUPP);
516 op = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OP]));
518 case NFT_EXTHDR_OP_TCPOPT:
519 if (tb[NFTA_EXTHDR_SREG])
520 return &nft_exthdr_tcp_set_ops;
521 if (tb[NFTA_EXTHDR_DREG])
522 return &nft_exthdr_tcp_ops;
524 case NFT_EXTHDR_OP_IPV6:
525 if (tb[NFTA_EXTHDR_DREG])
526 return &nft_exthdr_ipv6_ops;
528 case NFT_EXTHDR_OP_IPV4:
529 if (ctx->family != NFPROTO_IPV6) {
530 if (tb[NFTA_EXTHDR_DREG])
531 return &nft_exthdr_ipv4_ops;
536 return ERR_PTR(-EOPNOTSUPP);
539 struct nft_expr_type nft_exthdr_type __read_mostly = {
541 .select_ops = nft_exthdr_select_ops,
542 .policy = nft_exthdr_policy,
543 .maxattr = NFTA_EXTHDR_MAX,
544 .owner = THIS_MODULE,