// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/tcp.h>
22 enum nft_registers dreg:8;
23 enum nft_registers sreg:8;
27 static unsigned int optlen(const u8 *opt, unsigned int offset)
29 /* Beware zero-length options: make finite progress */
30 if (opt[offset] <= TCPOPT_NOP || opt[offset + 1] == 0)
33 return opt[offset + 1];
36 static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
37 struct nft_regs *regs,
38 const struct nft_pktinfo *pkt)
40 struct nft_exthdr *priv = nft_expr_priv(expr);
41 u32 *dest = ®s->data[priv->dreg];
42 unsigned int offset = 0;
45 err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
46 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
52 offset += priv->offset;
54 dest[priv->len / NFT_REG32_SIZE] = 0;
55 if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
59 regs->verdict.code = NFT_BREAK;
/* find the offset to specified option.
 *
 * If target header is found, its offset is set in *offset and return option
 * number. Otherwise, return negative error.
 *
 * If the first fragment doesn't contain the End of Options it is considered
 * invalid.
 */
70 static int ipv4_find_option(struct net *net, struct sk_buff *skb,
71 unsigned int *offset, int target)
73 unsigned char optbuf[sizeof(struct ip_options) + 40];
74 struct ip_options *opt = (struct ip_options *)optbuf;
75 struct iphdr *iph, _iph;
81 iph = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
84 start = sizeof(struct iphdr);
86 optlen = iph->ihl * 4 - (int)sizeof(struct iphdr);
90 memset(opt, 0, sizeof(struct ip_options));
91 /* Copy the options since __ip_options_compile() modifies
94 if (skb_copy_bits(skb, start, opt->__data, optlen))
98 if (__ip_options_compile(net, opt, NULL, &info))
106 found = target == IPOPT_SSRR ? opt->is_strictroute :
107 !opt->is_strictroute;
109 *offset = opt->srr + start;
114 *offset = opt->rr + start;
118 if (!opt->router_alert)
120 *offset = opt->router_alert + start;
126 return found ? target : -ENOENT;
129 static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
130 struct nft_regs *regs,
131 const struct nft_pktinfo *pkt)
133 struct nft_exthdr *priv = nft_expr_priv(expr);
134 u32 *dest = ®s->data[priv->dreg];
135 struct sk_buff *skb = pkt->skb;
139 if (skb->protocol != htons(ETH_P_IP))
142 err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
143 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
146 } else if (err < 0) {
149 offset += priv->offset;
151 dest[priv->len / NFT_REG32_SIZE] = 0;
152 if (skb_copy_bits(pkt->skb, offset, dest, priv->len) < 0)
156 regs->verdict.code = NFT_BREAK;
160 nft_tcp_header_pointer(const struct nft_pktinfo *pkt,
161 unsigned int len, void *buffer, unsigned int *tcphdr_len)
165 if (!pkt->tprot_set || pkt->tprot != IPPROTO_TCP)
168 tcph = skb_header_pointer(pkt->skb, pkt->xt.thoff, sizeof(*tcph), buffer);
172 *tcphdr_len = __tcp_hdrlen(tcph);
173 if (*tcphdr_len < sizeof(*tcph) || *tcphdr_len > len)
176 return skb_header_pointer(pkt->skb, pkt->xt.thoff, *tcphdr_len, buffer);
179 static void nft_exthdr_tcp_eval(const struct nft_expr *expr,
180 struct nft_regs *regs,
181 const struct nft_pktinfo *pkt)
183 u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
184 struct nft_exthdr *priv = nft_expr_priv(expr);
185 unsigned int i, optl, tcphdr_len, offset;
186 u32 *dest = ®s->data[priv->dreg];
190 tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
195 for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
196 optl = optlen(opt, i);
198 if (priv->type != opt[i])
201 if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
204 offset = i + priv->offset;
205 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
208 dest[priv->len / NFT_REG32_SIZE] = 0;
209 memcpy(dest, opt + offset, priv->len);
216 if (priv->flags & NFT_EXTHDR_F_PRESENT)
219 regs->verdict.code = NFT_BREAK;
222 static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
223 struct nft_regs *regs,
224 const struct nft_pktinfo *pkt)
226 u8 buff[sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE];
227 struct nft_exthdr *priv = nft_expr_priv(expr);
228 unsigned int i, optl, tcphdr_len, offset;
232 tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
237 for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
243 optl = optlen(opt, i);
245 if (priv->type != opt[i])
248 if (i + optl > tcphdr_len || priv->len + priv->offset > optl)
251 if (skb_ensure_writable(pkt->skb,
252 pkt->xt.thoff + i + priv->len))
255 tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff,
260 offset = i + priv->offset;
264 old.v16 = get_unaligned((u16 *)(opt + offset));
265 new.v16 = (__force __be16)nft_reg_load16(
266 ®s->data[priv->sreg]);
268 switch (priv->type) {
270 /* increase can cause connection to stall */
271 if (ntohs(old.v16) <= ntohs(new.v16))
276 if (old.v16 == new.v16)
279 put_unaligned(new.v16, (u16*)(opt + offset));
280 inet_proto_csum_replace2(&tcph->check, pkt->skb,
281 old.v16, new.v16, false);
284 new.v32 = regs->data[priv->sreg];
285 old.v32 = get_unaligned((u32 *)(opt + offset));
287 if (old.v32 == new.v32)
290 put_unaligned(new.v32, (u32*)(opt + offset));
291 inet_proto_csum_replace4(&tcph->check, pkt->skb,
292 old.v32, new.v32, false);
303 static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = {
304 [NFTA_EXTHDR_DREG] = { .type = NLA_U32 },
305 [NFTA_EXTHDR_TYPE] = { .type = NLA_U8 },
306 [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 },
307 [NFTA_EXTHDR_LEN] = { .type = NLA_U32 },
308 [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 },
309 [NFTA_EXTHDR_OP] = { .type = NLA_U32 },
310 [NFTA_EXTHDR_SREG] = { .type = NLA_U32 },
313 static int nft_exthdr_init(const struct nft_ctx *ctx,
314 const struct nft_expr *expr,
315 const struct nlattr * const tb[])
317 struct nft_exthdr *priv = nft_expr_priv(expr);
318 u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
321 if (!tb[NFTA_EXTHDR_DREG] ||
322 !tb[NFTA_EXTHDR_TYPE] ||
323 !tb[NFTA_EXTHDR_OFFSET] ||
324 !tb[NFTA_EXTHDR_LEN])
327 err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
331 err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
335 if (tb[NFTA_EXTHDR_FLAGS]) {
336 err = nft_parse_u32_check(tb[NFTA_EXTHDR_FLAGS], U8_MAX, &flags);
340 if (flags & ~NFT_EXTHDR_F_PRESENT)
344 if (tb[NFTA_EXTHDR_OP]) {
345 err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
350 priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
351 priv->offset = offset;
353 priv->dreg = nft_parse_register(tb[NFTA_EXTHDR_DREG]);
357 return nft_validate_register_store(ctx, priv->dreg, NULL,
358 NFT_DATA_VALUE, priv->len);
361 static int nft_exthdr_tcp_set_init(const struct nft_ctx *ctx,
362 const struct nft_expr *expr,
363 const struct nlattr * const tb[])
365 struct nft_exthdr *priv = nft_expr_priv(expr);
366 u32 offset, len, flags = 0, op = NFT_EXTHDR_OP_IPV6;
369 if (!tb[NFTA_EXTHDR_SREG] ||
370 !tb[NFTA_EXTHDR_TYPE] ||
371 !tb[NFTA_EXTHDR_OFFSET] ||
372 !tb[NFTA_EXTHDR_LEN])
375 if (tb[NFTA_EXTHDR_DREG] || tb[NFTA_EXTHDR_FLAGS])
378 err = nft_parse_u32_check(tb[NFTA_EXTHDR_OFFSET], U8_MAX, &offset);
382 err = nft_parse_u32_check(tb[NFTA_EXTHDR_LEN], U8_MAX, &len);
396 err = nft_parse_u32_check(tb[NFTA_EXTHDR_OP], U8_MAX, &op);
400 priv->type = nla_get_u8(tb[NFTA_EXTHDR_TYPE]);
401 priv->offset = offset;
403 priv->sreg = nft_parse_register(tb[NFTA_EXTHDR_SREG]);
407 return nft_validate_register_load(priv->sreg, priv->len);
410 static int nft_exthdr_ipv4_init(const struct nft_ctx *ctx,
411 const struct nft_expr *expr,
412 const struct nlattr * const tb[])
414 struct nft_exthdr *priv = nft_expr_priv(expr);
415 int err = nft_exthdr_init(ctx, expr, tb);
420 switch (priv->type) {
432 static int nft_exthdr_dump_common(struct sk_buff *skb, const struct nft_exthdr *priv)
434 if (nla_put_u8(skb, NFTA_EXTHDR_TYPE, priv->type))
435 goto nla_put_failure;
436 if (nla_put_be32(skb, NFTA_EXTHDR_OFFSET, htonl(priv->offset)))
437 goto nla_put_failure;
438 if (nla_put_be32(skb, NFTA_EXTHDR_LEN, htonl(priv->len)))
439 goto nla_put_failure;
440 if (nla_put_be32(skb, NFTA_EXTHDR_FLAGS, htonl(priv->flags)))
441 goto nla_put_failure;
442 if (nla_put_be32(skb, NFTA_EXTHDR_OP, htonl(priv->op)))
443 goto nla_put_failure;
450 static int nft_exthdr_dump(struct sk_buff *skb, const struct nft_expr *expr)
452 const struct nft_exthdr *priv = nft_expr_priv(expr);
454 if (nft_dump_register(skb, NFTA_EXTHDR_DREG, priv->dreg))
457 return nft_exthdr_dump_common(skb, priv);
460 static int nft_exthdr_dump_set(struct sk_buff *skb, const struct nft_expr *expr)
462 const struct nft_exthdr *priv = nft_expr_priv(expr);
464 if (nft_dump_register(skb, NFTA_EXTHDR_SREG, priv->sreg))
467 return nft_exthdr_dump_common(skb, priv);
470 static const struct nft_expr_ops nft_exthdr_ipv6_ops = {
471 .type = &nft_exthdr_type,
472 .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
473 .eval = nft_exthdr_ipv6_eval,
474 .init = nft_exthdr_init,
475 .dump = nft_exthdr_dump,
478 static const struct nft_expr_ops nft_exthdr_ipv4_ops = {
479 .type = &nft_exthdr_type,
480 .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
481 .eval = nft_exthdr_ipv4_eval,
482 .init = nft_exthdr_ipv4_init,
483 .dump = nft_exthdr_dump,
486 static const struct nft_expr_ops nft_exthdr_tcp_ops = {
487 .type = &nft_exthdr_type,
488 .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
489 .eval = nft_exthdr_tcp_eval,
490 .init = nft_exthdr_init,
491 .dump = nft_exthdr_dump,
494 static const struct nft_expr_ops nft_exthdr_tcp_set_ops = {
495 .type = &nft_exthdr_type,
496 .size = NFT_EXPR_SIZE(sizeof(struct nft_exthdr)),
497 .eval = nft_exthdr_tcp_set_eval,
498 .init = nft_exthdr_tcp_set_init,
499 .dump = nft_exthdr_dump_set,
502 static const struct nft_expr_ops *
503 nft_exthdr_select_ops(const struct nft_ctx *ctx,
504 const struct nlattr * const tb[])
508 if (!tb[NFTA_EXTHDR_OP])
509 return &nft_exthdr_ipv6_ops;
511 if (tb[NFTA_EXTHDR_SREG] && tb[NFTA_EXTHDR_DREG])
512 return ERR_PTR(-EOPNOTSUPP);
514 op = ntohl(nla_get_be32(tb[NFTA_EXTHDR_OP]));
516 case NFT_EXTHDR_OP_TCPOPT:
517 if (tb[NFTA_EXTHDR_SREG])
518 return &nft_exthdr_tcp_set_ops;
519 if (tb[NFTA_EXTHDR_DREG])
520 return &nft_exthdr_tcp_ops;
522 case NFT_EXTHDR_OP_IPV6:
523 if (tb[NFTA_EXTHDR_DREG])
524 return &nft_exthdr_ipv6_ops;
526 case NFT_EXTHDR_OP_IPV4:
527 if (ctx->family != NFPROTO_IPV6) {
528 if (tb[NFTA_EXTHDR_DREG])
529 return &nft_exthdr_ipv4_ops;
534 return ERR_PTR(-EOPNOTSUPP);
537 struct nft_expr_type nft_exthdr_type __read_mostly = {
539 .select_ops = nft_exthdr_select_ops,
540 .policy = nft_exthdr_policy,
541 .maxattr = NFTA_EXTHDR_MAX,
542 .owner = THIS_MODULE,