// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 * Copyright (c) 2016 Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_offload.h>
/* For layer 4 checksum field offset. */
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sctp/checksum.h>

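/* The payload expression loads bytes at a given base/offset/len from a
 * packet header into a register; the set variant writes register data
 * back into the packet, optionally fixing up checksums.
 */
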
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
					 struct vlan_ethhdr *veth)
{
	if (skb_copy_bits(skb, mac_off, veth, ETH_HLEN))
		return false;

	veth->h_vlan_proto = skb->vlan_proto;
	veth->h_vlan_TCI = htons(skb_vlan_tag_get(skb));
	veth->h_vlan_encapsulated_proto = skb->protocol;

	return true;
}

/* add vlan header into the user buffer if the tag was removed by offloads */
static bool
nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
{
	int mac_off = skb_mac_header(skb) - skb->data;
	u8 *vlanh, *dst_u8 = (u8 *) d;
	struct vlan_ethhdr veth;
	u8 vlan_hlen = 0;

	if ((skb->protocol == htons(ETH_P_8021AD) ||
	     skb->protocol == htons(ETH_P_8021Q)) &&
	    offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
		vlan_hlen += VLAN_HLEN;

	vlanh = (u8 *) &veth;
	if (offset < VLAN_ETH_HLEN + vlan_hlen) {
		u8 ethlen = len;

		if (vlan_hlen &&
		    skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
			return false;
		else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
			return false;

		if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
			ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;

		memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);

		len -= ethlen;
		if (len == 0)
			return true;

		dst_u8 += ethlen;
		offset = ETH_HLEN + vlan_hlen;
	} else {
		offset -= VLAN_HLEN + vlan_hlen;
	}

	return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}

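/* Illustrative walk-through (not in the original source): with a single
 * 802.1Q tag stripped into skb metadata (vlan_hlen == 0), a read that
 * starts below VLAN_ETH_HLEN is served from the header rebuilt in &veth;
 * a read starting beyond it is re-based by VLAN_HLEN and fetched from
 * the on-wire data via skb_copy_bits(); a read straddling the boundary
 * is stitched together from both sources.
 */
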
static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
	unsigned int thoff = nft_thoff(pkt);

	if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_UDP:
		pkt->inneroff = thoff + sizeof(struct udphdr);
		break;
	case IPPROTO_TCP: {
		struct tcphdr *th, _tcph;

		th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
		if (!th)
			return -1;

		pkt->inneroff = thoff + __tcp_hdrlen(th);
		}
		break;
	default:
		return -1;
	}

	pkt->flags |= NFT_PKTINFO_INNER;

	return 0;
}

static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
	if (!(pkt->flags & NFT_PKTINFO_INNER) &&
	    __nft_payload_inner_offset((struct nft_pktinfo *)pkt) < 0)
		return -1;

	return pkt->inneroff;
}

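/* NFT_PKTINFO_INNER caches the result in pkt->inneroff, so repeated
 * inner-header accesses on the same packet parse the UDP/TCP header
 * above only once.
 */
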
void nft_payload_eval(const struct nft_expr *expr,
		      struct nft_regs *regs,
		      const struct nft_pktinfo *pkt)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct sk_buff *skb = pkt->skb;
	u32 *dest = &regs->data[priv->dreg];
	int offset;

	if (priv->len % NFT_REG32_SIZE)
		dest[priv->len / NFT_REG32_SIZE] = 0;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;

		if (skb_vlan_tag_present(skb)) {
			if (!nft_payload_copy_vlan(dest, skb,
						   priv->offset, priv->len))
				goto err;
			return;
		}
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}
	offset += priv->offset;

	if (skb_copy_bits(skb, offset, dest, priv->len) < 0)
		goto err;
	return;
err:
	regs->verdict.code = NFT_BREAK;
}

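/* Usage sketch (illustrative, generated by nft userspace, not part of
 * this file): the rule "ip saddr 1.2.3.4" compiles to roughly
 *   [ payload load 4b @ network header + 12 => reg 1 ]
 *   [ cmp eq reg 1 0x04030201 ]
 * i.e. base NFT_PAYLOAD_NETWORK_HEADER, offset 12 (iphdr->saddr), len 4.
 */
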
static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
	[NFTA_PAYLOAD_SREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_DREG]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_BASE]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_OFFSET]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_LEN]		= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_TYPE]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_OFFSET]	= { .type = NLA_U32 },
	[NFTA_PAYLOAD_CSUM_FLAGS]	= { .type = NLA_U32 },
};

static int nft_payload_init(const struct nft_ctx *ctx,
			    const struct nft_expr *expr,
			    const struct nlattr * const tb[])
{
	struct nft_payload *priv = nft_expr_priv(expr);

	priv->base   = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	return nft_parse_register_store(ctx, tb[NFTA_PAYLOAD_DREG],
					&priv->dreg, NULL, NFT_DATA_VALUE,
					priv->len);
}

static int nft_payload_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_DREG, priv->dreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static bool nft_payload_reduce(struct nft_regs_track *track,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	const struct nft_payload *payload;

	if (!track->regs[priv->dreg].selector ||
	    track->regs[priv->dreg].selector->ops != expr->ops) {
		track->regs[priv->dreg].selector = expr;
		track->regs[priv->dreg].bitwise = NULL;
		return false;
	}

	payload = nft_expr_priv(track->regs[priv->dreg].selector);
	if (priv->base != payload->base ||
	    priv->offset != payload->offset ||
	    priv->len != payload->len) {
		track->regs[priv->dreg].selector = expr;
		track->regs[priv->dreg].bitwise = NULL;
		return false;
	}

	if (!track->regs[priv->dreg].bitwise)
		return true;

	return nft_expr_reduce_bitwise(track, expr);
}

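/* Example of what the tracking above enables (illustrative): if two
 * consecutive expressions load the same base/offset/len into the same
 * destination register, the second load is redundant and can be elided;
 * any mismatch instead resets the selector and bitwise state.
 */
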
static bool nft_payload_offload_mask(struct nft_offload_reg *reg,
				     u32 priv_len, u32 field_len)
{
	unsigned int remainder, delta, k;
	struct nft_data mask = {};
	__be32 remainder_mask;

	if (priv_len == field_len) {
		memset(&reg->mask, 0xff, priv_len);
		return true;
	} else if (priv_len > field_len) {
		return false;
	}

	memset(&mask, 0xff, field_len);

	remainder = priv_len % sizeof(u32);
	if (remainder) {
		k = priv_len / sizeof(u32);
		delta = field_len - priv_len;
		remainder_mask = htonl(~((1 << (delta * BITS_PER_BYTE)) - 1));
		mask.data[k] = (__force u32)remainder_mask;
	}

	memcpy(&reg->mask, &mask, field_len);

	return true;
}

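/* Worked example (illustrative): a two-byte match on a four-byte field,
 * e.g. the upper half of an IPv4 address (priv_len = 2, field_len = 4):
 * remainder = 2, k = 0, delta = 2, so
 *   remainder_mask = htonl(~((1 << 16) - 1)) = htonl(0xffff0000)
 * and the mask copied to reg->mask is ff ff 00 00 in wire order, i.e.
 * only the two loaded bytes take part in the match.
 */
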
static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ethhdr, h_source):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  src, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_dest):
		if (!nft_payload_offload_mask(reg, priv->len, ETH_ALEN))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
				  dst, ETH_ALEN, reg);
		break;
	case offsetof(struct ethhdr, h_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic,
				  n_proto, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_VLAN, vlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_VLAN, vlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_TCI) + sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH_FLAGS(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
					vlan_tci, sizeof(__be16), reg,
					NFT_OFFLOAD_F_NETWORK2HOST);
		break;
	case offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) +
							sizeof(struct vlan_hdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_CVLAN, cvlan,
				  vlan_tpid, sizeof(__be16), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_NETWORK);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct iphdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
				  sizeof(struct in_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		break;
	case offsetof(struct iphdr, protocol):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct ipv6hdr, saddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, daddr):
		if (!nft_payload_offload_mask(reg, priv->len,
					      sizeof(struct in6_addr)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
				  sizeof(struct in6_addr), reg);
		nft_flow_rule_set_addr_type(flow, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		break;
	case offsetof(struct ipv6hdr, nexthdr):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__u8)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
				  sizeof(__u8), reg);
		nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_nh(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.l3num) {
	case htons(ETH_P_IP):
		err = nft_payload_offload_ip(ctx, flow, priv);
		break;
	case htons(ETH_P_IPV6):
		err = nft_payload_offload_ip6(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct tcphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct tcphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
				   struct nft_flow_rule *flow,
				   const struct nft_payload *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->dreg];

	switch (priv->offset) {
	case offsetof(struct udphdr, source):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
				  sizeof(__be16), reg);
		break;
	case offsetof(struct udphdr, dest):
		if (!nft_payload_offload_mask(reg, priv->len, sizeof(__be16)))
			return -EOPNOTSUPP;

		NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
				  sizeof(__be16), reg);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int nft_payload_offload_th(struct nft_offload_ctx *ctx,
				  struct nft_flow_rule *flow,
				  const struct nft_payload *priv)
{
	int err;

	switch (ctx->dep.protonum) {
	case IPPROTO_TCP:
		err = nft_payload_offload_tcp(ctx, flow, priv);
		break;
	case IPPROTO_UDP:
		err = nft_payload_offload_udp(ctx, flow, priv);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return err;
}

static int nft_payload_offload(struct nft_offload_ctx *ctx,
			       struct nft_flow_rule *flow,
			       const struct nft_expr *expr)
{
	const struct nft_payload *priv = nft_expr_priv(expr);
	int err;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		err = nft_payload_offload_ll(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		err = nft_payload_offload_nh(ctx, flow, priv);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		err = nft_payload_offload_th(ctx, flow, priv);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	return err;
}

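/* Only the exact header fields enumerated in the switch statements above
 * translate into flow dissector matches; any other offset/length makes
 * hardware offload of the rule fail with -EOPNOTSUPP.
 */
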
static const struct nft_expr_ops nft_payload_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

const struct nft_expr_ops nft_payload_fast_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload)),
	.eval		= nft_payload_eval,
	.init		= nft_payload_init,
	.dump		= nft_payload_dump,
	.reduce		= nft_payload_reduce,
	.offload	= nft_payload_offload,
};

static inline void nft_csum_replace(__sum16 *sum, __wsum fsum, __wsum tsum)
{
	*sum = csum_fold(csum_add(csum_sub(~csum_unfold(*sum), fsum), tsum));
	if (*sum == 0)
		*sum = CSUM_MANGLED_0;
}

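/* This is the incremental update from RFC 1624, HC' = ~(~HC + ~m + m'),
 * written as csum_fold(~HC - fsum + tsum): in ones'-complement
 * arithmetic, subtracting the checksum of the old bytes (fsum) is
 * equivalent to adding its complement. A zero result is replaced by
 * CSUM_MANGLED_0 (0xffff), notably because a zero UDP checksum means
 * "no checksum present".
 */
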
static bool nft_payload_udp_checksum(struct sk_buff *skb, unsigned int thoff)
{
	struct udphdr *uh, _uh;

	uh = skb_header_pointer(skb, thoff, sizeof(_uh), &_uh);
	if (!uh)
		return false;

	return (__force bool)uh->check;
}

static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     unsigned int *l4csum_offset)
{
	if (pkt->fragoff)
		return -1;

	switch (pkt->tprot) {
	case IPPROTO_TCP:
		*l4csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		if (!nft_payload_udp_checksum(skb, nft_thoff(pkt)))
			return -1;
		fallthrough;
	case IPPROTO_UDPLITE:
		*l4csum_offset = offsetof(struct udphdr, check);
		break;
	case IPPROTO_ICMPV6:
		*l4csum_offset = offsetof(struct icmp6hdr, icmp6_cksum);
		break;
	default:
		return -1;
	}

	*l4csum_offset += nft_thoff(pkt);
	return 0;
}

static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
{
	struct sctphdr *sh;

	if (skb_ensure_writable(skb, offset + sizeof(*sh)))
		return -1;

	sh = (struct sctphdr *)(skb->data + offset);
	sh->checksum = sctp_compute_cksum(skb, offset);
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	return 0;
}

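/* Unlike the ones'-complement Internet checksum, SCTP's CRC32c cannot be
 * patched from old/new byte sums, so nft_payload_csum_sctp() recomputes
 * it over the whole packet after the payload bytes have been rewritten.
 */
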
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
				     struct sk_buff *skb,
				     __wsum fsum, __wsum tsum)
{
	unsigned int l4csum_offset;
	__sum16 sum;

	/* If we cannot determine layer 4 checksum offset or this packet doesn't
	 * require layer 4 checksum recalculation, skip this packet.
	 */
	if (nft_payload_l4csum_offset(pkt, skb, &l4csum_offset) < 0)
		return 0;

	if (skb_copy_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	/* Checksum mangling for an arbitrary amount of bytes, based on
	 * inet_proto_csum_replace*() functions.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		nft_csum_replace(&sum, fsum, tsum);
		if (skb->ip_summed == CHECKSUM_COMPLETE) {
			skb->csum = ~csum_add(csum_sub(~(skb->csum), fsum),
					      tsum);
		}
	} else {
		sum = ~csum_fold(csum_add(csum_sub(csum_unfold(sum), fsum),
					  tsum));
	}

	if (skb_ensure_writable(skb, l4csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, l4csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
				 __wsum fsum, __wsum tsum, int csum_offset)
{
	__sum16 sum;

	if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	nft_csum_replace(&sum, fsum, tsum);
	if (skb_ensure_writable(skb, csum_offset + sizeof(sum)) ||
	    skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
		return -1;

	return 0;
}

static void nft_payload_set_eval(const struct nft_expr *expr,
				 struct nft_regs *regs,
				 const struct nft_pktinfo *pkt)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);
	struct sk_buff *skb = pkt->skb;
	const u32 *src = &regs->data[priv->sreg];
	int offset, csum_offset;
	__wsum fsum, tsum;

	switch (priv->base) {
	case NFT_PAYLOAD_LL_HEADER:
		if (!skb_mac_header_was_set(skb))
			goto err;
		offset = skb_mac_header(skb) - skb->data;
		break;
	case NFT_PAYLOAD_NETWORK_HEADER:
		offset = skb_network_offset(skb);
		break;
	case NFT_PAYLOAD_TRANSPORT_HEADER:
		if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
			goto err;
		offset = nft_thoff(pkt);
		break;
	case NFT_PAYLOAD_INNER_HEADER:
		offset = nft_payload_inner_offset(pkt);
		if (offset < 0)
			goto err;
		break;
	default:
		WARN_ON_ONCE(1);
		goto err;
	}

	csum_offset = offset + priv->csum_offset;
	offset += priv->offset;

	if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
	    ((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
	      priv->base != NFT_PAYLOAD_INNER_HEADER) ||
	     skb->ip_summed != CHECKSUM_PARTIAL)) {
		fsum = skb_checksum(skb, offset, priv->len, 0);
		tsum = csum_partial(src, priv->len, 0);

		if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
		    nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
			goto err;

		if (priv->csum_flags &&
		    nft_payload_l4csum_update(pkt, skb, fsum, tsum) < 0)
			goto err;
	}

	if (skb_ensure_writable(skb, max(offset + priv->len, 0)) ||
	    skb_store_bits(skb, offset, src, priv->len) < 0)
		goto err;

	if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
	    pkt->tprot == IPPROTO_SCTP &&
	    skb->ip_summed != CHECKSUM_PARTIAL) {
		if (nft_payload_csum_sctp(skb, nft_thoff(pkt)))
			goto err;
	}

	return;
err:
	regs->verdict.code = NFT_BREAK;
}

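/* Usage sketch (illustrative, generated by nft userspace, not part of
 * this file): "tcp dport set 8080" becomes roughly
 *   [ payload write reg 1 => 2b @ transport header + 2
 *     csum_type inet csum_off 16 csum_flags 0x0 ]
 * i.e. rewrite tcphdr->dest and patch the TCP checksum at offset 16
 * (offsetof(struct tcphdr, check)) through nft_payload_csum_inet().
 */
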
static int nft_payload_set_init(const struct nft_ctx *ctx,
				const struct nft_expr *expr,
				const struct nlattr * const tb[])
{
	struct nft_payload_set *priv = nft_expr_priv(expr);

	priv->base        = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	priv->offset      = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	priv->len         = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (tb[NFTA_PAYLOAD_CSUM_TYPE])
		priv->csum_type =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_TYPE]));
	if (tb[NFTA_PAYLOAD_CSUM_OFFSET])
		priv->csum_offset =
			ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_OFFSET]));
	if (tb[NFTA_PAYLOAD_CSUM_FLAGS]) {
		u32 flags;

		flags = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_CSUM_FLAGS]));
		if (flags & ~NFT_PAYLOAD_L4CSUM_PSEUDOHDR)
			return -EINVAL;

		priv->csum_flags = flags;
	}

	switch (priv->csum_type) {
	case NFT_PAYLOAD_CSUM_NONE:
	case NFT_PAYLOAD_CSUM_INET:
		break;
	case NFT_PAYLOAD_CSUM_SCTP:
		if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
			return -EINVAL;

		if (priv->csum_offset != offsetof(struct sctphdr, checksum))
			return -EINVAL;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return nft_parse_register_load(tb[NFTA_PAYLOAD_SREG], &priv->sreg,
				       priv->len);
}

static int nft_payload_set_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_payload_set *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_PAYLOAD_SREG, priv->sreg) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_BASE, htonl(priv->base)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_OFFSET, htonl(priv->offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_LEN, htonl(priv->len)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_TYPE, htonl(priv->csum_type)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_OFFSET,
			 htonl(priv->csum_offset)) ||
	    nla_put_be32(skb, NFTA_PAYLOAD_CSUM_FLAGS, htonl(priv->csum_flags)))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

static bool nft_payload_set_reduce(struct nft_regs_track *track,
				   const struct nft_expr *expr)
{
	int i;

	for (i = 0; i < NFT_REG32_NUM; i++) {
		if (!track->regs[i].selector)
			continue;

		if (track->regs[i].selector->ops != &nft_payload_ops &&
		    track->regs[i].selector->ops != &nft_payload_fast_ops)
			continue;

		track->regs[i].selector = NULL;
		track->regs[i].bitwise = NULL;
	}

	return false;
}

static const struct nft_expr_ops nft_payload_set_ops = {
	.type		= &nft_payload_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_payload_set)),
	.eval		= nft_payload_set_eval,
	.init		= nft_payload_set_init,
	.dump		= nft_payload_set_dump,
	.reduce		= nft_payload_set_reduce,
};

static const struct nft_expr_ops *
nft_payload_select_ops(const struct nft_ctx *ctx,
		       const struct nlattr * const tb[])
{
	enum nft_payload_bases base;
	unsigned int offset, len;

	if (tb[NFTA_PAYLOAD_BASE] == NULL ||
	    tb[NFTA_PAYLOAD_OFFSET] == NULL ||
	    tb[NFTA_PAYLOAD_LEN] == NULL)
		return ERR_PTR(-EINVAL);

	base = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_BASE]));
	switch (base) {
	case NFT_PAYLOAD_LL_HEADER:
	case NFT_PAYLOAD_NETWORK_HEADER:
	case NFT_PAYLOAD_TRANSPORT_HEADER:
	case NFT_PAYLOAD_INNER_HEADER:
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (tb[NFTA_PAYLOAD_SREG] != NULL) {
		if (tb[NFTA_PAYLOAD_DREG] != NULL)
			return ERR_PTR(-EINVAL);
		return &nft_payload_set_ops;
	}

	if (tb[NFTA_PAYLOAD_DREG] == NULL)
		return ERR_PTR(-EINVAL);

	offset = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_OFFSET]));
	len    = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));

	if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
	    base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
		return &nft_payload_fast_ops;
	else
		return &nft_payload_ops;
}

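/* The fast variant is restricted to small, naturally aligned loads from
 * the network or transport header: nft_payload_fast_eval() (in
 * nf_tables_core.c) then reads the bytes straight out of the skb linear
 * area instead of going through skb_copy_bits(). The link layer is
 * excluded because of the VLAN rebuilding above, the inner header
 * because its offset must be computed per packet.
 */
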
struct nft_expr_type nft_payload_type __read_mostly = {
	.name		= "payload",
	.select_ops	= nft_payload_select_ops,
	.policy		= nft_payload_policy,
	.maxattr	= NFTA_PAYLOAD_MAX,
	.owner		= THIS_MODULE,
};