// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Pablo Neira Ayuso <pablo@netfilter.org>
 */
6 #include <linux/kernel.h>
7 #include <linux/init.h>
8 #include <linux/module.h>
9 #include <linux/netlink.h>
10 #include <linux/netfilter.h>
11 #include <linux/netfilter/nf_tables.h>
12 #include <net/netfilter/nf_tables.h>
13 #include <net/netfilter/nft_reject.h>
14 #include <net/netfilter/ipv4/nf_reject.h>
15 #include <net/netfilter/ipv6/nf_reject.h>
18 #include <net/ip6_checksum.h>
19 #include <linux/netfilter_bridge.h>
20 #include <linux/netfilter_ipv6.h>
21 #include "../br_private.h"
23 static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
28 eth = skb_push(nskb, ETH_HLEN);
29 skb_reset_mac_header(nskb);
30 ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
31 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
32 eth->h_proto = eth_hdr(oldskb)->h_proto;
33 skb_pull(nskb, ETH_HLEN);
36 static int nft_bridge_iphdr_validate(struct sk_buff *skb)
41 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
45 if (iph->ihl < 5 || iph->version != 4)
48 len = ntohs(iph->tot_len);
51 else if (len < (iph->ihl*4))
54 if (!pskb_may_pull(skb, iph->ihl*4))
/* We cannot use oldskb->dev, it can be either bridge device (NF_BRIDGE INPUT)
 * or the bridge port (NF_BRIDGE PREROUTING).
 */
63 static void nft_reject_br_send_v4_tcp_reset(struct net *net,
64 struct sk_buff *oldskb,
65 const struct net_device *dev,
70 const struct tcphdr *oth;
73 if (!nft_bridge_iphdr_validate(oldskb))
76 oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
80 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
81 LL_MAX_HEADER, GFP_ATOMIC);
85 skb_reserve(nskb, LL_MAX_HEADER);
86 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
87 net->ipv4.sysctl_ip_default_ttl);
88 nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
89 niph->tot_len = htons(nskb->len);
92 nft_reject_br_push_etherhdr(oldskb, nskb);
94 br_forward(br_port_get_rcu(dev), nskb, false, true);
97 static void nft_reject_br_send_v4_unreach(struct net *net,
98 struct sk_buff *oldskb,
99 const struct net_device *dev,
102 struct sk_buff *nskb;
104 struct icmphdr *icmph;
109 if (!nft_bridge_iphdr_validate(oldskb))
112 /* IP header checks: fragment. */
113 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
116 /* RFC says return as much as we can without exceeding 576 bytes. */
117 len = min_t(unsigned int, 536, oldskb->len);
119 if (!pskb_may_pull(oldskb, len))
122 if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
125 proto = ip_hdr(oldskb)->protocol;
127 if (!skb_csum_unnecessary(oldskb) &&
128 nf_reject_verify_csum(proto) &&
129 nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
132 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
133 LL_MAX_HEADER + len, GFP_ATOMIC);
137 skb_reserve(nskb, LL_MAX_HEADER);
138 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
139 net->ipv4.sysctl_ip_default_ttl);
141 skb_reset_transport_header(nskb);
142 icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
143 icmph->type = ICMP_DEST_UNREACH;
146 skb_put_data(nskb, skb_network_header(oldskb), len);
148 csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
149 icmph->checksum = csum_fold(csum);
151 niph->tot_len = htons(nskb->len);
154 nft_reject_br_push_etherhdr(oldskb, nskb);
156 br_forward(br_port_get_rcu(dev), nskb, false, true);
159 static int nft_bridge_ip6hdr_validate(struct sk_buff *skb)
164 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
168 if (hdr->version != 6)
171 pkt_len = ntohs(hdr->payload_len);
172 if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
178 static void nft_reject_br_send_v6_tcp_reset(struct net *net,
179 struct sk_buff *oldskb,
180 const struct net_device *dev,
183 struct sk_buff *nskb;
184 const struct tcphdr *oth;
186 unsigned int otcplen;
187 struct ipv6hdr *nip6h;
189 if (!nft_bridge_ip6hdr_validate(oldskb))
192 oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
196 nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
197 LL_MAX_HEADER, GFP_ATOMIC);
201 skb_reserve(nskb, LL_MAX_HEADER);
202 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
203 net->ipv6.devconf_all->hop_limit);
204 nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
205 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
207 nft_reject_br_push_etherhdr(oldskb, nskb);
209 br_forward(br_port_get_rcu(dev), nskb, false, true);
212 static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
214 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
217 u8 proto = ip6h->nexthdr;
219 if (skb_csum_unnecessary(skb))
222 if (ip6h->payload_len &&
223 pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
226 ip6h = ipv6_hdr(skb);
227 thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
228 if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
231 if (!nf_reject_verify_csum(proto))
234 return nf_ip6_checksum(skb, hook, thoff, proto) == 0;
237 static void nft_reject_br_send_v6_unreach(struct net *net,
238 struct sk_buff *oldskb,
239 const struct net_device *dev,
242 struct sk_buff *nskb;
243 struct ipv6hdr *nip6h;
244 struct icmp6hdr *icmp6h;
247 if (!nft_bridge_ip6hdr_validate(oldskb))
250 /* Include "As much of invoking packet as possible without the ICMPv6
251 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
253 len = min_t(unsigned int, 1220, oldskb->len);
255 if (!pskb_may_pull(oldskb, len))
258 if (!reject6_br_csum_ok(oldskb, hook))
261 nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
262 LL_MAX_HEADER + len, GFP_ATOMIC);
266 skb_reserve(nskb, LL_MAX_HEADER);
267 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
268 net->ipv6.devconf_all->hop_limit);
270 skb_reset_transport_header(nskb);
271 icmp6h = skb_put_zero(nskb, sizeof(struct icmp6hdr));
272 icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
273 icmp6h->icmp6_code = code;
275 skb_put_data(nskb, skb_network_header(oldskb), len);
276 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
278 icmp6h->icmp6_cksum =
279 csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
280 nskb->len - sizeof(struct ipv6hdr),
283 nskb->len - sizeof(struct ipv6hdr),
286 nft_reject_br_push_etherhdr(oldskb, nskb);
288 br_forward(br_port_get_rcu(dev), nskb, false, true);
291 static void nft_reject_bridge_eval(const struct nft_expr *expr,
292 struct nft_regs *regs,
293 const struct nft_pktinfo *pkt)
295 struct nft_reject *priv = nft_expr_priv(expr);
296 const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
298 if (is_broadcast_ether_addr(dest) ||
299 is_multicast_ether_addr(dest))
302 switch (eth_hdr(pkt->skb)->h_proto) {
303 case htons(ETH_P_IP):
304 switch (priv->type) {
305 case NFT_REJECT_ICMP_UNREACH:
306 nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
311 case NFT_REJECT_TCP_RST:
312 nft_reject_br_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
316 case NFT_REJECT_ICMPX_UNREACH:
317 nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
320 nft_reject_icmp_code(priv->icmp_code));
324 case htons(ETH_P_IPV6):
325 switch (priv->type) {
326 case NFT_REJECT_ICMP_UNREACH:
327 nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
332 case NFT_REJECT_TCP_RST:
333 nft_reject_br_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
337 case NFT_REJECT_ICMPX_UNREACH:
338 nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
341 nft_reject_icmpv6_code(priv->icmp_code));
346 /* No explicit way to reject this protocol, drop it. */
350 regs->verdict.code = NF_DROP;
353 static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
354 const struct nft_expr *expr,
355 const struct nft_data **data)
357 return nft_chain_validate_hooks(ctx->chain, (1 << NF_BR_PRE_ROUTING) |
358 (1 << NF_BR_LOCAL_IN));
361 static int nft_reject_bridge_init(const struct nft_ctx *ctx,
362 const struct nft_expr *expr,
363 const struct nlattr * const tb[])
365 struct nft_reject *priv = nft_expr_priv(expr);
368 if (tb[NFTA_REJECT_TYPE] == NULL)
371 priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
372 switch (priv->type) {
373 case NFT_REJECT_ICMP_UNREACH:
374 case NFT_REJECT_ICMPX_UNREACH:
375 if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
378 icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
379 if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
380 icmp_code > NFT_REJECT_ICMPX_MAX)
383 priv->icmp_code = icmp_code;
385 case NFT_REJECT_TCP_RST:
393 static int nft_reject_bridge_dump(struct sk_buff *skb,
394 const struct nft_expr *expr)
396 const struct nft_reject *priv = nft_expr_priv(expr);
398 if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
399 goto nla_put_failure;
401 switch (priv->type) {
402 case NFT_REJECT_ICMP_UNREACH:
403 case NFT_REJECT_ICMPX_UNREACH:
404 if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
405 goto nla_put_failure;
417 static struct nft_expr_type nft_reject_bridge_type;
418 static const struct nft_expr_ops nft_reject_bridge_ops = {
419 .type = &nft_reject_bridge_type,
420 .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
421 .eval = nft_reject_bridge_eval,
422 .init = nft_reject_bridge_init,
423 .dump = nft_reject_bridge_dump,
424 .validate = nft_reject_bridge_validate,
427 static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
428 .family = NFPROTO_BRIDGE,
430 .ops = &nft_reject_bridge_ops,
431 .policy = nft_reject_policy,
432 .maxattr = NFTA_REJECT_MAX,
433 .owner = THIS_MODULE,
436 static int __init nft_reject_bridge_module_init(void)
438 return nft_register_expr(&nft_reject_bridge_type);
441 static void __exit nft_reject_bridge_module_exit(void)
443 nft_unregister_expr(&nft_reject_bridge_type);
446 module_init(nft_reject_bridge_module_init);
447 module_exit(nft_reject_bridge_module_exit);
449 MODULE_LICENSE("GPL");
450 MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
451 MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");