1 // SPDX-License-Identifier: GPL-2.0-only
2 /* (C) 1999-2001 Paul `Rusty' Russell
3 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
6 #include <linux/module.h>
11 #include <net/netfilter/ipv4/nf_reject.h>
12 #include <linux/netfilter_ipv4.h>
13 #include <linux/netfilter_bridge.h>
/* Validate the IPv4 header of @skb before a reject/reset packet is built
 * from it: pull the base header into the linear area, check version and
 * header length, cross-check tot_len, then pull the full header incl.
 * options.  NOTE(review): several lines (iph/len declarations, return
 * statements) are elided in this view — presumably returns nonzero on a
 * well-formed header and 0 otherwise; confirm against the full file.
 */
15 static int nf_reject_iphdr_validate(struct sk_buff *skb)
/* Need at least a minimal 20-byte IPv4 header in the linear area. */
20 if (!pskb_may_pull(skb, sizeof(struct iphdr)))
/* Reject non-IPv4 or an impossible header length (< 5 words). */
24 if (iph->ihl < 5 || iph->version != 4)
27 len = ntohs(iph->tot_len);
/* tot_len must at least cover the header itself (incl. options). */
30 else if (len < (iph->ihl*4))
/* Pull the complete header, including any IP options. */
33 if (!pskb_may_pull(skb, iph->ihl*4))
/* Build (but do not send) a TCP RST reply for @oldskb.
 *
 * Validates the original IPv4/TCP headers, allocates a fresh skb sized
 * for IP + TCP + link-layer headroom, and fills in mirrored IP and TCP
 * headers.  Returns the new skb, or NULL on validation/allocation
 * failure (the error-path lines are elided in this view — confirm).
 * The caller owns the returned skb and is responsible for transmit.
 */
39 struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,
40 struct sk_buff *oldskb,
41 const struct net_device *dev,
44 const struct tcphdr *oth;
/* Bail out if the original packet's IP header is malformed. */
49 if (!nf_reject_iphdr_validate(oldskb))
/* Locate the original TCP header; NULL means "do not reply" (e.g.
 * fragment, not TCP, or the original was itself a RST). */
52 oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
56 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
57 LL_MAX_HEADER, GFP_ATOMIC);
/* Cast away const: skb->dev is non-const but we only assign it. */
61 nskb->dev = (struct net_device *)dev;
63 skb_reserve(nskb, LL_MAX_HEADER);
/* NOTE(review): sysctl read without READ_ONCE() — later kernels
 * annotate sysctl_ip_default_ttl reads; confirm tree convention. */
64 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
65 net->ipv4.sysctl_ip_default_ttl);
66 nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
/* Fix up tot_len now that the full reply has been assembled. */
67 niph->tot_len = htons(nskb->len);
72 EXPORT_SYMBOL_GPL(nf_reject_skb_v4_tcp_reset);
/* Build (but do not send) an ICMP destination-unreachable reply quoting
 * the start of @oldskb.
 *
 * Performs header/fragment/checksum validation on the original packet,
 * then allocates and fills a new skb: mirrored IP header, zeroed ICMP
 * header (type DEST_UNREACH; the code line setting icmph->code is
 * elided here), followed by up to 536 quoted bytes of the original.
 * Returns the new skb or NULL on failure (error paths elided — confirm).
 */
74 struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,
75 struct sk_buff *oldskb,
76 const struct net_device *dev,
81 struct icmphdr *icmph;
86 if (!nf_reject_iphdr_validate(oldskb))
89 /* IP header checks: fragment. */
90 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
93 /* RFC says return as much as we can without exceeding 576 bytes. */
/* 536 = 576 minus 20-byte IP header and room for the ICMP header. */
94 len = min_t(unsigned int, 536, oldskb->len);
96 if (!pskb_may_pull(oldskb, len))
/* Drop any padding beyond tot_len so we never quote trailer bytes. */
99 if (pskb_trim_rcsum(oldskb, ntohs(ip_hdr(oldskb)->tot_len)))
102 proto = ip_hdr(oldskb)->protocol;
/* Only reply if the original's transport checksum is valid (for
 * protocols where we can/should verify it) — avoids reflecting
 * corrupted or forged packets. */
104 if (!skb_csum_unnecessary(oldskb) &&
105 nf_reject_verify_csum(proto) &&
106 nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), proto))
109 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
110 LL_MAX_HEADER + len, GFP_ATOMIC);
114 nskb->dev = (struct net_device *)dev;
116 skb_reserve(nskb, LL_MAX_HEADER);
117 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
118 net->ipv4.sysctl_ip_default_ttl);
120 skb_reset_transport_header(nskb);
/* skb_put_zero() clears the ICMP header (checksum, unused fields). */
121 icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
122 icmph->type = ICMP_DEST_UNREACH;
/* Quote the original packet starting at its IP header. */
125 skb_put_data(nskb, skb_network_header(oldskb), len);
/* ICMP checksum covers the ICMP header plus the quoted payload. */
127 csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
128 icmph->checksum = csum_fold(csum);
130 niph->tot_len = htons(nskb->len);
135 EXPORT_SYMBOL_GPL(nf_reject_skb_v4_unreach);
/* Locate and vet the TCP header of @oldskb for RST generation.
 *
 * Returns a pointer to the TCP header (possibly copied into the
 * caller-supplied @_oth buffer by skb_header_pointer()), or NULL when
 * no RST should be sent: the packet is a non-first fragment, is not
 * TCP, is itself a RST, or fails the TCP checksum.  (Return statements
 * are elided in this view — confirm NULL vs. oth paths.)
 */
137 const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
138 struct tcphdr *_oth, int hook)
140 const struct tcphdr *oth;
142 /* IP header checks: fragment. */
143 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
146 if (ip_hdr(oldskb)->protocol != IPPROTO_TCP)
/* Copy the TCP header out even if it is non-linear in the skb. */
149 oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
150 sizeof(struct tcphdr), _oth)
154 /* No RST for RST. */
/* Don't reply to packets whose TCP checksum doesn't verify. */
159 if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
164 EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);
/* Append a fresh IPv4 header to @nskb that mirrors @oldskb: source and
 * destination addresses swapped, DF set, the given @protocol and @ttl.
 * Returns the new header so the caller can fix up tot_len once the
 * full packet is built.  (Lines setting version/tos/id/ttl and the
 * return are elided in this view.)
 */
166 struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
167 const struct sk_buff *oldskb,
168 __u8 protocol, int ttl)
170 struct iphdr *niph, *oiph = ip_hdr(oldskb);
172 skb_reset_network_header(nskb);
173 niph = skb_put(nskb, sizeof(struct iphdr));
/* Reply carries no IP options: header is the minimal 5 words. */
175 niph->ihl = sizeof(struct iphdr) / 4;
178 niph->frag_off = htons(IP_DF);
179 niph->protocol = protocol;
/* Swap addresses: the reply goes back to the original sender. */
181 niph->saddr = oiph->daddr;
182 niph->daddr = oiph->saddr;
185 nskb->protocol = htons(ETH_P_IP);
189 EXPORT_SYMBOL_GPL(nf_reject_iphdr_put);
/* Append a TCP RST header to @nskb answering the original TCP header
 * @oth taken from @oldskb.  Ports are swapped; seq/ack are derived per
 * RFC 793 RST rules.  The checksum is set up for hardware/partial
 * completion (CHECKSUM_PARTIAL) over the pseudo-header.
 */
191 void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
192 const struct tcphdr *oth)
194 struct iphdr *niph = ip_hdr(nskb);
197 skb_reset_transport_header(nskb);
/* Zeroed header: all flags/fields start clear, then set what we need. */
198 tcph = skb_put_zero(nskb, sizeof(struct tcphdr));
199 tcph->source = oth->dest;
200 tcph->dest = oth->source;
201 tcph->doff = sizeof(struct tcphdr) / 4;
/* If the original carried an ACK, our RST's seq echoes that ack_seq
 * (the elided branch presumably handles the no-ACK case — confirm). */
204 tcph->seq = oth->ack_seq;
/* ACK the original segment: seq + SYN/FIN flags + payload length. */
206 tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
207 oldskb->len - ip_hdrlen(oldskb) -
/* Seed the pseudo-header checksum; the device (or sw fallback)
 * completes it via csum_start/csum_offset below. */
213 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
215 nskb->ip_summed = CHECKSUM_PARTIAL;
216 nskb->csum_start = (unsigned char *)tcph - nskb->head;
217 nskb->csum_offset = offsetof(struct tcphdr, check);
219 EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
/* Attach a route (dst_entry) to @skb_in for replying to its sender.
 * Used on PRE_ROUTING/INGRESS where no dst has been set yet: routes
 * toward the original source address and stores the result on the skb.
 * NOTE(review): the return statements and the nf_ip_route() error
 * check are elided in this view — presumably returns 0 on success,
 * negative on routing failure; confirm.
 */
221 static int nf_reject_fill_skb_dst(struct sk_buff *skb_in)
223 struct dst_entry *dst = NULL;
226 memset(&fl, 0, sizeof(struct flowi));
/* Route back to the original packet's source. */
227 fl.u.ip4.daddr = ip_hdr(skb_in)->saddr;
228 nf_ip_route(dev_net(skb_in->dev), &dst, &fl, false);
232 skb_dst_set(skb_in, dst);
/* Send a TCP RST in response to @oldskb (e.g. for the REJECT target
 * with --reject-with tcp-reset).
 *
 * Validates the original packet, builds a mirrored IP+TCP RST skb,
 * routes it, and transmits it — either via dev_queue_xmit() with a
 * hand-built ethernet header for bridged traffic, or ip_local_out()
 * otherwise.  Error paths (frees, early returns) are elided in this
 * view.
 */
237 void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
240 struct net_device *br_indev __maybe_unused;
241 struct sk_buff *nskb;
243 const struct tcphdr *oth;
/* NULL means the original must not be answered with a RST. */
246 oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
/* Before routing, PRE_ROUTING/INGRESS skbs have no dst yet. */
250 if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
251 nf_reject_fill_skb_dst(oldskb) < 0)
/* Never RST broadcast/multicast traffic. */
254 if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
257 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
258 LL_MAX_HEADER, GFP_ATOMIC);
262 /* ip_route_me_harder expects skb->dst to be set */
263 skb_dst_set_noref(nskb, skb_dst(oldskb));
/* Propagate the fwmark if the reply-mark sysctl asks for it. */
265 nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
267 skb_reserve(nskb, LL_MAX_HEADER);
/* TTL from the route, unlike nf_reject_skb_v4_tcp_reset() which
 * uses the sysctl default. */
268 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
269 ip4_dst_hoplimit(skb_dst(nskb)));
270 nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
/* Re-route with our own addresses; drops the skb on failure
 * (the error label is elided in this view). */
271 if (ip_route_me_harder(net, sk, nskb, RTN_UNSPEC))
276 /* "Never happens" */
277 if (nskb->len > dst_mtu(skb_dst(nskb)))
/* Associate the RST with the original flow's conntrack entry. */
280 nf_ct_attach(nskb, oldskb);
282 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
283 /* If we use ip_local_out for bridged traffic, the MAC source on
284 * the RST will be ours, instead of the destination's. This confuses
285 * some routers/firewalls, and they drop the packet. So we need to
286 * build the eth header using the original destination's MAC as the
287 * source, and send the RST packet directly.
/* NOTE(review): nf_bridge_get_physindev() was later replaced by an
 * ifindex-based lookup upstream to avoid a use-after-free of the
 * bridge port device — confirm this tree is not affected. */
289 br_indev = nf_bridge_get_physindev(oldskb);
291 struct ethhdr *oeth = eth_hdr(oldskb);
293 nskb->dev = br_indev;
294 niph->tot_len = htons(nskb->len);
/* Swap MACs: original destination's MAC becomes our source. */
296 if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
297 oeth->h_source, oeth->h_dest, nskb->len) < 0)
299 dev_queue_xmit(nskb);
302 ip_local_out(net, nskb->sk, nskb);
309 EXPORT_SYMBOL_GPL(nf_send_reset);
/* Send an ICMP destination-unreachable (with the given @code) for
 * @skb_in, as used by the REJECT target.  Skips non-first fragments,
 * fills in a dst on PRE_ROUTING/INGRESS, and only replies when the
 * original's transport checksum is valid or unverifiable — mirroring
 * the checks in nf_reject_skb_v4_unreach().
 */
311 void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
313 struct iphdr *iph = ip_hdr(skb_in);
314 u8 proto = iph->protocol;
/* Never reply to a non-first fragment. */
316 if (iph->frag_off & htons(IP_OFFSET))
319 if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
320 nf_reject_fill_skb_dst(skb_in) < 0)
/* Checksum already verified, or protocol we don't verify: reply. */
323 if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
324 icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
/* Otherwise reply only if the checksum verifies now. */
328 if (nf_ip_checksum(skb_in, hook, ip_hdrlen(skb_in), proto) == 0)
329 icmp_send(skb_in, ICMP_DEST_UNREACH, code, 0);
331 EXPORT_SYMBOL_GPL(nf_send_unreach);
333 MODULE_LICENSE("GPL");