// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 */

#define pr_fmt(fmt) "IPv4: " fmt
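
/* Illustration of what the pr_fmt() definition above buys us: every
 * pr_*() call in this file picks up the "IPv4: " prefix automatically.
 * A hypothetical (not emitted by this file)
 *
 *	pr_warn("bad gateway\n");
 *
 * would thus be logged as "IPv4: bad gateway".
 */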

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>

#include "fib_lookup.h"

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;

static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;

/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu,
					   bool confirm_neigh);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void		 ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
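
/* Sketch of how the table above is consumed: include/net/route.h maps the
 * TOS nibble of an outgoing packet to a queueing priority with
 *
 *	static inline char rt_tos2priority(u8 tos)
 *	{
 *		return ip_tos2prio[IPTOS_TOS(tos)>>1];
 *	}
 *
 * so e.g. a "low delay" TOS selects TC_PRIO_INTERACTIVE.
 */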

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	(*pos)++;
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,
		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", 0444, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", 0444,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
				 rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =	{
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}
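
/* Minimal sketch of how the genid bump above invalidates cached routes:
 * every rtable minted before the bump now fails rt_is_expired(), so the
 * next dst_check() on it returns NULL and the caller re-resolves:
 *
 *	rt_cache_flush(net);		// rt_genid_ipv4(net) changes
 *	dst_check(&rt->dst, 0);		// NULL for any pre-bump rt
 */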

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	struct neighbour *n;

	rcu_read_lock_bh();

	if (likely(rt->rt_gw_family == AF_INET)) {
		n = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		n = ip_neigh_gw6(dev, &rt->rt_gw6);
	} else {
		__be32 pkey;

		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
		n = ip_neigh_gw4(dev, pkey);
	}

	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;

	rcu_read_unlock_bh();

	return n;
}

static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;

	if (rt->rt_gw_family == AF_INET) {
		pkey = (const __be32 *)&rt->rt_gw4;
	} else if (rt->rt_gw_family == AF_INET6) {
		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
	} else if (!daddr ||
		   (rt->rt_flags &
		    (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
		return;
	}

	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}

#define IP_IDENTS_SZ 2048u

static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = READ_ONCE(*p_tstamp);
	u32 now = (u32)jiffies;
	u32 new, delta = 0;

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = prandom_u32_max(now - old);

	/* Do not use atomic_add_return() as it makes UBSAN unhappy */
	do {
		old = (u32)atomic_read(p_id);
		new = old + delta + segs;
	} while (atomic_cmpxchg(p_id, old, new) != old);

	return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	u32 hash, id;

	/* Note the following code is not safe, but this is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash_3u32((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    &net->ipv4.ip_id_key);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
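
/* Usage sketch (assumption: a caller building a non-DF header, as the
 * ip_select_ident_segs() helper does): reserve one IP ID per GSO segment.
 *
 *	__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);
 */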

static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}

static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}

static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}

static inline u32 fnhe_hashfun(__be32 daddr)
{
	static u32 fnhe_hashrnd __read_mostly;
	u32 hval;

	net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
	hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
	return hash_32(hval, FNHE_HASH_SHIFT);
}

static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_uses_gateway = 1;
		rt->rt_gw_family = AF_INET;
		rt->rt_gw4 = fnhe->fnhe_gw;
	}
}

static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
				  __be32 gw, u32 pmtu, bool lock,
				  unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nhc->nhc_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nhc->nhc_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nhc->nhc_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nhc->nhc_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;

			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}

static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(n->nud_state & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh_common *nhc = FIB_RES_NHC(res);

				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
						0, false,
						jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
}

static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	rt = (struct rtable *) dst;

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}

static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}

	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
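
/* Worked example with the defaults declared earlier in this file
 * (ip_rt_redirect_load = HZ/50, ip_rt_redirect_number = 9): redirect k
 * (0-based) is only sent once jiffies pass
 * rate_last + (ip_rt_redirect_load << k), i.e. after 20ms, 40ms, 80ms, ...
 * After 9 redirects we go silent until the source needs no redirect for
 * ip_rt_redirect_silence = (HZ/50) << 10, roughly 20s, at which point the
 * counters reset and the cycle starts over.
 */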

void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->n_redirects)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->n_redirects == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}

static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}

static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	u32 old_mtu = ipv4_mtu(dst);
	struct fib_result res;
	bool lock = false;

	if (ip_mtu_locked(dst))
		return;

	if (old_mtu < mtu)
		return;

	if (mtu < ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
		struct fib_nh_common *nhc = FIB_RES_NHC(res);

		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu,
			      bool confirm_neigh)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);

static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);

void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD.
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}
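
/* Caller-side sketch of the contract above: any code holding a cached
 * ipv4 dst revalidates it before use, and a route killed by a redirect
 * or PMTU update fails the check (illustration, cookie 0 as used for
 * plain IPv4 routes):
 *
 *	if (!dst_check(&rt->dst, 0))
 *		rt = NULL;	// stale: look the route up again
 */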

static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
	struct ip_options opt;
	int res;

	/* Recompile ip options since IPCB may not be valid anymore.
	 * Also check we have a reasonable ipv4 header.
	 */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
		return;

	memset(&opt, 0, sizeof(opt));
	if (ip_hdr(skb)->ihl > 5) {
		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
			return;
		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

		rcu_read_lock();
		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
		rcu_read_unlock();

		if (res)
			return;
	}
	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	ipv4_send_dest_unreach(skb);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}

/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so that it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be not aligned in IP options!
 */

void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = RT_TOS(iph->tos),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif

static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
				    ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}

static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		goto out;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

out:
	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nhc->nhc_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}

static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
					       __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nhc, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}

/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */

u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_nh_common *nhc = res->nhc;
	struct net_device *dev = nhc->nhc_dev;
	struct fib_info *fi = res->fi;
	u32 mtu = 0;

	if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nhc, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}
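
/* Example of the precedence implemented above, with hypothetical numbers:
 * a route whose mtu metric (1400) is locked forwards with 1400 even if a
 * nexthop exception recorded 1280; unlock the metric and the unexpired
 * exception wins (1280); with neither, the egress device mtu (say 1500)
 * is used. Any lwtunnel encap headroom is subtracted at the end.
 */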

static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gw4) {
			rt->rt_gw4 = daddr;
			rt->rt_gw_family = AF_INET;
		}

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}

static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nhc->nhc_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			rt_add_uncached_list(orig);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->rt_uncached)) {
		struct uncached_list *ul = rt->rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;

	ip_dst_metrics_put(dst);
	rt_del_uncached_list(rt);
}

void rt_flush_dev(struct net_device *dev)
{
	struct rtable *rt;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = blackhole_netdev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&ul->lock);
	}
}

static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}

static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
			rt->rt_uses_gateway = 1;
			rt->rt_gw_family = nhc->nhc_gw_family;
			/* only INET and INET6 are supported */
			if (likely(nhc->nhc_gw_family == AF_INET))
				rt->rt_gw4 = nhc->nhc_gw.ipv4;
			else
				rt->rt_gw6 = nhc->nhc_gw.ipv6;
		}

		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			rt->dst.tclassid = nh->nh_tclassid;
		}
#endif
		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nhc, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gw4) {
				rt->rt_gw_family = AF_INET;
				rt->rt_gw4 = daddr;
			}
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}

struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool nopolicy, bool noxfrm, bool will_cache)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
		       (will_cache ? 0 : DST_HOST) |
		       (nopolicy ? DST_NOPOLICY : 0) |
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_uses_gateway = 0;
		rt->rt_gw_family = 0;
		rt->rt_gw4 = 0;
		INIT_LIST_HEAD(&rt->rt_uncached);

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);

struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
	struct rtable *new_rt;

	new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
			   rt->dst.flags);

	if (new_rt) {
		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		new_rt->rt_flags = rt->rt_flags;
		new_rt->rt_type = rt->rt_type;
		new_rt->rt_is_input = rt->rt_is_input;
		new_rt->rt_iif = rt->rt_iif;
		new_rt->rt_pmtu = rt->rt_pmtu;
		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
		new_rt->rt_gw_family = rt->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			new_rt->rt_gw4 = rt->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			new_rt->rt_gw6 = rt->rt_gw6;
		INIT_LIST_HEAD(&new_rt->rt_uncached);

		new_rt->dst.flags |= DST_HOST;
		new_rt->dst.input = rt->dst.input;
		new_rt->dst.output = rt->dst.output;
		new_rt->dst.error = rt->dst.error;
		new_rt->dst.lastuse = jiffies;
		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
	}
	return new_rt;
}
EXPORT_SYMBOL(rt_dst_clone);

/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag)
{
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		return -EINVAL;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return -EINVAL;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return -EINVAL;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, itag);
		if (err < 0)
			return err;
	}
	return 0;
}

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	struct rtable *rth;
	u32 itag = 0;
	int err;

	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
	if (err)
		return err;

	if (our)
		flags |= RTCF_LOCAL;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input= 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;
}

static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation: if source is martian,
		 *	the only hint is the MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}

/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
	struct net_device *dev = nhc->nhc_dev;
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(dev);
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP)) {
		__be32 gw;

		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
		if (IN_DEV_SHARED_MEDIA(out_dev) ||
		    inet_addr_onlink(out_dev, saddr, gw))
			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * Proxy arp feature have been extended to allow, ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(nhc, daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
 cleanup:
	return err;
}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	if (!icmp_is_err(icmph->type))
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}

/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
	struct flow_keys hash_keys;
	u32 mhash;

	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}

			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		break;
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		/* skb is currently provided only when forwarding */
		if (skb) {
			struct flow_keys keys;

			skb_flow_dissect_flow_keys(skb, &keys, 0);
			/* Inner can be v4 or v6 */
			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
				hash_keys.tags.flow_label = keys.tags.flow_label;
				hash_keys.basic.ip_proto = keys.basic.ip_proto;
			} else {
				/* Same as case 0 */
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				ip_multipath_l3_keys(skb, &hash_keys);
			}
		} else {
			/* Same as case 0 */
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	if (multipath_hash)
		mhash = jhash_2words(mhash, multipath_hash, 0);

	return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
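
/* The switch above is driven by the net.ipv4.fib_multipath_hash_policy
 * sysctl: 0 hashes L3 addresses (inner addresses for ICMP errors),
 * 1 hashes the L4 five-tuple, 2 hashes the inner L3 header of
 * encapsulated packets when one is present. A minimal caller sketch:
 *
 *	int h = fib_multipath_hash(net, fl4, skb, NULL);
 *	fib_select_multipath(res, h);	// picks the nexthop for res
 */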

static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos,
			    struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && fib_info_num_path(res->fi) > 1) {
		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, h);
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}

/* Implements all the saddr-related checks as ip_route_input_slow(),
 * assuming daddr is valid and the destination is not a local broadcast one.
 * Uses the provided hint instead of performing a route lookup.
 */
int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		      u8 tos, struct net_device *dev,
		      const struct sk_buff *hint)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct rtable *rt = (struct rtable *)hint;
	struct net *net = dev_net(dev);
	int err = -EINVAL;
	u32 tag = 0;

	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
		goto martian_source;

	if (rt->rt_type != RTN_LOCAL)
		goto skip_validate_source;

	tos &= IPTOS_RT_MASK;
	err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
	if (err < 0)
		goto martian_source;

skip_validate_source:
	skb_dst_copy(skb, hint);
	return 0;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	return err;
}

/*
 * NOTE. We drop all the packets that have a local source
 * address, because every properly looped back packet
 * must have the correct destination already attached by the output routine.
 * Changes in the enforced policies must be applied also to
 * ip_route_use_hint().
 *
 * Such an approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with 100% of guarantee.
 *
 * called with rcu_read_lock()
 */

static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev,
			       struct fib_result *res)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flow_keys *flkeys = NULL, _flkeys;
	struct net    *net = dev_net(dev);
	struct ip_tunnel_info *tun_info;
	int		err = -EINVAL;
	unsigned int	flags = 0;
	u32		itag = 0;
	struct rtable	*rth;
	struct flowi4	fl4;
	bool do_cache = true;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	 * by fib_lookup.
	 */

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
	else
		fl4.flowi4_tun_key.tun_id = 0;
	skb_dst_drop(skb);

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res->fi = NULL;
	res->table = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I even do not know to fix it or not. Waiting for complains :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * and call it once if daddr or/and saddr are loopback addresses
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_uid = sock_net_uid(net, NULL);

	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
		flkeys = &_flkeys;
	} else {
		fl4.flowi4_proto = 0;
		fl4.fl4_sport = 0;
		fl4.fl4_dport = 0;
	}

	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res->type == RTN_BROADCAST) {
		if (IN_DEV_BFORWARD(in_dev))
			goto make_route;
		/* not do cache if bc_forwarding is enabled */
		if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
			do_cache = false;
		goto brd_input;
	}

	if (res->type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res->type != RTN_UNICAST)
		goto martian_destination;

make_route:
	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source;
	}
	flags |= RTCF_BROADCAST;
	res->type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	do_cache &= res->fi && !itag;
	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			err = 0;
			goto out;
		}
	}

	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
			   flags | RTCF_LOCAL, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.output= ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res->type == RTN_UNREACHABLE) {
		rth->dst.input= ip_error;
		rth->dst.error= -err;
		rth->rt_flags	&= ~RTCF_LOCAL;
	}

	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
			WARN_ON(rth->dst.input == lwtunnel_input);
			rth->dst.lwtstate->orig_input = rth->dst.input;
			rth->dst.input = lwtunnel_input;
		}

		if (unlikely(!rt_cache_route(nhc, rth)))
			rt_add_uncached_list(rth);
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res->type = RTN_UNREACHABLE;
	res->fi = NULL;
	res->table = NULL;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}

int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	struct fib_result res;
	int err;

	tos &= IPTOS_RT_MASK;
	rcu_read_lock();
	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(ip_route_input_noref);
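
/* Receive-path usage sketch (illustration, in the spirit of what
 * ip_rcv_finish() does): resolve and attach a route for an incoming skb.
 *
 *	err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 *				   iph->tos, skb->dev);
 *	if (unlikely(err))
 *		goto drop;
 */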

/* called with rcu_read_lock held */
int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		       u8 tos, struct net_device *dev, struct fib_result *res)
{
	/* Multicast recognition logic is moved from route cache to here.
	 * The problem was that too many Ethernet cards have broken/missing
	 * hardware multicast filters :-( As a result the host on a multicasting
	 * network acquires a lot of useless route cache entries, sort of
	 * SDR messages from all the world. Now we try to get rid of them.
	 * Really, provided software IP multicast filter is organized
	 * reasonably (at least, hashed), it does not result in a slowdown
	 * comparing with route cache reject entries.
	 * Note, that multicast routers are not affected, because
	 * route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		int our = 0;
		int err = -EINVAL;

		if (!in_dev)
			return err;
		our = ip_check_mc_rcu(in_dev, daddr, saddr,
				      ip_hdr(skb)->protocol);

		/* check l3 master if no match yet */
		if (!our && netif_is_l3_slave(dev)) {
			struct in_device *l3_in_dev;

			l3_in_dev = __in_dev_get_rcu(skb->dev);
			if (l3_in_dev)
				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
						      ip_hdr(skb)->protocol);
		}

		if (our
#ifdef CONFIG_IP_MROUTE
			||
		    (!ipv4_is_local_multicast(daddr) &&
		     IN_DEV_MFORWARD(in_dev))
#endif
		   ) {
			err = ip_route_input_mc(skb, daddr, saddr,
						tos, dev, our);
		}
		return err;
	}

	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}

/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) &&
		    !(dev_out->flags & IFF_LOOPBACK) &&
		    !netif_is_l3_master(dev_out))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If multicast route do not exist use
		 * default one, but do not gateway in this case.
		 * Yes, it is hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
		   (orig_oif != dev_out->ifindex)) {
		/* For local routes that require a particular output interface
		 * we do not want to cache the result.  Caching the result
		 * causes incorrect behaviour when there are multiple source
		 * addresses on the interface, the end result being that if the
		 * intended recipient is waiting on that interface for the
		 * packet he won't receive it because it will be delivered on
		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
		 * be set to the loopback interface as well.
		 */
		do_cache = false;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
		struct rtable __rcu **prth;

		fnhe = find_exception(nhc, fl4->daddr);
		if (!do_cache)
			goto add;
		if (fnhe) {
			prth = &fnhe->fnhe_rth_output;
		} else {
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nhc->nhc_gw_family &&
				       nhc->nhc_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
			return rth;
	}

add:
	rth = rt_dst_alloc(dev_out, flags, type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->rt_iif = orig_oif;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
	lwtunnel_set_redirect(&rth->dst);

	return rth;
}
2478 * Major route resolver routine.
2481 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2482 const struct sk_buff *skb)
2484 __u8 tos = RT_FL_TOS(fl4);
2485 struct fib_result res = {
2493 fl4->flowi4_iif = LOOPBACK_IFINDEX;
2494 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2495 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2496 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2499 rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2504 EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
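/* Most callers do not use this entry point directly but go through the
 * wrappers in <net/route.h>. A minimal caller sketch (illustrative only;
 * dst_ip and net come from the caller's context):
 *
 *	struct flowi4 fl4 = { .daddr = dst_ip };
 *	struct rtable *rt = ip_route_output_key(net, &fl4);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 */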
2506 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2507 struct fib_result *res,
2508 const struct sk_buff *skb)
2510 struct net_device *dev_out = NULL;
2511 int orig_oif = fl4->flowi4_oif;
2512 unsigned int flags = 0;
2517 if (ipv4_is_multicast(fl4->saddr) ||
2518 ipv4_is_lbcast(fl4->saddr) ||
2519 ipv4_is_zeronet(fl4->saddr)) {
2520 rth = ERR_PTR(-EINVAL);
2524 rth = ERR_PTR(-ENETUNREACH);
2526 /* I removed the check for oif == dev_out->oif here.
2527 It was wrong for two reasons:
2528 1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
2529 is assigned to multiple interfaces.
2530 2. Moreover, we are allowed to send packets with the saddr
2531 of another iface. --ANK
2534 if (fl4->flowi4_oif == 0 &&
2535 (ipv4_is_multicast(fl4->daddr) ||
2536 ipv4_is_lbcast(fl4->daddr))) {
2537 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2538 dev_out = __ip_dev_find(net, fl4->saddr, false);
2542 /* Special hack: the user can direct multicasts
2543 and limited broadcasts via the necessary interface
2544 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2545 This hack is not just for fun: it allows
2546 vic, vat and friends to work.
2547 They bind a socket to loopback, set the ttl to zero
2548 and expect that it will work.
2549 From the viewpoint of the routing cache they are broken,
2550 because we are not allowed to build a multicast path
2551 with a loopback source addr (the routing cache
2552 cannot know that the ttl is zero, so the packet
2553 will never leave this host and the route is valid).
2554 Luckily, this hack is a good workaround.
2557 fl4->flowi4_oif = dev_out->ifindex;
2561 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2562 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2563 if (!__ip_dev_find(net, fl4->saddr, false))
2569 if (fl4->flowi4_oif) {
2570 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2571 rth = ERR_PTR(-ENODEV);
2575 /* RACE: Check return value of inet_select_addr instead. */
2576 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2577 rth = ERR_PTR(-ENETUNREACH);
2580 if (ipv4_is_local_multicast(fl4->daddr) ||
2581 ipv4_is_lbcast(fl4->daddr) ||
2582 fl4->flowi4_proto == IPPROTO_IGMP) {
2584 fl4->saddr = inet_select_addr(dev_out, 0,
2589 if (ipv4_is_multicast(fl4->daddr))
2590 fl4->saddr = inet_select_addr(dev_out, 0,
2592 else if (!fl4->daddr)
2593 fl4->saddr = inet_select_addr(dev_out, 0,
2599 fl4->daddr = fl4->saddr;
2601 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2602 dev_out = net->loopback_dev;
2603 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2604 res->type = RTN_LOCAL;
2605 flags |= RTCF_LOCAL;
2609 err = fib_lookup(net, fl4, res, 0);
2613 if (fl4->flowi4_oif &&
2614 (ipv4_is_multicast(fl4->daddr) ||
2615 !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
2616 /* Apparently, the routing tables are wrong. Assume
2617 that the destination is on-link.
2620 Because we are allowed to send to an iface
2621 even if it has NO routes and NO assigned
2622 addresses. When oif is specified, the routing
2623 tables are looked up with only one purpose:
2624 to check whether the destination is gatewayed rather
2625 than direct. Moreover, if MSG_DONTROUTE is set,
2626 we send the packet, ignoring both the routing tables
2627 and the ifaddr state. --ANK
2630 We could do the same even when oif is unknown
2631 (as IPv6 likely does), but we do not.
2634 if (fl4->saddr == 0)
2635 fl4->saddr = inet_select_addr(dev_out, 0,
2637 res->type = RTN_UNICAST;
2644 if (res->type == RTN_LOCAL) {
2646 if (res->fi->fib_prefsrc)
2647 fl4->saddr = res->fi->fib_prefsrc;
2649 fl4->saddr = fl4->daddr;
2652 /* L3 master device is the loopback for that domain */
2653 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2656 /* make sure orig_oif points to the fib result device even
2657 * though packet rx/tx happens over loopback or an l3mdev
2659 orig_oif = FIB_RES_OIF(*res);
2661 fl4->flowi4_oif = dev_out->ifindex;
2662 flags |= RTCF_LOCAL;
2666 fib_select_path(net, res, fl4, skb);
2668 dev_out = FIB_RES_DEV(*res);
2669 fl4->flowi4_oif = dev_out->ifindex;
2673 rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2679 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2684 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2686 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2688 return mtu ? : dst->dev->mtu;
2691 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2692 struct sk_buff *skb, u32 mtu,
2697 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2698 struct sk_buff *skb)
2702 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2708 static struct dst_ops ipv4_dst_blackhole_ops = {
2710 .check = ipv4_blackhole_dst_check,
2711 .mtu = ipv4_blackhole_mtu,
2712 .default_advmss = ipv4_default_advmss,
2713 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2714 .redirect = ipv4_rt_blackhole_redirect,
2715 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
2716 .neigh_lookup = ipv4_neigh_lookup,
2719 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2721 struct rtable *ort = (struct rtable *) dst_orig;
2724 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2726 struct dst_entry *new = &rt->dst;
2729 new->input = dst_discard;
2730 new->output = dst_discard_out;
2732 new->dev = net->loopback_dev;
2736 rt->rt_is_input = ort->rt_is_input;
2737 rt->rt_iif = ort->rt_iif;
2738 rt->rt_pmtu = ort->rt_pmtu;
2739 rt->rt_mtu_locked = ort->rt_mtu_locked;
2741 rt->rt_genid = rt_genid_ipv4(net);
2742 rt->rt_flags = ort->rt_flags;
2743 rt->rt_type = ort->rt_type;
2744 rt->rt_uses_gateway = ort->rt_uses_gateway;
2745 rt->rt_gw_family = ort->rt_gw_family;
2746 if (rt->rt_gw_family == AF_INET)
2747 rt->rt_gw4 = ort->rt_gw4;
2748 else if (rt->rt_gw_family == AF_INET6)
2749 rt->rt_gw6 = ort->rt_gw6;
2751 INIT_LIST_HEAD(&rt->rt_uncached);
2754 dst_release(dst_orig);
2756 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
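/* A blackhole route discards traffic in both directions (dst_discard /
 * dst_discard_out) while preserving the original route's metadata; xfrm
 * uses it to park a socket's output path while an IPsec state is still
 * being resolved.
 */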
2759 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2760 const struct sock *sk)
2762 struct rtable *rt = __ip_route_output_key(net, flp4);
2767 if (flp4->flowi4_proto)
2768 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2769 flowi4_to_flowi(flp4),
2774 EXPORT_SYMBOL_GPL(ip_route_output_flow);
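/* Caller sketch (illustrative only; dst_ip, net, sk and skb are assumed
 * to come from the caller's context, and error handling is trimmed):
 *
 *	struct flowi4 fl4 = {
 *		.daddr        = dst_ip,
 *		.flowi4_proto = IPPROTO_UDP,
 *	};
 *	struct rtable *rt = ip_route_output_flow(net, &fl4, sk);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	skb_dst_set(skb, &rt->dst);
 *
 * Setting flowi4_proto is what opts the result into the xfrm lookup
 * performed above.
 */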
2776 /* called with rcu_read_lock held */
2777 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2778 struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2779 struct sk_buff *skb, u32 portid, u32 seq,
2783 struct nlmsghdr *nlh;
2784 unsigned long expires = 0;
2786 u32 metrics[RTAX_MAX];
2788 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2792 r = nlmsg_data(nlh);
2793 r->rtm_family = AF_INET;
2794 r->rtm_dst_len = 32;
2796 r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
2797 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2798 if (nla_put_u32(skb, RTA_TABLE, table_id))
2799 goto nla_put_failure;
2800 r->rtm_type = rt->rt_type;
2801 r->rtm_scope = RT_SCOPE_UNIVERSE;
2802 r->rtm_protocol = RTPROT_UNSPEC;
2803 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2804 if (rt->rt_flags & RTCF_NOTIFY)
2805 r->rtm_flags |= RTM_F_NOTIFY;
2806 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2807 r->rtm_flags |= RTCF_DOREDIRECT;
2809 if (nla_put_in_addr(skb, RTA_DST, dst))
2810 goto nla_put_failure;
2812 r->rtm_src_len = 32;
2813 if (nla_put_in_addr(skb, RTA_SRC, src))
2814 goto nla_put_failure;
2817 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2818 goto nla_put_failure;
2819 #ifdef CONFIG_IP_ROUTE_CLASSID
2820 if (rt->dst.tclassid &&
2821 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2822 goto nla_put_failure;
2824 if (fl4 && !rt_is_input_route(rt) &&
2825 fl4->saddr != src) {
2826 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2827 goto nla_put_failure;
2829 if (rt->rt_uses_gateway) {
2830 if (rt->rt_gw_family == AF_INET &&
2831 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
2832 goto nla_put_failure;
2833 } else if (rt->rt_gw_family == AF_INET6) {
2834 int alen = sizeof(struct in6_addr);
2838 nla = nla_reserve(skb, RTA_VIA, alen + 2);
2840 goto nla_put_failure;
2842 via = nla_data(nla);
2843 via->rtvia_family = AF_INET6;
2844 memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
2848 expires = rt->dst.expires;
2850 unsigned long now = jiffies;
2852 if (time_before(now, expires))
2858 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2859 if (rt->rt_pmtu && expires)
2860 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2861 if (rt->rt_mtu_locked && expires)
2862 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2863 if (rtnetlink_put_metrics(skb, metrics) < 0)
2864 goto nla_put_failure;
2867 if (fl4->flowi4_mark &&
2868 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2869 goto nla_put_failure;
2871 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2872 nla_put_u32(skb, RTA_UID,
2873 from_kuid_munged(current_user_ns(),
2875 goto nla_put_failure;
2877 if (rt_is_input_route(rt)) {
2878 #ifdef CONFIG_IP_MROUTE
2879 if (ipv4_is_multicast(dst) &&
2880 !ipv4_is_local_multicast(dst) &&
2881 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2882 int err = ipmr_get_route(net, skb,
2883 fl4->saddr, fl4->daddr,
2889 goto nla_put_failure;
2893 if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
2894 goto nla_put_failure;
2898 error = rt->dst.error;
2900 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2901 goto nla_put_failure;
2903 nlmsg_end(skb, nlh);
2907 nlmsg_cancel(skb, nlh);
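/* nla_put_failure unwinds the partially built message; per netlink
 * convention the function then returns -EMSGSIZE so the caller can retry
 * with a larger skb where that makes sense.
 */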
2911 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
2912 struct netlink_callback *cb, u32 table_id,
2913 struct fnhe_hash_bucket *bucket, int genid,
2914 int *fa_index, int fa_start, unsigned int flags)
2918 for (i = 0; i < FNHE_HASH_SIZE; i++) {
2919 struct fib_nh_exception *fnhe;
2921 for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
2922 fnhe = rcu_dereference(fnhe->fnhe_next)) {
2926 if (*fa_index < fa_start)
2929 if (fnhe->fnhe_genid != genid)
2932 if (fnhe->fnhe_expires &&
2933 time_after(jiffies, fnhe->fnhe_expires))
2936 rt = rcu_dereference(fnhe->fnhe_rth_input);
2938 rt = rcu_dereference(fnhe->fnhe_rth_output);
2942 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
2943 table_id, NULL, skb,
2944 NETLINK_CB(cb->skb).portid,
2945 cb->nlh->nlmsg_seq, flags);
2956 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
2957 u32 table_id, struct fib_info *fi,
2958 int *fa_index, int fa_start, unsigned int flags)
2960 struct net *net = sock_net(cb->skb->sk);
2961 int nhsel, genid = fnhe_genid(net);
2963 for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
2964 struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
2965 struct fnhe_hash_bucket *bucket;
2968 if (nhc->nhc_flags & RTNH_F_DEAD)
2972 bucket = rcu_dereference(nhc->nhc_exceptions);
2975 err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
2976 genid, fa_index, fa_start,
2986 static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
2987 u8 ip_proto, __be16 sport,
2990 struct sk_buff *skb;
2993 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2997 /* Reserve room for dummy headers; this skb can pass
2998 * through a good chunk of the routing engine.
3000 skb_reset_mac_header(skb);
3001 skb_reset_network_header(skb);
3002 skb->protocol = htons(ETH_P_IP);
3003 iph = skb_put(skb, sizeof(struct iphdr));
3004 iph->protocol = ip_proto;
3010 skb_set_transport_header(skb, skb->len);
3012 switch (iph->protocol) {
3014 struct udphdr *udph;
3016 udph = skb_put_zero(skb, sizeof(struct udphdr));
3017 udph->source = sport;
3019 udph->len = sizeof(struct udphdr);
3024 struct tcphdr *tcph;
3026 tcph = skb_put_zero(skb, sizeof(struct tcphdr));
3027 tcph->source = sport;
3029 tcph->doff = sizeof(struct tcphdr) / 4;
3031 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
3035 case IPPROTO_ICMP: {
3036 struct icmphdr *icmph;
3038 icmph = skb_put_zero(skb, sizeof(struct icmphdr));
3039 icmph->type = ICMP_ECHO;
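/* For a pure route lookup only the protocol (and, for TCP/UDP, the
 * ports that feed multipath hashing) matters; the ICMP code and
 * checksum fields can stay zero here.
 */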
3047 static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
3048 const struct nlmsghdr *nlh,
3050 struct netlink_ext_ack *extack)
3055 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
3056 NL_SET_ERR_MSG(extack,
3057 "ipv4: Invalid header for route get request");
3061 if (!netlink_strict_get_check(skb))
3062 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
3063 rtm_ipv4_policy, extack);
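/* From here on we are in strict mode (netlink_strict_get_check() was
 * true): header fields and attributes this handler would ignore are
 * rejected with an extack message instead of being silently accepted.
 */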
3065 rtm = nlmsg_data(nlh);
3066 if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
3067 (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
3068 rtm->rtm_table || rtm->rtm_protocol ||
3069 rtm->rtm_scope || rtm->rtm_type) {
3070 NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
3074 if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
3075 RTM_F_LOOKUP_TABLE |
3077 NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
3081 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
3082 rtm_ipv4_policy, extack);
3086 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
3087 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
3088 NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
3092 for (i = 0; i <= RTA_MAX; i++) {
3108 NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
3116 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3117 struct netlink_ext_ack *extack)
3119 struct net *net = sock_net(in_skb->sk);
3120 struct nlattr *tb[RTA_MAX+1];
3121 u32 table_id = RT_TABLE_MAIN;
3122 __be16 sport = 0, dport = 0;
3123 struct fib_result res = {};
3124 u8 ip_proto = IPPROTO_UDP;
3125 struct rtable *rt = NULL;
3126 struct sk_buff *skb;
3128 struct flowi4 fl4 = {};
3136 err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
3140 rtm = nlmsg_data(nlh);
3141 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
3142 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
3143 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
3144 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
3146 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
3148 uid = (iif ? INVALID_UID : current_uid());
3150 if (tb[RTA_IP_PROTO]) {
3151 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
3152 &ip_proto, AF_INET, extack);
3158 sport = nla_get_be16(tb[RTA_SPORT]);
3161 dport = nla_get_be16(tb[RTA_DPORT]);
3163 skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
3169 fl4.flowi4_tos = rtm->rtm_tos;
3170 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
3171 fl4.flowi4_mark = mark;
3172 fl4.flowi4_uid = uid;
3174 fl4.fl4_sport = sport;
3176 fl4.fl4_dport = dport;
3177 fl4.flowi4_proto = ip_proto;
3182 struct net_device *dev;
3184 dev = dev_get_by_index_rcu(net, iif);
3190 fl4.flowi4_iif = iif; /* for rt_fill_info */
3193 err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
3196 rt = skb_rtable(skb);
3197 if (err == 0 && rt->dst.error)
3198 err = -rt->dst.error;
3200 fl4.flowi4_iif = LOOPBACK_IFINDEX;
3201 skb->dev = net->loopback_dev;
3202 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
3207 skb_dst_set(skb, &rt->dst);
3213 if (rtm->rtm_flags & RTM_F_NOTIFY)
3214 rt->rt_flags |= RTCF_NOTIFY;
3216 if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
3217 table_id = res.table ? res.table->tb_id : 0;
3219 /* reset skb for netlink reply msg */
3221 skb_reset_network_header(skb);
3222 skb_reset_transport_header(skb);
3223 skb_reset_mac_header(skb);
3225 if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
3226 struct fib_rt_info fri;
3229 err = fib_props[res.type].error;
3231 err = -EHOSTUNREACH;
3235 fri.tb_id = table_id;
3236 fri.dst = res.prefix;
3237 fri.dst_len = res.prefixlen;
3238 fri.tos = fl4.flowi4_tos;
3239 fri.type = rt->rt_type;
3243 struct fib_alias *fa;
3245 hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
3246 u8 slen = 32 - fri.dst_len;
3248 if (fa->fa_slen == slen &&
3249 fa->tb_id == fri.tb_id &&
3250 fa->fa_tos == fri.tos &&
3251 fa->fa_info == res.fi &&
3252 fa->fa_type == fri.type) {
3253 fri.offload = fa->offload;
3254 fri.trap = fa->trap;
3259 err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
3260 nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
3262 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3263 NETLINK_CB(in_skb).portid,
3271 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
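/* This is the kernel side of "ip route get", e.g.
 *
 *	$ ip route get 192.0.2.1 from 192.0.2.2 iif eth0
 *
 * (addresses and ifname illustrative). An RTA_IIF attribute selects the
 * input-route branch above; otherwise the output resolver is used.
 */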
3281 void ip_rt_multicast_event(struct in_device *in_dev)
3283 rt_cache_flush(dev_net(in_dev->dev));
3286 #ifdef CONFIG_SYSCTL
3287 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
3288 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
3289 static int ip_rt_gc_elasticity __read_mostly = 8;
3290 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
3292 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
3293 void __user *buffer,
3294 size_t *lenp, loff_t *ppos)
3296 struct net *net = (struct net *)__ctl->extra1;
3299 rt_cache_flush(net);
3300 fnhe_genid_bump(net);
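/* Reached from userspace via e.g.
 *
 *	echo 1 > /proc/sys/net/ipv4/route/flush
 *
 * Both generation counters are bumped, so cached routes and nexthop
 * exceptions are invalidated lazily at their next validity check rather
 * than torn down synchronously.
 */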
3307 static struct ctl_table ipv4_route_table[] = {
3309 .procname = "gc_thresh",
3310 .data = &ipv4_dst_ops.gc_thresh,
3311 .maxlen = sizeof(int),
3313 .proc_handler = proc_dointvec,
3316 .procname = "max_size",
3317 .data = &ip_rt_max_size,
3318 .maxlen = sizeof(int),
3320 .proc_handler = proc_dointvec,
3323 /* Deprecated. Use gc_min_interval_ms */
3325 .procname = "gc_min_interval",
3326 .data = &ip_rt_gc_min_interval,
3327 .maxlen = sizeof(int),
3329 .proc_handler = proc_dointvec_jiffies,
3332 .procname = "gc_min_interval_ms",
3333 .data = &ip_rt_gc_min_interval,
3334 .maxlen = sizeof(int),
3336 .proc_handler = proc_dointvec_ms_jiffies,
3339 .procname = "gc_timeout",
3340 .data = &ip_rt_gc_timeout,
3341 .maxlen = sizeof(int),
3343 .proc_handler = proc_dointvec_jiffies,
3346 .procname = "gc_interval",
3347 .data = &ip_rt_gc_interval,
3348 .maxlen = sizeof(int),
3350 .proc_handler = proc_dointvec_jiffies,
3353 .procname = "redirect_load",
3354 .data = &ip_rt_redirect_load,
3355 .maxlen = sizeof(int),
3357 .proc_handler = proc_dointvec,
3360 .procname = "redirect_number",
3361 .data = &ip_rt_redirect_number,
3362 .maxlen = sizeof(int),
3364 .proc_handler = proc_dointvec,
3367 .procname = "redirect_silence",
3368 .data = &ip_rt_redirect_silence,
3369 .maxlen = sizeof(int),
3371 .proc_handler = proc_dointvec,
3374 .procname = "error_cost",
3375 .data = &ip_rt_error_cost,
3376 .maxlen = sizeof(int),
3378 .proc_handler = proc_dointvec,
3381 .procname = "error_burst",
3382 .data = &ip_rt_error_burst,
3383 .maxlen = sizeof(int),
3385 .proc_handler = proc_dointvec,
3388 .procname = "gc_elasticity",
3389 .data = &ip_rt_gc_elasticity,
3390 .maxlen = sizeof(int),
3392 .proc_handler = proc_dointvec,
3395 .procname = "mtu_expires",
3396 .data = &ip_rt_mtu_expires,
3397 .maxlen = sizeof(int),
3399 .proc_handler = proc_dointvec_jiffies,
3402 .procname = "min_pmtu",
3403 .data = &ip_rt_min_pmtu,
3404 .maxlen = sizeof(int),
3406 .proc_handler = proc_dointvec_minmax,
3407 .extra1 = &ip_min_valid_pmtu,
3410 .procname = "min_adv_mss",
3411 .data = &ip_rt_min_advmss,
3412 .maxlen = sizeof(int),
3414 .proc_handler = proc_dointvec,
3419 static const char ipv4_route_flush_procname[] = "flush";
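/* The table above surfaces as /proc/sys/net/ipv4/route/<procname>, e.g.
 *
 *	sysctl net.ipv4.route.min_pmtu
 *
 * (illustrative invocation). It is registered once for init_net in
 * ip_static_sysctl_init() below; only the "flush" entry that follows is
 * registered per network namespace.
 */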
3421 static struct ctl_table ipv4_route_flush_table[] = {
3423 .procname = ipv4_route_flush_procname,
3424 .maxlen = sizeof(int),
3426 .proc_handler = ipv4_sysctl_rtcache_flush,
3431 static __net_init int sysctl_route_net_init(struct net *net)
3433 struct ctl_table *tbl;
3435 tbl = ipv4_route_flush_table;
3436 if (!net_eq(net, &init_net)) {
3437 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3441 /* Don't export non-whitelisted sysctls to unprivileged users */
3442 if (net->user_ns != &init_user_ns) {
3443 if (tbl[0].procname != ipv4_route_flush_procname)
3444 tbl[0].procname = NULL;
3447 tbl[0].extra1 = net;
3449 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
3450 if (!net->ipv4.route_hdr)
3455 if (tbl != ipv4_route_flush_table)
3461 static __net_exit void sysctl_route_net_exit(struct net *net)
3463 struct ctl_table *tbl;
3465 tbl = net->ipv4.route_hdr->ctl_table_arg;
3466 unregister_net_sysctl_table(net->ipv4.route_hdr);
3467 BUG_ON(tbl == ipv4_route_flush_table);
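/* Only the kmemdup()ed per-netns copies may be freed here; reaching this
 * point with the static template would mean the init/exit pairing above
 * went wrong, hence the BUG_ON.
 */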
3471 static __net_initdata struct pernet_operations sysctl_route_ops = {
3472 .init = sysctl_route_net_init,
3473 .exit = sysctl_route_net_exit,
3477 static __net_init int rt_genid_init(struct net *net)
3479 atomic_set(&net->ipv4.rt_genid, 0);
3480 atomic_set(&net->fnhe_genid, 0);
3481 atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
3485 static __net_initdata struct pernet_operations rt_genid_ops = {
3486 .init = rt_genid_init,
3489 static int __net_init ipv4_inetpeer_init(struct net *net)
3491 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3495 inet_peer_base_init(bp);
3496 net->ipv4.peers = bp;
3500 static void __net_exit ipv4_inetpeer_exit(struct net *net)
3502 struct inet_peer_base *bp = net->ipv4.peers;
3504 net->ipv4.peers = NULL;
3505 inetpeer_invalidate_tree(bp);
3509 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3510 .init = ipv4_inetpeer_init,
3511 .exit = ipv4_inetpeer_exit,
3514 #ifdef CONFIG_IP_ROUTE_CLASSID
3515 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3516 #endif /* CONFIG_IP_ROUTE_CLASSID */
3518 int __init ip_rt_init(void)
3522 ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
3525 panic("IP: failed to allocate ip_idents\n");
3527 prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
3529 ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
3531 panic("IP: failed to allocate ip_tstamps\n");
3533 for_each_possible_cpu(cpu) {
3534 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3536 INIT_LIST_HEAD(&ul->head);
3537 spin_lock_init(&ul->lock);
3539 #ifdef CONFIG_IP_ROUTE_CLASSID
3540 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3542 panic("IP: failed to allocate ip_rt_acct\n");
3545 ipv4_dst_ops.kmem_cachep =
3546 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3547 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3549 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3551 if (dst_entries_init(&ipv4_dst_ops) < 0)
3552 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3554 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3555 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3557 ipv4_dst_ops.gc_thresh = ~0;
3558 ip_rt_max_size = INT_MAX;
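/* With gc_thresh at ~0 and max_size at INT_MAX, dst garbage collection
 * is effectively disabled for IPv4: since the removal of the old routing
 * cache, entries die by refcount and genid-based invalidation instead.
 */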
3563 if (ip_rt_proc_init())
3564 pr_err("Unable to create route proc files\n");
3569 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3570 RTNL_FLAG_DOIT_UNLOCKED);
3572 #ifdef CONFIG_SYSCTL
3573 register_pernet_subsys(&sysctl_route_ops);
3575 register_pernet_subsys(&rt_genid_ops);
3576 register_pernet_subsys(&ipv4_inetpeer_ops);
3580 #ifdef CONFIG_SYSCTL
3582 * We really need to sanitize the damn ipv4 init order, then all
3583 * this nonsense will go away.
3585 void __init ip_static_sysctl_init(void)
3587 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);