// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	Alan Cox	:	Verify area fixes.
 *	Alan Cox	:	cli() protects routing changes
 *	Rui Oliveira	:	ICMP routing table updates
 *	(rco@di.uminho.pt)	Routing table insertion and update
 *	Linus Torvalds	:	Rewrote bits to be sensible
 *	Alan Cox	:	Added BSD route gw semantics
 *	Alan Cox	:	Super /proc >4K
 *	Alan Cox	:	MTU in route table
 *	Alan Cox	:	MSS actually. Also added the window clamping.
 *	Sam Lantinga	:	Fixed route matching in rt_del()
 *	Alan Cox	:	Routing cache support.
 *	Alan Cox	:	Removed compatibility cruft.
 *	Alan Cox	:	RTF_REJECT support.
 *	Alan Cox	:	TCP irtt support.
 *	Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *	Alan Cox	:	Use __u32 properly
 *	Alan Cox	:	Aligned routing errors more closely with BSD;
 *				our system is still very different.
 *	Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *				routing caches and better behaviour.
 *	Olaf Erb	:	irtt wasn't being copied right.
 *	Bjorn Ekwall	:	Kerneld route support.
 *	Alan Cox	:	Multicast fixed (I hope)
 *	Pavel Krauz	:	Limited broadcast fixed
 *	Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *				route.c and rewritten from scratch.
 *	Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after a year in coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *	Marc Boucher	:	routing by fwmark
 *	Robert Olsson	:	Added rt_cache statistics
 *	Arnaldo C. Melo	:	Convert proc stuff to seq_file
 *	Eric Dumazet	:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov	:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov	:	Removed TOS from hash calculations
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>

#include "fib_lookup.h"

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;

static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;

/*
 *	Interface to generic destination cache.
 */
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		 ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void		 ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
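/* A hedged illustration (example_tos2priority() is a hypothetical helper,
 * not part of the original file): the table above is indexed by the four
 * TOS bits with the low ECN bit shifted away, which is how
 * rt_tos2priority() in include/net/route.h consumes it.
 */
static inline char example_tos2priority(u8 tos)
{
	/* IPTOS_TOS() masks out the precedence/ECN bits; >> 1 maps the
	 * remaining TOS nibble onto the 16 table slots.
	 */
	return ip_tos2prio[IPTOS_TOS(tos) >> 1];
}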
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,
		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", 0444, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", 0444,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
				 rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata =	{
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	struct neighbour *n;

	rcu_read_lock_bh();

	if (likely(rt->rt_gw_family == AF_INET)) {
		n = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		n = ip_neigh_gw6(dev, &rt->rt_gw6);
	} else {
		__be32 pkey;

		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
		n = ip_neigh_gw4(dev, pkey);
	}

	if (n && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;

	rcu_read_unlock_bh();

	return n;
}
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;

	if (rt->rt_gw_family == AF_INET) {
		pkey = (const __be32 *)&rt->rt_gw4;
	} else if (rt->rt_gw_family == AF_INET6) {
		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
	} else if (!daddr ||
		   (rt->rt_flags &
		    (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
		return;
	}

	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}
#define IP_IDENTS_SZ 2048u

static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = READ_ONCE(*p_tstamp);
	u32 now = (u32)jiffies;
	u32 new, delta = 0;

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = prandom_u32_max(now - old);

	/* Do not use atomic_add_return() as it makes UBSAN unhappy */
	do {
		old = (u32)atomic_read(p_id);
		new = old + delta + segs;
	} while (atomic_cmpxchg(p_id, old, new) != old);

	return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
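/* A minimal single-threaded sketch of the reservation step above
 * (example_idents_reserve() is a hypothetical helper, not in this file):
 * the same arithmetic as ip_idents_reserve(), minus the atomics, to show
 * how the random delta hides the packet count of an idle generator.
 */
static inline u32 example_idents_reserve(u32 *p_id, u32 *p_tstamp,
					 u32 now, int segs)
{
	u32 delta = 0;

	if (*p_tstamp != now) {
		/* generator was idle: bump the ID by a random amount
		 * bounded by the idle time, so consecutive IDs do not
		 * reveal how many packets were sent in between.
		 */
		delta = prandom_u32_max(now - *p_tstamp);
		*p_tstamp = now;
	}
	*p_id += delta + segs;
	return *p_id - segs;	/* first ID of the reserved range */
}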
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	u32 hash, id;

	/* Note the following code is not safe, but this is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash_3u32((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    &net->ipv4.ip_id_key);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}
static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}
static inline u32 fnhe_hashfun(__be32 daddr)
{
	static u32 fnhe_hashrnd __read_mostly;
	u32 hval;

	net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
	hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
	return hash_32(hval, FNHE_HASH_SHIFT);
}
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_gw_family = AF_INET;
		rt->rt_gw4 = fnhe->fnhe_gw;
	}
}
static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
				  __be32 gw, u32 pmtu, bool lock,
				  unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nhc->nhc_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nhc->nhc_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nhc->nhc_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nhc->nhc_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;

			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(n->nud_state & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh_common *nhc = FIB_RES_NHC(res);

				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
						0, false,
						jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	rt = (struct rtable *) dst;

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->rate_tokens)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->rate_tokens;
		++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->rate_tokens == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}
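/* Hedged sketch of the backoff above (example_next_redirect_time() is a
 * hypothetical helper, not in this file): the earliest jiffies value at
 * which the load-limit test will let another redirect out. With
 * ip_rt_redirect_load = HZ/50, the gap doubles with every redirect sent
 * (HZ/50, HZ/25, ...) until ip_rt_redirect_number is reached and we go
 * silent for ip_rt_redirect_silence.
 */
static inline unsigned long example_next_redirect_time(const struct inet_peer *peer)
{
	return peer->rate_last + (ip_rt_redirect_load << peer->rate_tokens);
}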
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	u32 old_mtu = ipv4_mtu(dst);
	struct fib_result res;
	bool lock = false;

	if (ip_mtu_locked(dst))
		return;

	if (old_mtu < mtu)
		return;

	if (mtu < ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
		struct fib_nh_common *nhc = FIB_RES_NHC(res);

		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD.
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
	struct ip_options opt;
	int res;

	/* Recompile ip options since IPCB may not be valid anymore.
	 * Also check we have a reasonable ipv4 header.
	 */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
		return;

	memset(&opt, 0, sizeof(opt));
	if (ip_hdr(skb)->ihl > 5) {
		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
			return;
		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

		rcu_read_lock();
		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
		rcu_read_unlock();

		if (res)
			return;
	}
	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}
static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	ipv4_send_dest_unreach(skb);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}
static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so that it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be unaligned
 * in IP options!
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = RT_TOS(iph->tos),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
				    ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}
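/* Hedged worked example of the computation above: with a plain 1500-byte
 * Ethernet MTU, header_size = 20 (IP) + 20 (TCP) = 40, so the advertised
 * MSS comes out to 1500 - 40 = 1460; it is never reported below
 * ip_rt_min_advmss (256) nor above IPV4_MAX_PMTU - 40.
 */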
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		return mtu;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_gw_family && mtu > 576)
			mtu = 576;
	}

	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nhc->nhc_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}
static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
					       __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nhc, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_nh_common *nhc = res->nhc;
	struct net_device *dev = nhc->nhc_dev;
	struct fib_info *fi = res->fi;
	u32 mtu = 0;

	if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nhc, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}
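/* Hedged worked example of the priority above: with
 * net.ipv4.ip_forward_use_pmtu set, or with the route's MTU metric locked,
 * the route MTU (fi->fib_mtu) wins outright. Otherwise a nexthop exception
 * learned from an ICMP "fragmentation needed" (say 1400) overrides a
 * 1500-byte egress device until fnhe_expires; failing both, the device MTU
 * (clamped to IP_MAX_MTU) is used, less any lwtunnel headroom.
 */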
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gw4) {
			rt->rt_gw4 = daddr;
			rt->rt_gw_family = AF_INET;
		}

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}
static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nhc->nhc_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			dst_dev_put(&orig->dst);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->rt_uncached)) {
		struct uncached_list *ul = rt->rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;

	ip_dst_metrics_put(dst);
	rt_del_uncached_list(rt);
}
void rt_flush_dev(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct rtable *rt;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = net->loopback_dev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&ul->lock);
	}
}
static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
			rt->rt_gw_family = nhc->nhc_gw_family;
			/* only INET and INET6 are supported */
			if (likely(nhc->nhc_gw_family == AF_INET))
				rt->rt_gw4 = nhc->nhc_gw.ipv4;
			else
				rt->rt_gw6 = nhc->nhc_gw.ipv6;
		}

		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			rt->dst.tclassid = nh->nh_tclassid;
		}
#endif
		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nhc, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gw4) {
				rt->rt_gw_family = AF_INET;
				rt->rt_gw4 = daddr;
			}
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool nopolicy, bool noxfrm, bool will_cache)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
		       (will_cache ? 0 : DST_HOST) |
		       (nopolicy ? DST_NOPOLICY : 0) |
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_gw_family = 0;
		rt->rt_gw4 = 0;
		INIT_LIST_HEAD(&rt->rt_uncached);

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
	struct rtable *new_rt;

	new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
			   rt->dst.flags);

	if (new_rt) {
		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		new_rt->rt_flags = rt->rt_flags;
		new_rt->rt_type = rt->rt_type;
		new_rt->rt_is_input = rt->rt_is_input;
		new_rt->rt_iif = rt->rt_iif;
		new_rt->rt_pmtu = rt->rt_pmtu;
		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
		new_rt->rt_gw_family = rt->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			new_rt->rt_gw4 = rt->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			new_rt->rt_gw6 = rt->rt_gw6;
		INIT_LIST_HEAD(&new_rt->rt_uncached);

		new_rt->dst.flags |= DST_HOST;
		new_rt->dst.input = rt->dst.input;
		new_rt->dst.output = rt->dst.output;
		new_rt->dst.error = rt->dst.error;
		new_rt->dst.lastuse = jiffies;
		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
	}
	return new_rt;
}
EXPORT_SYMBOL(rt_dst_clone);
/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag)
{
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		return -EINVAL;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return -EINVAL;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return -EINVAL;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, itag);
		if (err < 0)
			return err;
	}
	return 0;
}
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	struct rtable *rth;
	u32 itag = 0;
	int err;

	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
	if (err)
		return err;

	if (our)
		flags |= RTCF_LOCAL;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation: if the source is martian,
		 *	the only hint is the MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
	struct net_device *dev = nhc->nhc_dev;
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(dev);
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP)) {
		__be32 gw;

		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
		if (IN_DEV_SHARED_MEDIA(out_dev) ||
		    inet_addr_onlink(out_dev, saddr, gw))
			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy arp feature has been extended to allow ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(nhc, daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
cleanup:
	return err;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	if (icmph->type != ICMP_DEST_UNREACH &&
	    icmph->type != ICMP_REDIRECT &&
	    icmph->type != ICMP_TIME_EXCEEDED &&
	    icmph->type != ICMP_PARAMETERPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
	struct flow_keys hash_keys;
	u32 mhash;

	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}

			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		break;
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		/* skb is currently provided only when forwarding */
		if (skb) {
			struct flow_keys keys;

			skb_flow_dissect_flow_keys(skb, &keys, 0);

			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
		} else {
			/* Same as case 0 */
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	if (multipath_hash)
		mhash = jhash_2words(mhash, multipath_hash, 0);

	return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
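/* Hedged sketch (example_l3_flow_hash() is a hypothetical helper, not in
 * this file): for locally generated traffic, hash policy 0 above reduces
 * to hashing just the address pair, as sketched here.
 */
static inline u32 example_l3_flow_hash(const struct flowi4 *fl4)
{
	struct flow_keys hash_keys;

	memset(&hash_keys, 0, sizeof(hash_keys));
	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	hash_keys.addrs.v4addrs.src = fl4->saddr;
	hash_keys.addrs.v4addrs.dst = fl4->daddr;
	/* >> 1 keeps the result in the same range fib_multipath_hash()
	 * returns, leaving the top bit free.
	 */
	return flow_hash_from_keys(&hash_keys) >> 1;
}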
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos,
			    struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && fib_info_num_path(res->fi) > 1) {
		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, h);
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/*
 *	NOTE. We drop all the packets that have a local source
 *	address, because every properly looped back packet
 *	must have the correct destination already attached by the output routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with 100% guarantee.
 *	called with rcu_read_lock()
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev,
			       struct fib_result *res)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flow_keys *flkeys = NULL, _flkeys;
	struct net    *net = dev_net(dev);
	struct ip_tunnel_info *tun_info;
	int		err = -EINVAL;
	unsigned int	flags = 0;
	u32		itag = 0;
	struct rtable	*rth;
	struct flowi4	fl4;
	bool do_cache = true;

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	 * by fib_lookup.
	 */

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
	else
		fl4.flowi4_tun_key.tun_id = 0;
	skb_dst_drop(skb);

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res->fi = NULL;
	res->table = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I do not even know whether to fix it or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * calling it at most once if daddr and/or saddr are loopback addresses.
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route the packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_uid = sock_net_uid(net, NULL);

	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
		flkeys = &_flkeys;
	} else {
		fl4.flowi4_proto = 0;
		fl4.fl4_sport = 0;
		fl4.fl4_dport = 0;
	}

	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res->type == RTN_BROADCAST) {
		if (IN_DEV_BFORWARD(in_dev))
			goto make_route;
		/* not do cache if bc_forwarding is enabled */
		if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
			do_cache = false;
		goto brd_input;
	}

	if (res->type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res->type != RTN_UNICAST)
		goto martian_destination;

make_route:
	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source;
	}
	flags |= RTCF_BROADCAST;
	res->type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	do_cache &= res->fi && !itag;
	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			err = 0;
			goto out;
		}
	}

	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
			   flags | RTCF_LOCAL, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res->type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags &= ~RTCF_LOCAL;
	}

	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
			WARN_ON(rth->dst.input == lwtunnel_input);
			rth->dst.lwtstate->orig_input = rth->dst.input;
			rth->dst.input = lwtunnel_input;
		}

		if (unlikely(!rt_cache_route(nhc, rth)))
			rt_add_uncached_list(rth);
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res->type = RTN_UNREACHABLE;
	res->fi = NULL;
	res->table = NULL;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	struct fib_result res;
	int err;

	tos &= IPTOS_RT_MASK;
	rcu_read_lock();
	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock held */
int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		       u8 tos, struct net_device *dev, struct fib_result *res)
{
	/* Multicast recognition logic is moved from route cache to here.
	 * The problem was that too many Ethernet cards have broken/missing
	 * hardware multicast filters :-( As a result, a host on a multicast
	 * network acquires a lot of useless route cache entries, a sort of
	 * SDR messages from all the world. Now we try to get rid of them.
	 * Really, provided the software IP multicast filter is organized
	 * reasonably (at least, hashed), it does not result in a slowdown
	 * compared with route cache reject entries.
	 * Note that multicast routers are not affected, because
	 * a route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		int our = 0;
		int err = -EINVAL;

		if (!in_dev)
			return err;
		our = ip_check_mc_rcu(in_dev, daddr, saddr,
				      ip_hdr(skb)->protocol);

		/* check l3 master if no match yet */
		if (!our && netif_is_l3_slave(dev)) {
			struct in_device *l3_in_dev;

			l3_in_dev = __in_dev_get_rcu(skb->dev);
			if (l3_in_dev)
				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
						      ip_hdr(skb)->protocol);
		}

		if (our
#ifdef CONFIG_IP_MROUTE
			||
		    (!ipv4_is_local_multicast(daddr) &&
		     IN_DEV_MFORWARD(in_dev))
#endif
		   ) {
			err = ip_route_input_mc(skb, daddr, saddr,
						tos, dev, our);
		}
		return err;
	}

	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) &&
		    !(dev_out->flags & IFF_LOOPBACK) &&
		    !netif_is_l3_master(dev_out))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If a multicast route does not exist, use
		 * the default one, but do not gateway in this case.
		 * Yes, it is a hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
		   (orig_oif != dev_out->ifindex)) {
		/* For local routes that require a particular output interface
		 * we do not want to cache the result.  Caching the result
		 * causes incorrect behaviour when there are multiple source
		 * addresses on the interface, the end result being that if the
		 * intended recipient is waiting on that interface for the
		 * packet he won't receive it because it will be delivered on
		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
		 * be set to the loopback interface as well.
		 */
		do_cache = false;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
		struct rtable __rcu **prth;

		fnhe = find_exception(nhc, fl4->daddr);
		if (!do_cache)
			goto add;
		if (fnhe) {
			prth = &fnhe->fnhe_rth_output;
		} else {
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nhc->nhc_gw_family &&
				       nhc->nhc_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
			return rth;
	}

add:
	rth = rt_dst_alloc(dev_out, flags, type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->rt_iif = orig_oif;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
	lwtunnel_set_redirect(&rth->dst);

	return rth;
}
/*
 * Major route resolver routine.
 */

struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
					const struct sk_buff *skb)
{
	__u8 tos = RT_FL_TOS(fl4);
	struct fib_result res = {
		.type		= RTN_UNSPEC,
		.fi		= NULL,
		.table		= NULL,
		.tclassid	= 0,
	};
	struct rtable *rth;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
	rcu_read_unlock();

	return rth;
}
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
					    struct fib_result *res,
					    const struct sk_buff *skb)
{
	struct net_device *dev_out = NULL;
	int orig_oif = fl4->flowi4_oif;
	unsigned int flags = 0;
	struct rtable *rth;
	int err = -ENETUNREACH;

	if (fl4->saddr) {
		rth = ERR_PTR(-EINVAL);
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr))
			goto out;

		/* I removed check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (!dev_out)
				goto out;

			/* Special hack: user can direct multicasts
			   and limited broadcast via necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of routing cache they are broken,
			   because we are not allowed to build multicast path
			   with loopback source addr (look, routing cache
			   cannot know, that ttl is zero, so that packet
			   will not leave this host and route is valid).
			   Luckily, this hack is good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}


	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (!dev_out)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr) ||
		    fl4->flowi4_proto == IPPROTO_IGMP) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res->type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	err = fib_lookup(net, fl4, res, 0);
	if (err) {
		res->fi = NULL;
		res->table = NULL;
		if (fl4->flowi4_oif &&
		    (ipv4_is_multicast(fl4->daddr) ||
		    !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
			/* Apparently, routing tables are wrong. Assume,
			   that the destination is on link.

			   Because we are allowed to send to iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send packet, ignoring both routing tables
			   and ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res->type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(err);
		goto out;
	}

	if (res->type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res->fi->fib_prefsrc)
				fl4->saddr = res->fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}

		/* L3 master device is the loopback for that domain */
		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
			net->loopback_dev;

		/* make sure orig_oif points to fib result device even
		 * though packet rx/tx happens over loopback or l3mdev
		 */
		orig_oif = FIB_RES_OIF(*res);

		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	fib_select_path(net, res, fl4, skb);

	dev_out = FIB_RES_DEV(*res);
	fl4->flowi4_oif = dev_out->ifindex;

make_route:
	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);

out:
	return rth;
}
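/* The helpers below implement a "blackhole" dst: all state-changing
 * callbacks are no-ops and the attached input/output hooks discard
 * packets, so a caller (notably xfrm, when a route lookup cannot be
 * completed) can keep holding a route that safely absorbs both traffic
 * and metric updates.
 */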
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family = AF_INET,
	.check = ipv4_blackhole_dst_check,
	.mtu = ipv4_blackhole_mtu,
	.default_advmss = ipv4_default_advmss,
	.update_pmtu = ipv4_rt_blackhole_update_pmtu,
	.redirect = ipv4_rt_blackhole_redirect,
	.cow_metrics = ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup = ipv4_neigh_lookup,
};
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		new->dev = net->loopback_dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;
		rt->rt_mtu_locked = ort->rt_mtu_locked;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_gw_family = ort->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			rt->rt_gw4 = ort->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			rt->rt_gw6 = ort->rt_gw6;

		INIT_LIST_HEAD(&rt->rt_uncached);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    const struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
							flowi4_to_flowi(flp4),
							sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
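/* Usage sketch (illustrative only): connected sockets typically resolve
 * their route through this wrapper so that, when an IPsec policy matches,
 * xfrm_lookup_route() can substitute an xfrm bundle for the plain route:
 *
 *	rt = ip_route_output_flow(net, &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *
 * On success the caller owns a reference, dropped with ip_rt_put().
 */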
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
			struct sk_buff *skb, u32 portid, u32 seq)
{
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_INET;
	r->rtm_dst_len = 32;
	r->rtm_src_len = 0;
	r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
	r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table_id))
		goto nla_put_failure;
	r->rtm_type = rt->rt_type;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
		r->rtm_flags |= RTCF_DOREDIRECT;

	if (nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_in_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (fl4 && !rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_gw_family == AF_INET &&
	    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
		goto nla_put_failure;
	} else if (rt->rt_gw_family == AF_INET6) {
		int alen = sizeof(struct in6_addr);
		struct nlattr *nla;
		struct rtvia *via;

		nla = nla_reserve(skb, RTA_VIA, alen + 2);
		if (!nla)
			goto nla_put_failure;

		via = nla_data(nla);
		via->rtvia_family = AF_INET6;
		memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
	}

	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rt->rt_mtu_locked && expires)
		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4) {
		if (fl4->flowi4_mark &&
		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
			goto nla_put_failure;

		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
		    nla_put_u32(skb, RTA_UID,
				from_kuid_munged(current_user_ns(),
						 fl4->flowi4_uid)))
			goto nla_put_failure;

		if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
			if (ipv4_is_multicast(dst) &&
			    !ipv4_is_local_multicast(dst) &&
			    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
				int err = ipmr_get_route(net, skb,
							 fl4->saddr, fl4->daddr,
							 r, portid);

				if (err <= 0) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				}
			} else
#endif
				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
					goto nla_put_failure;
		}
	}

	error = rt->dst.error;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
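/* Dump helper for nexthop exceptions: walk one fnhe hash bucket and emit
 * an RTM_NEWROUTE record (via rt_fill_info() above) for every exception
 * that is still current, i.e. matching genid and not expired, honouring
 * the dump offset carried in *fa_index.
 */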
static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
			    struct netlink_callback *cb, u32 table_id,
			    struct fnhe_hash_bucket *bucket, int genid,
			    int *fa_index, int fa_start)
{
	int i;

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
			struct rtable *rt;
			int err;

			if (*fa_index < fa_start)
				goto next;

			if (fnhe->fnhe_genid != genid)
				goto next;

			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires))
				goto next;

			rt = rcu_dereference(fnhe->fnhe_rth_input);
			if (!rt)
				rt = rcu_dereference(fnhe->fnhe_rth_output);
			if (!rt)
				goto next;

			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
					   table_id, NULL, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq);
			if (err)
				return err;
next:
			(*fa_index)++;
		}
	}

	return 0;
}

int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
		       u32 table_id, struct fib_info *fi,
		       int *fa_index, int fa_start)
{
	struct net *net = sock_net(cb->skb->sk);
	int nhsel, genid = fnhe_genid(net);

	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
		struct fnhe_hash_bucket *bucket;
		int err;

		if (nhc->nhc_flags & RTNH_F_DEAD)
			continue;

		rcu_read_lock();
		bucket = rcu_dereference(nhc->nhc_exceptions);
		err = 0;
		if (bucket)
			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
					       genid, fa_index, fa_start);
		rcu_read_unlock();
		if (err)
			return err;
	}

	return 0;
}
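/* Build a dummy skb for an RTM_GETROUTE request so the lookup can walk
 * the same code paths as real traffic; only the fields a dissector needs
 * (addresses, protocol, ports) are filled in.
 */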
static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
						   u8 ip_proto, __be16 sport,
						   __be16 dport)
{
	struct sk_buff *skb;
	struct iphdr *iph;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Reserve room for dummy headers, this skb can pass
	 * through good chunk of routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	iph = skb_put(skb, sizeof(struct iphdr));
	iph->protocol = ip_proto;
	iph->saddr = src;
	iph->daddr = dst;
	iph->version = 0x4;
	iph->frag_off = htons(IP_DF);
	iph->ihl = 0x5;
	skb_set_transport_header(skb, skb->len);

	switch (iph->protocol) {
	case IPPROTO_UDP: {
		struct udphdr *udph;

		udph = skb_put_zero(skb, sizeof(struct udphdr));
		udph->source = sport;
		udph->dest = dport;
		/* UDP carries its length field in network byte order */
		udph->len = htons(sizeof(struct udphdr));
		udph->check = 0;
		break;
	}
	case IPPROTO_TCP: {
		struct tcphdr *tcph;

		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
		tcph->source = sport;
		tcph->dest = dport;
		tcph->doff = sizeof(struct tcphdr) / 4;
		tcph->rst = 1;
		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
					    src, dst, 0);
		break;
	}
	case IPPROTO_ICMP: {
		struct icmphdr *icmph;

		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
		icmph->type = ICMP_ECHO;
		icmph->code = 0;
	}
	}

	return skb;
}
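/* Strict validation of an RTM_GETROUTE request: for strict-checking
 * sockets, reject any header field or attribute that a route get cannot
 * honour, so userspace gets an explicit error instead of being silently
 * ignored.
 */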
static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG(extack,
			       "ipv4: Invalid header for route get request");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
					      rtm_ipv4_policy, extack);

	rtm = nlmsg_data(nlh);
	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
	    rtm->rtm_table || rtm->rtm_protocol ||
	    rtm->rtm_scope || rtm->rtm_type) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
		return -EINVAL;
	}

	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
			       RTM_F_LOOKUP_TABLE |
			       RTM_F_FIB_MATCH)) {
		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
					    rtm_ipv4_policy, extack);
	if (err)
		return err;

	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
		return -EINVAL;
	}

	for (i = 0; i <= RTA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case RTA_IIF:
		case RTA_OIF:
		case RTA_SRC:
		case RTA_DST:
		case RTA_IP_PROTO:
		case RTA_SPORT:
		case RTA_DPORT:
		case RTA_MARK:
		case RTA_UID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
			return -EINVAL;
		}
	}

	return 0;
}
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	u32 table_id = RT_TABLE_MAIN;
	__be16 sport = 0, dport = 0;
	struct fib_result res = {};
	u8 ip_proto = IPPROTO_UDP;
	struct rtable *rt = NULL;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi4 fl4 = {};
	__be32 dst = 0;
	__be32 src = 0;
	kuid_t uid;
	u32 iif;
	int err;
	int mark;

	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		return err;

	rtm = nlmsg_data(nlh);
	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &ip_proto, AF_INET, extack);
		if (err)
			return err;
	}

	if (tb[RTA_SPORT])
		sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		dport = nla_get_be16(tb[RTA_DPORT]);

	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
	if (!skb)
		return -ENOBUFS;

	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;
	if (sport)
		fl4.fl4_sport = sport;
	if (dport)
		fl4.fl4_dport = dport;
	fl4.flowi4_proto = ip_proto;

	rcu_read_lock();

	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_rcu;
		}

		fl4.flowi4_iif = iif; /* for rt_fill_info */
		skb->dev = dev;
		skb->mark = mark;
		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
					 dev, &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		skb->dev = net->loopback_dev;
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
		else
			skb_dst_set(skb, &rt->dst);
	}

	if (err)
		goto errout_rcu;

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
		table_id = res.table ? res.table->tb_id : 0;

	/* reset skb for netlink reply msg */
	skb_trim(skb, 0);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_header(skb);

	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		if (!res.fi) {
			err = fib_props[res.type].error;
			if (!err)
				err = -EHOSTUNREACH;
			goto errout_rcu;
		}
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
				    rt->rt_type, res.prefix, res.prefixlen,
				    fl4.flowi4_tos, res.fi, 0);
	} else {
		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
	}
	if (err < 0)
		goto errout_rcu;

	rcu_read_unlock();

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	return err;

errout_rcu:
	rcu_read_unlock();
	kfree_skb(skb);
	goto errout_free;
}
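/* For reference, the handler above is what services "ip route get", e.g.
 *
 *	$ ip route get 192.0.2.1
 *
 * (192.0.2.1 is a documentation address, used purely as an example.)
 * iproute2 sends RTM_GETROUTE with RTA_DST set and prints the
 * RTM_NEWROUTE reply built by rt_fill_info() or fib_dump_info().
 */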
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}
#ifdef CONFIG_SYSCTL
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;

static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}
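/* The handler above backs the write-only "flush" sysctl below, e.g.
 *
 *	# echo 1 > /proc/sys/net/ipv4/route/flush
 *
 * which invalidates the per-netns cached routes and bumps the fnhe
 * generation so stale nexthop exceptions are discarded as well.
 */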
static struct ctl_table ipv4_route_table[] = {
	{
		.procname = "gc_thresh",
		.data = &ipv4_dst_ops.gc_thresh,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "max_size",
		.data = &ip_rt_max_size,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname = "gc_min_interval",
		.data = &ip_rt_gc_min_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "gc_min_interval_ms",
		.data = &ip_rt_gc_min_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_ms_jiffies,
	},
	{
		.procname = "gc_timeout",
		.data = &ip_rt_gc_timeout,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "gc_interval",
		.data = &ip_rt_gc_interval,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "redirect_load",
		.data = &ip_rt_redirect_load,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "redirect_number",
		.data = &ip_rt_redirect_number,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "redirect_silence",
		.data = &ip_rt_redirect_silence,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "error_cost",
		.data = &ip_rt_error_cost,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "error_burst",
		.data = &ip_rt_error_burst,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "gc_elasticity",
		.data = &ip_rt_gc_elasticity,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "mtu_expires",
		.data = &ip_rt_mtu_expires,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_jiffies,
	},
	{
		.procname = "min_pmtu",
		.data = &ip_rt_min_pmtu,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.extra1 = &ip_min_valid_pmtu,
	},
	{
		.procname = "min_adv_mss",
		.data = &ip_rt_min_advmss,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec,
	},
	{ }
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname = "flush",
		.maxlen = sizeof(int),
		.mode = 0200,
		.proc_handler = ipv4_sysctl_rtcache_flush,
	},
	{ },
};
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			tbl[0].procname = NULL;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
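/* Per-netns generation counters: bumping rt_genid invalidates every
 * cached route in the namespace at once (rt_is_expired() compares a
 * route's stored genid), which is far cheaper than walking all dsts.
 */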
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};
static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init = ipv4_inetpeer_init,
	.exit = ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
int __init ip_rt_init(void)
{
	int cpu;

	ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
				  GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
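	/* With the old routing cache gone, dst garbage collection is
	 * effectively disabled: the threshold below is never reached
	 * and the size cap is INT_MAX.
	 */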
	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return 0;
}
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif