/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *		Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		(rco@di.uminho.pt)	Routing table insertion and update
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/ip_fib.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>
#define RT_FL_TOS(oldflp) \
    ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))

#define IP_MAX_MTU	0xFFF0

#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly	= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;
static int ip_rt_secret_interval __read_mostly	= 10 * 60 * HZ;
static int rt_chain_length_max __read_mostly	= 20;

static void rt_worker_func(struct work_struct *work);
static DECLARE_DELAYED_WORK(expires_work, rt_worker_func);
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static void		 ipv4_dst_destroy(struct dst_entry *dst);
static void		 ipv4_dst_ifdown(struct dst_entry *dst,
					 struct net_device *dev, int how);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
static int rt_garbage_collect(struct dst_ops *ops);
static void rt_emergency_hash_rebuild(struct net *net);


static struct dst_ops ipv4_dst_ops = {
	.protocol =		__constant_htons(ETH_P_IP),
	.gc =			rt_garbage_collect,
	.check =		ipv4_dst_check,
	.destroy =		ipv4_dst_destroy,
	.ifdown =		ipv4_dst_ifdown,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.local_out =		__ip_local_out,
	.entry_size =		sizeof(struct rtable),
	.entries =		ATOMIC_INIT(0),
};

#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(FILLER),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
/* The locking scheme is rather straightforward:
 *
 * 1) Read-Copy Update protects the buckets of the central route hash.
 * 2) Only writers remove entries, and they hold the lock
 *    as they look at rtable reference counts.
 * 3) Only readers acquire references to rtable entries,
 *    they do so with atomic increments and with the
 *    corresponding rtable operation.
 */
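/*
 * Illustrative sketch of how the three rules above combine (not part of
 * the kernel build; the *_sketch names are hypothetical).  A reader walks
 * the chain under rcu_read_lock_bh() and takes only an atomic reference;
 * the writer takes the bucket lock, unlinks the entry, and lets the RCU
 * grace period delay the actual free.
 */
#if 0
static struct rtable *bucket_lookup_sketch(struct rtable **chain)
{
	struct rtable *r;

	rcu_read_lock_bh();
	for (r = rcu_dereference(*chain); r;
	     r = rcu_dereference(r->u.dst.rt_next)) {
		if (/* keys match */ 0) {
			dst_hold(&r->u.dst);	/* rule 3: atomic increment */
			break;
		}
	}
	rcu_read_unlock_bh();
	return r;
}

static void bucket_remove_sketch(struct rtable **chain, spinlock_t *lock,
				 struct rtable *victim)
{
	struct rtable **rthp;

	spin_lock_bh(lock);			/* rule 2: only writers lock */
	for (rthp = chain; *rthp; rthp = &(*rthp)->u.dst.rt_next)
		if (*rthp == victim) {
			*rthp = victim->u.dst.rt_next;
			rt_free(victim);	/* frees after a grace period */
			break;
		}
	spin_unlock_bh(lock);
}
#endif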
struct rt_hash_bucket {
	struct rtable	*chain;
};

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
	defined(CONFIG_PROVE_LOCKING)
/*
 * Instead of using one spinlock for each rt_hash_bucket, we use a table of
 * spinlocks.  The size of this table is a power of two and depends on the
 * number of CPUs.
 * (on lockdep we have a quite big spinlock_t, so keep the size down there)
 */
#ifdef CONFIG_LOCKDEP
# define RT_HASH_LOCK_SZ	256
#else
# if NR_CPUS >= 32
#  define RT_HASH_LOCK_SZ	4096
# elif NR_CPUS >= 16
#  define RT_HASH_LOCK_SZ	2048
# elif NR_CPUS >= 8
#  define RT_HASH_LOCK_SZ	1024
# elif NR_CPUS >= 4
#  define RT_HASH_LOCK_SZ	512
# else
#  define RT_HASH_LOCK_SZ	256
# endif
#endif

static spinlock_t	*rt_hash_locks;
# define rt_hash_lock_addr(slot) &rt_hash_locks[(slot) & (RT_HASH_LOCK_SZ - 1)]
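/*
 * A minimal userspace check of the mapping above (illustrative only):
 * bucket slots RT_HASH_LOCK_SZ apart share the same spinlock, so the lock
 * table stays small while still spreading contention across buckets.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned int lock_sz = 256;		/* RT_HASH_LOCK_SZ */
	unsigned int slot = 5;

	/* same index rt_hash_lock_addr() would compute for both slots */
	assert((slot & (lock_sz - 1)) ==
	       ((slot + lock_sz) & (lock_sz - 1)));
	return 0;
}
#endif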
static __init void rt_hash_lock_init(void)
{
	int i;

	rt_hash_locks = kmalloc(sizeof(spinlock_t) * RT_HASH_LOCK_SZ,
				GFP_ATOMIC);
	if (!rt_hash_locks)
		panic("IP: failed to allocate rt_hash_locks\n");

	for (i = 0; i < RT_HASH_LOCK_SZ; i++)
		spin_lock_init(&rt_hash_locks[i]);
}
#else
# define rt_hash_lock_addr(slot) NULL

static inline void rt_hash_lock_init(void)
{
}
#endif

static struct rt_hash_bucket 	*rt_hash_table __read_mostly;
static unsigned			rt_hash_mask __read_mostly;
static unsigned int		rt_hash_log  __read_mostly;

static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) \
	(__raw_get_cpu_var(rt_cache_stat).field++)

static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
				   int genid)
{
	return jhash_3words((__force u32)(__be32)(daddr),
			    (__force u32)(__be32)(saddr),
			    idx, genid)
		& rt_hash_mask;
}

static inline int rt_genid(struct net *net)
{
	return atomic_read(&net->ipv4.rt_genid);
}
#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
	struct seq_net_private p;
	int bucket;
	int genid;
};

static struct rtable *rt_cache_get_first(struct seq_file *seq)
{
	struct rt_cache_iter_state *st = seq->private;
	struct rtable *r = NULL;

	for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
		if (!rt_hash_table[st->bucket].chain)
			continue;
		rcu_read_lock_bh();
		r = rcu_dereference(rt_hash_table[st->bucket].chain);
		while (r) {
			if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
			    r->rt_genid == st->genid)
				return r;
			r = rcu_dereference(r->u.dst.rt_next);
		}
		rcu_read_unlock_bh();
	}
	return r;
}

static struct rtable *__rt_cache_get_next(struct seq_file *seq,
					  struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;

	r = r->u.dst.rt_next;
	while (!r) {
		rcu_read_unlock_bh();
		do {
			if (--st->bucket < 0)
				return NULL;
		} while (!rt_hash_table[st->bucket].chain);
		rcu_read_lock_bh();
		r = rt_hash_table[st->bucket].chain;
	}
	return rcu_dereference(r);
}

static struct rtable *rt_cache_get_next(struct seq_file *seq,
					struct rtable *r)
{
	struct rt_cache_iter_state *st = seq->private;
	while ((r = __rt_cache_get_next(seq, r)) != NULL) {
		if (dev_net(r->u.dst.dev) != seq_file_net(seq))
			continue;
		if (r->rt_genid == st->genid)
			break;
	}
	return r;
}

static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
{
	struct rtable *r = rt_cache_get_first(seq);

	if (r)
		while (pos && (r = rt_cache_get_next(seq, r)))
			--pos;
	return pos ? NULL : r;
}

static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct rt_cache_iter_state *st = seq->private;
	if (*pos)
		return rt_cache_get_idx(seq, *pos - 1);
	st->genid = rt_genid(seq_file_net(seq));
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct rtable *r;

	if (v == SEQ_START_TOKEN)
		r = rt_cache_get_first(seq);
	else
		r = rt_cache_get_next(seq, v);
	++*pos;
	return r;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v && v != SEQ_START_TOKEN)
		rcu_read_unlock_bh();
}
static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	else {
		struct rtable *r = v;
		int len;

		seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
			      "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
			r->u.dst.dev ? r->u.dst.dev->name : "*",
			(unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
			r->rt_flags, atomic_read(&r->u.dst.__refcnt),
			r->u.dst.__use, 0, (unsigned long)r->rt_src,
			(dst_metric(&r->u.dst, RTAX_ADVMSS) ?
			     (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
			dst_metric(&r->u.dst, RTAX_WINDOW),
			(int)((dst_metric(&r->u.dst, RTAX_RTT) >> 3) +
			      dst_metric(&r->u.dst, RTAX_RTTVAR)),
			r->fl.fl4_tos,
			r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
			r->u.dst.hh ? (r->u.dst.hh->hh_output ==
				       dev_queue_xmit) : 0,
			r->rt_spec_dst, &len);

		seq_printf(seq, "%*s\n", 127 - len, "");
	}
	return 0;
}
static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &rt_cache_seq_ops,
			sizeof(struct rt_cache_iter_state));
}

static const struct file_operations rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};


static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   atomic_read(&ipv4_dst_ops.entries),
		   st->in_hit,
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,

		   st->out_hit,
		   st->out_slow_tot,
		   st->out_slow_mc,

		   st->gc_total,
		   st->gc_ignored,
		   st->gc_goal_miss,
		   st->gc_dst_overflow,
		   st->in_hlist_search,
		   st->out_hlist_search
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};


static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#ifdef CONFIG_NET_CLS_ROUTE
static int ip_rt_acct_read(char *buffer, char **start, off_t offset,
			   int length, int *eof, void *data)
{
	unsigned int i;

	if ((offset & 3) || (length & 3))
		return -EIO;

	if (offset >= sizeof(struct ip_rt_acct) * 256) {
		*eof = 1;
		return 0;
	}

	if (offset + length >= sizeof(struct ip_rt_acct) * 256) {
		length = sizeof(struct ip_rt_acct) * 256 - offset;
		*eof = 1;
	}

	offset /= sizeof(u32);

	if (length > 0) {
		u32 *dst = (u32 *) buffer;

		*start = buffer;
		memset(dst, 0, length);

		for_each_possible_cpu(i) {
			unsigned int j;
			u32 *src;

			src = ((u32 *) per_cpu_ptr(ip_rt_acct, i)) + offset;
			for (j = 0; j < length/4; j++)
				dst[j] += src[j];
		}
	}
	return length;
}
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
			&rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", S_IRUGO,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_NET_CLS_ROUTE
	pde = create_proc_read_entry("rt_acct", 0, net->proc_net,
			ip_rt_acct_read, NULL);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_NET_CLS_ROUTE
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
	remove_proc_entry("rt_acct", net->proc_net);
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}
#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
static inline void rt_free(struct rtable *rt)
{
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static inline void rt_drop(struct rtable *rt)
{
	ip_rt_put(rt);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static inline int rt_fast_clean(struct rtable *rth)
{
	/* Kill broadcast/multicast entries very aggressively, if they
	   collide in hash table with more useful entries */
	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
		rth->fl.iif && rth->u.dst.rt_next;
}

static inline int rt_valuable(struct rtable *rth)
{
	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
		rth->u.dst.expires;
}

static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
{
	unsigned long age;
	int ret = 0;

	if (atomic_read(&rth->u.dst.__refcnt))
		goto out;

	ret = 1;
	if (rth->u.dst.expires &&
	    time_after_eq(jiffies, rth->u.dst.expires))
		goto out;

	age = jiffies - rth->u.dst.lastuse;
	ret = 0;
	if ((age <= tmo1 && !rt_fast_clean(rth)) ||
	    (age <= tmo2 && rt_valuable(rth)))
		goto out;
	ret = 1;
out:	return ret;
}

/* Bits of score are:
 * 31: very valuable
 * 30: not quite useless
 * 29..0: usage counter
 */
static inline u32 rt_score(struct rtable *rt)
{
	u32 score = jiffies - rt->u.dst.lastuse;

	score = ~score & ~(3<<30);

	if (rt_valuable(rt))
		score |= (1<<31);

	if (!rt->fl.iif ||
	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
		score |= (1<<30);

	return score;
}
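/*
 * Sketch of the score ordering above (userspace, illustrative; the jiffies
 * ages are hypothetical).  A recently used, "valuable" output route ends up
 * with both high bits set and outranks a stale input multicast entry, so
 * the latter becomes the eviction candidate.
 */
#if 0
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t fresh = ~(uint32_t)10 & ~(3u << 30);	/* age 10 jiffies */
	uint32_t stale = ~(uint32_t)5000 & ~(3u << 30);	/* age 5000 jiffies */

	fresh |= (1u << 31) | (1u << 30);	/* valuable + output route */
	assert(fresh > stale);			/* higher score survives */
	return 0;
}
#endif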
static inline bool rt_caching(const struct net *net)
{
	return net->ipv4.current_rt_cache_rebuild_count <=
		net->ipv4.sysctl_rt_cache_rebuild_count;
}

static inline bool compare_hash_inputs(const struct flowi *fl1,
					const struct flowi *fl2)
{
	return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) |
		(fl1->iif ^ fl2->iif)) == 0);
}

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
		(fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
		(fl1->mark ^ fl2->mark) |
		(*(u16 *)&fl1->nl_u.ip4_u.tos ^
		 *(u16 *)&fl2->nl_u.ip4_u.tos) |
		(fl1->oif ^ fl2->oif) |
		(fl1->iif ^ fl2->iif)) == 0;
}
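/*
 * The XOR/OR pattern above compares all key fields without branches: each
 * XOR is zero iff its two fields are equal, so the OR of all terms is zero
 * iff every field matches.  A minimal standalone check (illustrative only):
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned int a1 = 5, b1 = 7, a2 = 5, b2 = 7;

	assert((((a1 ^ a2) | (b1 ^ b2)) == 0));	/* all fields equal */
	b2 = 8;
	assert((((a1 ^ a2) | (b1 ^ b2)) != 0));	/* one mismatch is enough */
	return 0;
}
#endif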
static inline int compare_netns(struct rtable *rt1, struct rtable *rt2)
{
	return dev_net(rt1->u.dst.dev) == dev_net(rt2->u.dst.dev);
}

static inline int rt_is_expired(struct rtable *rth)
{
	return rth->rt_genid != rt_genid(dev_net(rth->u.dst.dev));
}
/*
 * Perform a full scan of hash table and free all entries.
 * Can be called by a softirq or a process.
 * In the latter case, we want to reschedule if necessary.
 */
static void rt_do_flush(int process_context)
{
	unsigned int i;
	struct rtable *rth, *next;
	struct rtable * tail;

	for (i = 0; i <= rt_hash_mask; i++) {
		if (process_context && need_resched())
			cond_resched();
		rth = rt_hash_table[i].chain;
		if (!rth)
			continue;

		spin_lock_bh(rt_hash_lock_addr(i));
#ifdef CONFIG_NET_NS
		{
		struct rtable ** prev, * p;

		rth = rt_hash_table[i].chain;

		/* defer releasing the head of the list after spin_unlock */
		for (tail = rth; tail; tail = tail->u.dst.rt_next)
			if (!rt_is_expired(tail))
				break;
		if (rth != tail)
			rt_hash_table[i].chain = tail;

		/* call rt_free on entries after the tail requiring flush */
		prev = &rt_hash_table[i].chain;
		for (p = *prev; p; p = next) {
			next = p->u.dst.rt_next;
			if (!rt_is_expired(p)) {
				prev = &p->u.dst.rt_next;
			} else {
				*prev = next;
				rt_free(p);
			}
		}
		}
#else
		rth = rt_hash_table[i].chain;
		rt_hash_table[i].chain = NULL;
		tail = NULL;
#endif
		spin_unlock_bh(rt_hash_lock_addr(i));

		for (; rth != tail; rth = next) {
			next = rth->u.dst.rt_next;
			rt_free(rth);
		}
	}
}
/*
 * While freeing expired entries, we compute average chain length
 * and standard deviation, using fixed-point arithmetic.
 * This to have an estimation of rt_chain_length_max
 *   rt_chain_length_max = max(elasticity, AVG + 4*SD)
 * We use 3 bits for the fractional part, and 29 (or 61) bits for magnitude.
 */

#define FRACT_BITS 3
#define ONE (1UL << FRACT_BITS)
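/*
 * Worked example of the estimate above (userspace, illustrative; the
 * kernel uses int_sqrt() instead of libm).  Chain lengths 5, 3, 2, 2
 * give avg = 3.0 and sd of roughly 1.2 in fixed point, so the candidate
 * limit (avg + 4*sd) >> FRACT_BITS comes out to 7 entries.
 */
#if 0
#include <stdio.h>
#include <math.h>

int main(void)
{
	unsigned long sum = 0, sum2 = 0, samples = 0;
	unsigned int lengths[] = { 5, 3, 2, 2 }, i;

	for (i = 0; i < 4; i++) {
		unsigned long len = lengths[i] << 3;	/* ONE == 1 << 3 */
		sum += len;
		sum2 += len * len;
		samples++;
	}
	unsigned long avg = sum / samples;		/* 24, i.e. 3.0 */
	unsigned long sd =
		(unsigned long)sqrt((double)(sum2 / samples - avg * avg));
	printf("candidate max: %lu\n", (avg + 4 * sd) >> 3);	/* prints 7 */
	return 0;
}
#endif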
static void rt_check_expire(void)
{
	static unsigned int rover;
	unsigned int i = rover, goal;
	struct rtable *rth, **rthp;
	unsigned long length = 0, samples = 0;
	unsigned long sum = 0, sum2 = 0;
	u64 mult;

	mult = ((u64)ip_rt_gc_interval) << rt_hash_log;
	if (ip_rt_gc_timeout > 1)
		do_div(mult, ip_rt_gc_timeout);
	goal = (unsigned int)mult;
	if (goal > rt_hash_mask)
		goal = rt_hash_mask + 1;
	for (; goal > 0; goal--) {
		unsigned long tmo = ip_rt_gc_timeout;

		i = (i + 1) & rt_hash_mask;
		rthp = &rt_hash_table[i].chain;

		if (need_resched())
			cond_resched();

		samples++;

		if (*rthp == NULL)
			continue;
		length = 0;
		spin_lock_bh(rt_hash_lock_addr(i));
		while ((rth = *rthp) != NULL) {
			if (rt_is_expired(rth)) {
				*rthp = rth->u.dst.rt_next;
				rt_free(rth);
				continue;
			}
			if (rth->u.dst.expires) {
				/* Entry is expired even if it is in use */
				if (time_before_eq(jiffies, rth->u.dst.expires)) {
					tmo >>= 1;
					rthp = &rth->u.dst.rt_next;
					/*
					 * Only bump our length if the hash
					 * inputs on entries n and n+1 are not
					 * the same, we only count entries on
					 * a chain with equal hash inputs once
					 * so that entries for different QOS
					 * levels, and other non-hash input
					 * attributes don't unfairly skew
					 * the length computation
					 */
					if ((*rthp == NULL) ||
					    !compare_hash_inputs(&(*rthp)->fl,
								 &rth->fl))
						length += ONE;
					continue;
				}
			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
				tmo >>= 1;
				rthp = &rth->u.dst.rt_next;
				if ((*rthp == NULL) ||
				    !compare_hash_inputs(&(*rthp)->fl,
							 &rth->fl))
					length += ONE;
				continue;
			}

			/* Cleanup aged off entries. */
			*rthp = rth->u.dst.rt_next;
			rt_free(rth);
		}
		spin_unlock_bh(rt_hash_lock_addr(i));
		sum += length;
		sum2 += length*length;
	}
	if (samples) {
		unsigned long avg = sum / samples;
		unsigned long sd = int_sqrt(sum2 / samples - avg*avg);
		rt_chain_length_max = max_t(unsigned long,
					ip_rt_gc_elasticity,
					(avg + 4*sd) >> FRACT_BITS);
	}
	rover = i;
}
/*
 * rt_worker_func() is run in process context.
 * we call rt_check_expire() to scan part of the hash table
 */
static void rt_worker_func(struct work_struct *work)
{
	rt_check_expire();
	schedule_delayed_work(&expires_work, ip_rt_gc_interval);
}
/*
 * Perturbation of rt_genid by a small quantity [1..256]
 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 * many times (2^24) without giving recent rt_genid.
 * Jenkins hash is strong enough that little changes of rt_genid are OK.
 */
static void rt_cache_invalidate(struct net *net)
{
	unsigned char shuffle;

	get_random_bytes(&shuffle, sizeof(shuffle));
	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
}
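/*
 * Minimal sketch of the invalidation effect (userspace, illustrative):
 * entries are stamped with the genid current at insertion time, so
 * bumping the counter by [1..256] makes every cached entry fail the
 * comparison rt_is_expired() performs.  The worst case of 256 per bump
 * is where the 2^32 / 2^8 = 2^24 figure above comes from.
 */
#if 0
#include <assert.h>

int main(void)
{
	unsigned int genid = 0;
	unsigned int entry_genid = genid;	/* stamped like rt->rt_genid */

	genid += 37 + 1;		/* shuffle + 1U, shuffle in [0..255] */
	assert(entry_genid != genid);	/* entry now reads as expired */
	return 0;
}
#endif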
/*
 * delay < 0  : invalidate cache (fast : entries will be deleted later)
 * delay >= 0 : invalidate & flush cache (can be long)
 */
void rt_cache_flush(struct net *net, int delay)
{
	rt_cache_invalidate(net);
	if (delay >= 0)
		rt_do_flush(!in_softirq());
}

/*
 * We change rt_genid and let gc do the cleanup
 */
static void rt_secret_rebuild(unsigned long __net)
{
	struct net *net = (struct net *)__net;
	rt_cache_invalidate(net);
	mod_timer(&net->ipv4.rt_secret_timer, jiffies + ip_rt_secret_interval);
}

static void rt_secret_rebuild_oneshot(struct net *net)
{
	del_timer_sync(&net->ipv4.rt_secret_timer);
	rt_cache_invalidate(net);
	if (ip_rt_secret_interval) {
		net->ipv4.rt_secret_timer.expires += ip_rt_secret_interval;
		add_timer(&net->ipv4.rt_secret_timer);
	}
}

static void rt_emergency_hash_rebuild(struct net *net)
{
	if (net_ratelimit()) {
		printk(KERN_WARNING "Route hash chain too long!\n");
		printk(KERN_WARNING "Adjust your secret_interval!\n");
	}

	rt_secret_rebuild_oneshot(net);
}
/*
   Short description of GC goals.

   We want to build an algorithm which will keep the routing cache
   at some equilibrium point, where the number of aged-off entries
   is kept approximately equal to the number of newly generated ones.

   Current expiration strength is the variable "expire".
   We try to adjust it dynamically, so that if networking
   is idle expire is large enough to keep enough warm entries,
   and when load increases it reduces to limit cache size.
 */
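/*
 * Worked example of the "goal" computation in the collector below, with
 * hypothetical numbers: 8192 hash buckets (rt_hash_log = 13) and the
 * default elasticity of 8 mean aggressive pruning starts only once the
 * cache holds more than 8 << 13 = 65536 entries.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int entries = 70000;		/* assumed current cache size */
	int rt_hash_log = 13;		/* 8192 buckets */
	int elasticity = 8;		/* ip_rt_gc_elasticity */
	int goal = entries - (elasticity << rt_hash_log);

	printf("entries over the elastic limit: %d\n", goal);	/* 4464 */
	return 0;
}
#endif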
static int rt_garbage_collect(struct dst_ops *ops)
{
	static unsigned long expire = RT_GC_TIMEOUT;
	static unsigned long last_gc;
	static int rover;
	static int equilibrium;
	struct rtable *rth, **rthp;
	unsigned long now = jiffies;
	int goal;

	/*
	 * Garbage collection is pretty expensive,
	 * do not make it too frequently.
	 */

	RT_CACHE_STAT_INC(gc_total);

	if (now - last_gc < ip_rt_gc_min_interval &&
	    atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size) {
		RT_CACHE_STAT_INC(gc_ignored);
		goto out;
	}

	/* Calculate number of entries, which we want to expire now. */
	goal = atomic_read(&ipv4_dst_ops.entries) -
		(ip_rt_gc_elasticity << rt_hash_log);
	if (goal <= 0) {
		if (equilibrium < ipv4_dst_ops.gc_thresh)
			equilibrium = ipv4_dst_ops.gc_thresh;
		goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		if (goal > 0) {
			equilibrium += min_t(unsigned int, goal >> 1, rt_hash_mask + 1);
			goal = atomic_read(&ipv4_dst_ops.entries) - equilibrium;
		}
	} else {
		/* We are in a dangerous area. Try to reduce cache really
		 * aggressively.
		 */
		goal = max_t(unsigned int, goal >> 1, rt_hash_mask + 1);
		equilibrium = atomic_read(&ipv4_dst_ops.entries) - goal;
	}

	if (now - last_gc >= ip_rt_gc_min_interval)
		last_gc = now;

	if (goal <= 0) {
		equilibrium += goal;
		goto work_done;
	}

	do {
		int i, k;

		for (i = rt_hash_mask, k = rover; i >= 0; i--) {
			unsigned long tmo = expire;

			k = (k + 1) & rt_hash_mask;
			rthp = &rt_hash_table[k].chain;
			spin_lock_bh(rt_hash_lock_addr(k));
			while ((rth = *rthp) != NULL) {
				if (!rt_is_expired(rth) &&
					!rt_may_expire(rth, tmo, expire)) {
					tmo >>= 1;
					rthp = &rth->u.dst.rt_next;
					continue;
				}
				*rthp = rth->u.dst.rt_next;
				rt_free(rth);
				goal--;
			}
			spin_unlock_bh(rt_hash_lock_addr(k));
			if (goal <= 0)
				break;
		}
		rover = k;

		if (goal <= 0)
			goto work_done;

		/* Goal is not achieved. We stop the process if:

		   - expire is reduced to zero. Otherwise, expire is halved.
		   - the table is not full.
		   - we are called from interrupt.
		   - the jiffies check is just a fallback/debug loop breaker.
		     We will not spin here for a long time in any case.
		 */

		RT_CACHE_STAT_INC(gc_goal_miss);

		if (expire == 0)
			break;

		expire >>= 1;
#if RT_CACHE_DEBUG >= 2
		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
				atomic_read(&ipv4_dst_ops.entries), goal, i);
#endif

		if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
			goto out;
	} while (!in_softirq() && time_before_eq(jiffies, now));

	if (atomic_read(&ipv4_dst_ops.entries) < ip_rt_max_size)
		goto out;
	if (net_ratelimit())
		printk(KERN_WARNING "dst cache overflow\n");
	RT_CACHE_STAT_INC(gc_dst_overflow);
	return 1;

work_done:
	expire += ip_rt_gc_min_interval;
	if (expire > ip_rt_gc_timeout ||
	    atomic_read(&ipv4_dst_ops.entries) < ipv4_dst_ops.gc_thresh)
		expire = ip_rt_gc_timeout;
#if RT_CACHE_DEBUG >= 2
	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
			atomic_read(&ipv4_dst_ops.entries), goal, rover);
#endif
out:	return 0;
}
static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp)
{
	struct rtable	*rth, **rthp;
	struct rtable	*rthi;
	unsigned long	now;
	struct rtable *cand, **candp;
	u32 		min_score;
	int		chain_length;
	int attempts = !in_softirq();

restart:
	chain_length = 0;
	min_score = ~(u32)0;
	cand = NULL;
	candp = NULL;
	now = jiffies;

	if (!rt_caching(dev_net(rt->u.dst.dev))) {
		rt_drop(rt);
		return 0;
	}

	rthp = &rt_hash_table[hash].chain;
	rthi = NULL;

	spin_lock_bh(rt_hash_lock_addr(hash));
	while ((rth = *rthp) != NULL) {
		if (rt_is_expired(rth)) {
			*rthp = rth->u.dst.rt_next;
			rt_free(rth);
			continue;
		}
		if (compare_keys(&rth->fl, &rt->fl) && compare_netns(rth, rt)) {
			/* Put it first */
			*rthp = rth->u.dst.rt_next;
			/*
			 * Since lookup is lockfree, the deletion
			 * must be visible to another weakly ordered CPU before
			 * the insertion at the start of the hash chain.
			 */
			rcu_assign_pointer(rth->u.dst.rt_next,
					   rt_hash_table[hash].chain);
			/*
			 * Since lookup is lockfree, the update writes
			 * must be ordered for consistency on SMP.
			 */
			rcu_assign_pointer(rt_hash_table[hash].chain, rth);

			dst_use(&rth->u.dst, now);
			spin_unlock_bh(rt_hash_lock_addr(hash));

			rt_drop(rt);
			*rp = rth;
			return 0;
		}

		if (!atomic_read(&rth->u.dst.__refcnt)) {
			u32 score = rt_score(rth);

			if (score <= min_score) {
				cand = rth;
				candp = rthp;
				min_score = score;
			}
		}

		chain_length++;

		rthp = &rth->u.dst.rt_next;

		/*
		 * check to see if the next entry in the chain
		 * contains the same hash input values as rt.  If it does,
		 * this is where we will insert into the list, instead of
		 * at the head.  This groups entries that differ by aspects not
		 * relevant to the hash function together, which we use to adjust
		 * our chain length
		 */
		if (*rthp && compare_hash_inputs(&(*rthp)->fl, &rt->fl))
			rthi = rth;
	}

	if (cand) {
		/* ip_rt_gc_elasticity used to be average length of chain
		 * length, when exceeded gc becomes really aggressive.
		 *
		 * The second limit is less certain. At the moment it allows
		 * only 2 entries per bucket. We will see.
		 */
		if (chain_length > ip_rt_gc_elasticity) {
			*candp = cand->u.dst.rt_next;
			rt_free(cand);
		}
	} else {
		if (chain_length > rt_chain_length_max) {
			struct net *net = dev_net(rt->u.dst.dev);
			int num = ++net->ipv4.current_rt_cache_rebuild_count;
			if (!rt_caching(dev_net(rt->u.dst.dev))) {
				printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
					rt->u.dst.dev->name, num);
			}
			rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev));
		}
	}

	/* Try to bind route to arp only if it is output
	   route or unicast forwarding path.
	 */
	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
		int err = arp_bind_neighbour(&rt->u.dst);
		if (err) {
			spin_unlock_bh(rt_hash_lock_addr(hash));

			if (err != -ENOBUFS) {
				rt_drop(rt);
				return err;
			}

			/* Neighbour tables are full and nothing
			   can be released. Try to shrink route cache,
			   it is most likely it holds some neighbour records.
			 */
			if (attempts-- > 0) {
				int saved_elasticity = ip_rt_gc_elasticity;
				int saved_int = ip_rt_gc_min_interval;
				ip_rt_gc_elasticity	= 1;
				ip_rt_gc_min_interval	= 0;
				rt_garbage_collect(&ipv4_dst_ops);
				ip_rt_gc_min_interval	= saved_int;
				ip_rt_gc_elasticity	= saved_elasticity;
				goto restart;
			}

			if (net_ratelimit())
				printk(KERN_WARNING "Neighbour table overflow.\n");
			rt_drop(rt);
			return -ENOBUFS;
		}
	}

	if (rthi)
		rt->u.dst.rt_next = rthi->u.dst.rt_next;
	else
		rt->u.dst.rt_next = rt_hash_table[hash].chain;

#if RT_CACHE_DEBUG >= 2
	if (rt->u.dst.rt_next) {
		struct rtable *trt;
		printk(KERN_DEBUG "rt_cache @%02x: " NIPQUAD_FMT, hash,
		       NIPQUAD(rt->rt_dst));
		for (trt = rt->u.dst.rt_next; trt; trt = trt->u.dst.rt_next)
			printk(" . " NIPQUAD_FMT, NIPQUAD(trt->rt_dst));
		printk("\n");
	}
#endif
	/*
	 * Since lookup is lockfree, we must make sure
	 * previous writes to rt are committed to memory
	 * before making rt visible to other CPUS.
	 */
	if (rthi)
		rcu_assign_pointer(rthi->u.dst.rt_next, rt);
	else
		rcu_assign_pointer(rt_hash_table[hash].chain, rt);

	spin_unlock_bh(rt_hash_lock_addr(hash));
	*rp = rt;
	return 0;
}
void rt_bind_peer(struct rtable *rt, int create)
{
	static DEFINE_SPINLOCK(rt_peer_lock);
	struct inet_peer *peer;

	peer = inet_getpeer(rt->rt_dst, create);

	spin_lock_bh(&rt_peer_lock);
	if (rt->peer == NULL) {
		rt->peer = peer;
		peer = NULL;
	}
	spin_unlock_bh(&rt_peer_lock);
	if (peer)
		inet_putpeer(peer);
}
/*
 * Peer allocation may fail only in serious out-of-memory conditions.  However
 * we still can generate some output.
 * Random ID selection looks a bit dangerous because we have no chance to
 * select an ID that is unique in a reasonable period of time.
 * But a broken packet identifier may be better than no packet at all.
 */
static void ip_select_fb_ident(struct iphdr *iph)
{
	static DEFINE_SPINLOCK(ip_fb_id_lock);
	static u32 ip_fallback_id;
	u32 salt;

	spin_lock_bh(&ip_fb_id_lock);
	salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
	iph->id = htons(salt & 0xFFFF);
	ip_fallback_id = salt;
	spin_unlock_bh(&ip_fb_id_lock);
}
void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
{
	struct rtable *rt = (struct rtable *) dst;

	if (rt) {
		if (rt->peer == NULL)
			rt_bind_peer(rt, 1);

		/* If peer is attached to destination, it is never detached,
		   so we need not grab a lock to dereference it.
		 */
		if (rt->peer) {
			iph->id = htons(inet_getid(rt->peer, more));
			return;
		}
	} else
		printk(KERN_DEBUG "rt_bind_peer(0) @%p\n",
		       __builtin_return_address(0));

	ip_select_fb_ident(iph);
}
static void rt_del(unsigned hash, struct rtable *rt)
{
	struct rtable **rthp, *aux;

	rthp = &rt_hash_table[hash].chain;
	spin_lock_bh(rt_hash_lock_addr(hash));
	ip_rt_put(rt);
	while ((aux = *rthp) != NULL) {
		if (aux == rt || rt_is_expired(aux)) {
			*rthp = aux->u.dst.rt_next;
			rt_free(aux);
			continue;
		}
		rthp = &aux->u.dst.rt_next;
	}
	spin_unlock_bh(rt_hash_lock_addr(hash));
}
void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
		    __be32 saddr, struct net_device *dev)
{
	int i, k;
	struct in_device *in_dev = in_dev_get(dev);
	struct rtable *rth, **rthp;
	__be32  skeys[2] = { saddr, 0 };
	int  ikeys[2] = { dev->ifindex, 0 };
	struct netevent_redirect netevent;
	struct net *net;

	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev)
	    || ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw)
	    || ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!rt_caching(net))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	for (i = 0; i < 2; i++) {
		for (k = 0; k < 2; k++) {
			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
						rt_genid(net));

			rthp=&rt_hash_table[hash].chain;

			rcu_read_lock();
			while ((rth = rcu_dereference(*rthp)) != NULL) {
				struct rtable *rt;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->fl.oif != ikeys[k] ||
				    rth->fl.iif != 0 ||
				    rt_is_expired(rth) ||
				    !net_eq(dev_net(rth->u.dst.dev), net)) {
					rthp = &rth->u.dst.rt_next;
					continue;
				}

				if (rth->rt_dst != daddr ||
				    rth->rt_src != saddr ||
				    rth->u.dst.error ||
				    rth->rt_gateway != old_gw ||
				    rth->u.dst.dev != dev)
					break;

				dst_hold(&rth->u.dst);
				rcu_read_unlock();

				rt = dst_alloc(&ipv4_dst_ops);
				if (rt == NULL) {
					ip_rt_put(rth);
					in_dev_put(in_dev);
					return;
				}

				/* Copy all the information. */
				*rt = *rth;
				INIT_RCU_HEAD(&rt->u.dst.rcu_head);
				rt->u.dst.__use		= 1;
				atomic_set(&rt->u.dst.__refcnt, 1);
				rt->u.dst.child		= NULL;
				if (rt->u.dst.dev)
					dev_hold(rt->u.dst.dev);
				if (rt->idev)
					in_dev_hold(rt->idev);
				rt->u.dst.obsolete	= 0;
				rt->u.dst.lastuse	= jiffies;
				rt->u.dst.path		= &rt->u.dst;
				rt->u.dst.neighbour	= NULL;
				rt->u.dst.hh		= NULL;
#ifdef CONFIG_XFRM
				rt->u.dst.xfrm		= NULL;
#endif
				rt->rt_genid		= rt_genid(net);
				rt->rt_flags		|= RTCF_REDIRECTED;

				/* Gateway is different ... */
				rt->rt_gateway		= new_gw;

				/* Redirect received -> path was valid */
				dst_confirm(&rth->u.dst);

				if (rt->peer)
					atomic_inc(&rt->peer->refcnt);

				if (arp_bind_neighbour(&rt->u.dst) ||
				    !(rt->u.dst.neighbour->nud_state &
					    NUD_VALID)) {
					if (rt->u.dst.neighbour)
						neigh_event_send(rt->u.dst.neighbour, NULL);
					ip_rt_put(rth);
					rt_drop(rt);
					goto do_next;
				}

				netevent.old = &rth->u.dst;
				netevent.new = &rt->u.dst;
				call_netevent_notifiers(NETEVENT_REDIRECT,
							&netevent);

				rt_del(hash, rth);
				if (!rt_intern_hash(hash, rt, &rt))
					ip_rt_put(rt);
				goto do_next;
			}
			rcu_read_unlock();
		do_next:
			;
		}
	}
	in_dev_put(in_dev);
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_INFO "Redirect from " NIPQUAD_FMT " on %s about "
			NIPQUAD_FMT " ignored.\n"
			"  Advised path = " NIPQUAD_FMT " -> " NIPQUAD_FMT "\n",
		       NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
		       NIPQUAD(saddr), NIPQUAD(daddr));
#endif
	in_dev_put(in_dev);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->u.dst.expires) {
			unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
						rt->fl.oif,
						rt_genid(dev_net(dst->dev)));
#if RT_CACHE_DEBUG >= 1
			printk(KERN_DEBUG "ipv4_negative_advice: redirect to "
					  NIPQUAD_FMT "/%02x dropped\n",
				NIPQUAD(rt->rt_dst), rt->fl.fl4_tos);
#endif
			rt_del(hash, rt);
			ret = NULL;
		}
	}
	return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb->rtable;
	struct in_device *in_dev = in_dev_get(rt->u.dst.dev);

	if (!in_dev)
		return;

	if (!IN_DEV_TX_REDIRECTS(in_dev))
		goto out;

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, rt->u.dst.rate_last + ip_rt_redirect_silence))
		rt->u.dst.rate_tokens = 0;

	/* Too many ignored redirects; do not send anything
	 * set u.dst.rate_last to the last seen redirected packet.
	 */
	if (rt->u.dst.rate_tokens >= ip_rt_redirect_number) {
		rt->u.dst.rate_last = jiffies;
		goto out;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (rt->u.dst.rate_tokens == 0 ||
	    time_after(jiffies,
		       (rt->u.dst.rate_last +
			(ip_rt_redirect_load << rt->u.dst.rate_tokens)))) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
		rt->u.dst.rate_last = jiffies;
		++rt->u.dst.rate_tokens;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (IN_DEV_LOG_MARTIANS(in_dev) &&
		    rt->u.dst.rate_tokens == ip_rt_redirect_number &&
		    net_ratelimit())
			printk(KERN_WARNING "host " NIPQUAD_FMT "/if%d ignores "
				"redirects for " NIPQUAD_FMT " to " NIPQUAD_FMT ".\n",
				NIPQUAD(rt->rt_src), rt->rt_iif,
				NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_gateway));
#endif
	}
out:
	in_dev_put(in_dev);
}
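/*
 * Sketch of the backoff schedule implemented above (illustrative,
 * assuming HZ = 1000): redirect k may be sent only once
 * (ip_rt_redirect_load << k) jiffies have passed since the last one,
 * i.e. 20ms, 40ms, 80ms, ... and after ip_rt_redirect_number tokens
 * nothing is sent until the silence window resets the counter.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long hz = 1000, load = hz / 50;  /* ip_rt_redirect_load */
	unsigned int k;

	for (k = 0; k < 9; k++)		/* ip_rt_redirect_number == 9 */
		printf("token %u: min gap %lu ms\n", k, load << k);
	return 0;
}
#endif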
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb->rtable;
	unsigned long now;
	int code;

	switch (rt->u.dst.error) {
		case EINVAL:
		default:
			goto out;
		case EHOSTUNREACH:
			code = ICMP_HOST_UNREACH;
			break;
		case ENETUNREACH:
			code = ICMP_NET_UNREACH;
			IP_INC_STATS_BH(dev_net(rt->u.dst.dev),
					IPSTATS_MIB_INNOROUTES);
			break;
		case EACCES:
			code = ICMP_PKT_FILTERED;
			break;
	}

	now = jiffies;
	rt->u.dst.rate_tokens += now - rt->u.dst.rate_last;
	if (rt->u.dst.rate_tokens > ip_rt_error_burst)
		rt->u.dst.rate_tokens = ip_rt_error_burst;
	rt->u.dst.rate_last = now;
	if (rt->u.dst.rate_tokens >= ip_rt_error_cost) {
		rt->u.dst.rate_tokens -= ip_rt_error_cost;
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
	}

out:	kfree_skb(skb);
	return 0;
}
/*
 *	The last two values are not from the RFC but
 *	are needed for AMPRnet AX.25 paths.
 */

static const unsigned short mtu_plateau[] =
{32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

static inline unsigned short guess_mtu(unsigned short old_mtu)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mtu_plateau); i++)
		if (old_mtu > mtu_plateau[i])
			return mtu_plateau[i];

	return 68;
}
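/*
 * Usage sketch for the plateau table (userspace, illustrative): per
 * RFC 1191, an ICMP "fragmentation needed" error that carries no
 * next-hop MTU makes us guess the next plateau strictly below the
 * original datagram size; 1500 guesses 1492.
 */
#if 0
#include <stdio.h>

static const unsigned short plateaus[] =
	{ 32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };

int main(void)
{
	unsigned short old_mtu = 1500, guess = 68;	/* 68 = assumed floor */
	unsigned int i;

	for (i = 0; i < sizeof(plateaus) / sizeof(plateaus[0]); i++)
		if (old_mtu > plateaus[i]) {
			guess = plateaus[i];
			break;
		}
	printf("guessed MTU: %u\n", guess);	/* prints 1492 */
	return 0;
}
#endif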
unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
				 unsigned short new_mtu,
				 struct net_device *dev)
{
	int i, k;
	unsigned short old_mtu = ntohs(iph->tot_len);
	struct rtable *rth;
	int  ikeys[2] = { dev->ifindex, 0 };
	__be32  skeys[2] = { iph->saddr, 0, };
	__be32  daddr = iph->daddr;
	unsigned short est_mtu = 0;

	if (ipv4_config.no_pmtu_disc)
		return 0;

	for (k = 0; k < 2; k++) {
		for (i = 0; i < 2; i++) {
			unsigned hash = rt_hash(daddr, skeys[i], ikeys[k],
						rt_genid(net));

			rcu_read_lock();
			for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
			     rth = rcu_dereference(rth->u.dst.rt_next)) {
				unsigned short mtu = new_mtu;

				if (rth->fl.fl4_dst != daddr ||
				    rth->fl.fl4_src != skeys[i] ||
				    rth->rt_dst != daddr ||
				    rth->rt_src != iph->saddr ||
				    rth->fl.oif != ikeys[k] ||
				    rth->fl.iif != 0 ||
				    dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
				    !net_eq(dev_net(rth->u.dst.dev), net) ||
				    rt_is_expired(rth))
					continue;

				if (new_mtu < 68 || new_mtu >= old_mtu) {

					/* BSD 4.2 compatibility hack :-( */
					if (mtu == 0 &&
					    old_mtu >= dst_mtu(&rth->u.dst) &&
					    old_mtu >= 68 + (iph->ihl << 2))
						old_mtu -= iph->ihl << 2;

					mtu = guess_mtu(old_mtu);
				}
				if (mtu <= dst_mtu(&rth->u.dst)) {
					if (mtu < dst_mtu(&rth->u.dst)) {
						dst_confirm(&rth->u.dst);
						if (mtu < ip_rt_min_pmtu) {
							mtu = ip_rt_min_pmtu;
							rth->u.dst.metrics[RTAX_LOCK-1] |=
								(1 << RTAX_MTU);
						}
						rth->u.dst.metrics[RTAX_MTU-1] = mtu;
						dst_set_expires(&rth->u.dst,
							ip_rt_mtu_expires);
					}
					est_mtu = mtu;
				}
			}
			rcu_read_unlock();
		}
	}
	return est_mtu ? : new_mtu;
}
static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	if (dst_mtu(dst) > mtu && mtu >= 68 &&
	    !(dst_metric_locked(dst, RTAX_MTU))) {
		if (mtu < ip_rt_min_pmtu) {
			mtu = ip_rt_min_pmtu;
			dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
		}
		dst->metrics[RTAX_MTU-1] = mtu;
		dst_set_expires(dst, ip_rt_mtu_expires);
		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
	}
}
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *) dst;
	struct inet_peer *peer = rt->peer;
	struct in_device *idev = rt->idev;

	if (peer) {
		rt->peer = NULL;
		inet_putpeer(peer);
	}

	if (idev) {
		rt->idev = NULL;
		in_dev_put(idev);
	}
}

static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			    int how)
{
	struct rtable *rt = (struct rtable *) dst;
	struct in_device *idev = rt->idev;
	if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
		struct in_device *loopback_idev =
			in_dev_get(dev_net(dev)->loopback_dev);
		if (loopback_idev) {
			rt->idev = loopback_idev;
			in_dev_put(idev);
		}
	}
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);

	rt = skb->rtable;
	if (rt)
		dst_set_expires(&rt->u.dst, 0);
}

static int ip_rt_bug(struct sk_buff *skb)
{
	printk(KERN_DEBUG "ip_rt_bug: " NIPQUAD_FMT " -> " NIPQUAD_FMT ", %s\n",
		NIPQUAD(ip_hdr(skb)->saddr), NIPQUAD(ip_hdr(skb)->daddr),
		skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	return 0;
}
/*
   We do not cache source address of outgoing interface,
   because it is used only by IP RR, TS and SRR options,
   so that it is out of the fast path.

   BTW remember: "addr" is allowed to be unaligned
   in IP options!
 */

void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
	__be32 src;
	struct fib_result res;

	if (rt->fl.iif == 0)
		src = rt->rt_src;
	else if (fib_lookup(dev_net(rt->u.dst.dev), &rt->fl, &res) == 0) {
		src = FIB_RES_PREFSRC(res);
		fib_res_put(&res);
	} else
		src = inet_select_addr(rt->u.dst.dev, rt->rt_gateway,
					RT_SCOPE_UNIVERSE);
	memcpy(addr, &src, 4);
}
#ifdef CONFIG_NET_CLS_ROUTE
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->u.dst.tclassid & 0xFFFF))
		rt->u.dst.tclassid |= tag & 0xFFFF;
	if (!(rt->u.dst.tclassid & 0xFFFF0000))
		rt->u.dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
	struct fib_info *fi = res->fi;

	if (fi) {
		if (FIB_RES_GW(*res) &&
		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = FIB_RES_GW(*res);
		memcpy(rt->u.dst.metrics, fi->fib_metrics,
		       sizeof(rt->u.dst.metrics));
		if (fi->fib_mtu == 0) {
			rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
			if (dst_metric_locked(&rt->u.dst, RTAX_MTU) &&
			    rt->rt_gateway != rt->rt_dst &&
			    rt->u.dst.dev->mtu > 576)
				rt->u.dst.metrics[RTAX_MTU-1] = 576;
		}
#ifdef CONFIG_NET_CLS_ROUTE
		rt->u.dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
#endif
	} else
		rt->u.dst.metrics[RTAX_MTU-1]= rt->u.dst.dev->mtu;

	if (dst_metric(&rt->u.dst, RTAX_HOPLIMIT) == 0)
		rt->u.dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
	if (dst_mtu(&rt->u.dst) > IP_MAX_MTU)
		rt->u.dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) == 0)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->u.dst.dev->mtu - 40,
				       ip_rt_min_advmss);
	if (dst_metric(&rt->u.dst, RTAX_ADVMSS) > 65535 - 40)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;

#ifdef CONFIG_NET_CLS_ROUTE
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, fib_rules_tclass(res));
#endif
	set_class_tag(rt, itag);
#endif
	rt->rt_type = res->type;
}
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
				u8 tos, struct net_device *dev, int our)
{
	unsigned hash;
	struct rtable *rth;
	__be32 spec_dst;
	struct in_device *in_dev = in_dev_get(dev);
	u32 itag = 0;

	/* Primary sanity checks. */

	if (in_dev == NULL)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr) || skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr))
			goto e_inval;
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	} else if (fib_validate_source(saddr, 0, tos, 0,
					dev, &spec_dst, &itag) < 0)
		goto e_inval;

	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->u.dst.output= ip_rt_bug;

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->u.dst.tclassid = itag;
#endif
	rth->rt_iif	=
	rth->fl.iif	= dev->ifindex;
	rth->u.dst.dev	= init_net.loopback_dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->fl.oif	= 0;
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst= spec_dst;
	rth->rt_genid	= rt_genid(dev_net(dev));
	rth->rt_flags	= RTCF_MULTICAST;
	rth->rt_type	= RTN_MULTICAST;
	if (our) {
		rth->u.dst.input= ip_local_deliver;
		rth->rt_flags |= RTCF_LOCAL;
	}

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->u.dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	in_dev_put(in_dev);
	hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
	return rt_intern_hash(hash, rth, &skb->rtable);

e_nobufs:
	in_dev_put(in_dev);
	return -ENOBUFS;

e_inval:
	in_dev_put(in_dev);
	return -EINVAL;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation, if source is martian,
		 *	the only hint is MAC header.
		 */
		printk(KERN_WARNING "martian source " NIPQUAD_FMT " from "
			NIPQUAD_FMT", on dev %s\n",
			NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			int i;
			const unsigned char *p = skb_mac_header(skb);
			printk(KERN_WARNING "ll header: ");
			for (i = 0; i < dev->hard_header_len; i++, p++) {
				printk("%02x", *p);
				if (i < (dev->hard_header_len - 1))
					printk(":");
			}
			printk("\n");
		}
	}
#endif
}
static int __mkroute_input(struct sk_buff *skb,
			   struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos,
			   struct rtable **result)
{
	struct rtable* rth;
	int err;
	struct in_device *out_dev;
	unsigned flags = 0;
	__be32 spec_dst;
	u32 itag;

	/* get a working reference to the output device */
	out_dev = in_dev_get(FIB_RES_DEV(*res));
	if (out_dev == NULL) {
		if (net_ratelimit())
			printk(KERN_CRIT "Bug in ip_route_input" \
			       "_slow(). Please, report\n");
		return -EINVAL;
	}


	err = fib_validate_source(saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, &spec_dst, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		err = -EINVAL;
		goto cleanup;
	}

	if (err)
		flags |= RTCF_DIRECTSRC;

	if (out_dev == in_dev && err &&
	    (IN_DEV_SHARED_MEDIA(out_dev) ||
	     inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
		flags |= RTCF_DOREDIRECT;

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 */
		if (out_dev == in_dev) {
			err = -EINVAL;
			goto cleanup;
		}
	}


	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;
	if (IN_DEV_CONF_GET(out_dev, NOXFRM))
		rth->u.dst.flags |= DST_NOXFRM;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
	rth->rt_gateway	= daddr;
	rth->rt_iif 	=
	rth->fl.iif	= in_dev->dev->ifindex;
	rth->u.dst.dev	= (out_dev)->dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->fl.oif 	= 0;
	rth->rt_spec_dst= spec_dst;

	rth->u.dst.input = ip_forward;
	rth->u.dst.output = ip_output;
	rth->rt_genid = rt_genid(dev_net(rth->u.dst.dev));

	rt_set_nexthop(rth, res, itag);

	rth->rt_flags = flags;

	*result = rth;
	err = 0;
 cleanup:
	/* release the working reference to the output device */
	in_dev_put(out_dev);
	return err;
}
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    const struct flowi *fl,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos)
{
	struct rtable* rth = NULL;
	int err;
	unsigned hash;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
		fib_select_multipath(fl, res);
#endif

	/* create a routing cache entry */
	err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
	if (err)
		return err;

	/* put it into the cache */
	hash = rt_hash(daddr, saddr, fl->iif,
		       rt_genid(dev_net(rth->u.dst.dev)));
	return rt_intern_hash(hash, rth, &skb->rtable);
}
/*
 *	NOTE. We drop all the packets that have local source
 *	addresses, because every properly looped back packet
 *	must have correct destination already attached by output routine.
 *
 *	Such approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with 100% of guarantee.
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev)
{
	struct fib_result res;
	struct in_device *in_dev = in_dev_get(dev);
	struct flowi fl = { .nl_u = { .ip4_u =
				      { .daddr = daddr,
					.saddr = saddr,
					.tos = tos,
					.scope = RT_SCOPE_UNIVERSE,
				      } },
			    .mark = skb->mark,
			    .iif = dev->ifindex };
	unsigned	flags = 0;
	u32		itag = 0;
	struct rtable * rth;
	unsigned	hash;
	__be32		spec_dst;
	int		err = -EINVAL;
	int		free_res = 0;
	struct net    * net = dev_net(dev);

	/* IP on this device is disabled. */

	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which can be not detected
	   by fib_lookup.
	 */

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_loopback(saddr))
		goto martian_source;

	if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I do not even know whether to fix it or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_lbcast(daddr) || ipv4_is_zeronet(daddr) ||
	    ipv4_is_loopback(daddr))
		goto martian_destination;

	/*
	 *	Now we are ready to route packet.
	 */
	if ((err = fib_lookup(net, &fl, &res)) != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			goto e_hostunreach;
		goto no_route;
	}
	free_res = 1;

	RT_CACHE_STAT_INC(in_slow_tot);

	if (res.type == RTN_BROADCAST)
		goto brd_input;

	if (res.type == RTN_LOCAL) {
		int result;
		result = fib_validate_source(saddr, daddr, tos,
					     net->loopback_dev->ifindex,
					     dev, &spec_dst, &itag);
		if (result < 0)
			goto martian_source;
		if (result)
			flags |= RTCF_DIRECTSRC;
		spec_dst = daddr;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev))
		goto e_hostunreach;
	if (res.type != RTN_UNICAST)
		goto martian_destination;

	err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
done:
	in_dev_put(in_dev);
	if (free_res)
		fib_res_put(&res);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (ipv4_is_zeronet(saddr))
		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
	else {
		err = fib_validate_source(saddr, 0, tos, 0, dev, &spec_dst,
					  &itag);
		if (err < 0)
			goto martian_source;
		if (err)
			flags |= RTCF_DIRECTSRC;
	}
	flags |= RTCF_BROADCAST;
	res.type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth)
		goto e_nobufs;

	rth->u.dst.output= ip_rt_bug;
	rth->rt_genid = rt_genid(net);

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;
	rth->fl.fl4_dst	= daddr;
	rth->rt_dst	= daddr;
	rth->fl.fl4_tos	= tos;
	rth->fl.mark    = skb->mark;
	rth->fl.fl4_src	= saddr;
	rth->rt_src	= saddr;
#ifdef CONFIG_NET_CLS_ROUTE
	rth->u.dst.tclassid = itag;
#endif
	rth->rt_iif	=
	rth->fl.iif	= dev->ifindex;
	rth->u.dst.dev	= net->loopback_dev;
	dev_hold(rth->u.dst.dev);
	rth->idev	= in_dev_get(rth->u.dst.dev);
	rth->rt_gateway	= daddr;
	rth->rt_spec_dst= spec_dst;
	rth->u.dst.input= ip_local_deliver;
	rth->rt_flags 	= flags|RTCF_LOCAL;
	if (res.type == RTN_UNREACHABLE) {
		rth->u.dst.input= ip_error;
		rth->u.dst.error= -err;
		rth->rt_flags 	&= ~RTCF_LOCAL;
	}
	rth->rt_type	= res.type;
	hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
	err = rt_intern_hash(hash, rth, &skb->rtable);
	goto done;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
	res.type = RTN_UNREACHABLE;
	if (err == -ESRCH)
		err = -ENETUNREACH;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
		printk(KERN_WARNING "martian destination " NIPQUAD_FMT " from "
			NIPQUAD_FMT ", dev %s\n",
			NIPQUAD(daddr), NIPQUAD(saddr), dev->name);
#endif

e_hostunreach:
	err = -EHOSTUNREACH;
	goto done;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto e_inval;
}
int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		   u8 tos, struct net_device *dev)
{
	struct rtable * rth;
	unsigned	hash;
	int iif = dev->ifindex;
	struct net *net;

	net = dev_net(dev);

	if (!rt_caching(net))
		goto skip_cache;

	tos &= IPTOS_RT_MASK;
	hash = rt_hash(daddr, saddr, iif, rt_genid(net));

	rcu_read_lock();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
	     rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (((rth->fl.fl4_dst ^ daddr) |
		     (rth->fl.fl4_src ^ saddr) |
		     (rth->fl.iif ^ iif) |
		     rth->fl.oif |
		     (rth->fl.fl4_tos ^ tos)) == 0 &&
		    rth->fl.mark == skb->mark &&
		    net_eq(dev_net(rth->u.dst.dev), net) &&
		    !rt_is_expired(rth)) {
			dst_use(&rth->u.dst, jiffies);
			RT_CACHE_STAT_INC(in_hit);
			rcu_read_unlock();
			skb->rtable = rth;
			return 0;
		}
		RT_CACHE_STAT_INC(in_hlist_search);
	}
	rcu_read_unlock();

skip_cache:
	/* Multicast recognition logic is moved from route cache to here.
	   The problem was that too many Ethernet cards have broken/missing
	   hardware multicast filters :-( As a result, a host on a multicasting
	   network acquires a lot of useless route cache entries, sort of
	   SDR messages from all the world. Now we try to get rid of them.
	   Really, provided software IP multicast filter is organized
	   reasonably (at least, hashed), it does not result in a slowdown
	   compared with route cache reject entries.
	   Note, that multicast routers are not affected, because
	   route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev;

		rcu_read_lock();
		if ((in_dev = __in_dev_get_rcu(dev)) != NULL) {
			int our = ip_check_mc(in_dev, daddr, saddr,
				ip_hdr(skb)->protocol);
			if (our
#ifdef CONFIG_IP_MROUTE
			    || (!ipv4_is_local_multicast(daddr) &&
				IN_DEV_MFORWARD(in_dev))
#endif
			    ) {
				rcu_read_unlock();
				return ip_route_input_mc(skb, daddr, saddr,
							 tos, dev, our);
			}
		}
		rcu_read_unlock();
		return -EINVAL;
	}
	return ip_route_input_slow(skb, daddr, saddr, tos, dev);
}
static int __mkroute_output(struct rtable **result,
			    struct fib_result *res,
			    const struct flowi *fl,
			    const struct flowi *oldflp,
			    struct net_device *dev_out,
			    unsigned flags)
{
	struct rtable *rth;
	struct in_device *in_dev;
	u32 tos = RT_FL_TOS(oldflp);
	int err = 0;

	if (ipv4_is_loopback(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
		return -EINVAL;

	if (fl->fl4_dst == htonl(0xFFFFFFFF))
		res->type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl->fl4_dst))
		res->type = RTN_MULTICAST;
	else if (ipv4_is_lbcast(fl->fl4_dst) || ipv4_is_zeronet(fl->fl4_dst))
		return -EINVAL;

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	/* get work reference to inet device */
	in_dev = in_dev_get(dev_out);
	if (!in_dev)
		return -EINVAL;

	if (res->type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		if (res->fi) {
			fib_info_put(res->fi);
			res->fi = NULL;
		}
	} else if (res->type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST|RTCF_LOCAL;
		if (!ip_check_mc(in_dev, oldflp->fl4_dst, oldflp->fl4_src,
				 oldflp->proto))
			flags &= ~RTCF_LOCAL;
		/* If the multicast route does not exist, use
		   the default one, but do not gateway in this case.
		   Yes, it is a hack.
		 */
		if (res->fi && res->prefixlen < 4) {
			fib_info_put(res->fi);
			res->fi = NULL;
		}
	}


	rth = dst_alloc(&ipv4_dst_ops);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	atomic_set(&rth->u.dst.__refcnt, 1);
	rth->u.dst.flags= DST_HOST;
	if (IN_DEV_CONF_GET(in_dev, NOXFRM))
		rth->u.dst.flags |= DST_NOXFRM;
	if (IN_DEV_CONF_GET(in_dev, NOPOLICY))
		rth->u.dst.flags |= DST_NOPOLICY;

	rth->fl.fl4_dst	= oldflp->fl4_dst;
	rth->fl.fl4_tos	= tos;
	rth->fl.fl4_src	= oldflp->fl4_src;
	rth->fl.oif	= oldflp->oif;
	rth->fl.mark    = oldflp->mark;
	rth->rt_dst	= fl->fl4_dst;
	rth->rt_src	= fl->fl4_src;
	rth->rt_iif	= oldflp->oif ? : dev_out->ifindex;
	/* get references to the devices that are to be held by the routing
	   cache entry */
	rth->u.dst.dev	= dev_out;
	dev_hold(dev_out);
	rth->idev	= in_dev_get(dev_out);
	rth->rt_gateway = fl->fl4_dst;
	rth->rt_spec_dst= fl->fl4_src;

	rth->u.dst.output=ip_output;
	rth->rt_genid = rt_genid(dev_net(dev_out));

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & RTCF_LOCAL) {
		rth->u.dst.input = ip_local_deliver;
		rth->rt_spec_dst = fl->fl4_dst;
	}
	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		rth->rt_spec_dst = fl->fl4_src;
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->u.dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (res->type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(oldflp->fl4_dst)) {
				rth->u.dst.input = ip_mr_input;
				rth->u.dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, res, 0);

	rth->rt_flags = flags;

	*result = rth;
 cleanup:
	/* release work reference to inet device */
	in_dev_put(in_dev);
	return err;
}
static int ip_mkroute_output(struct rtable **rp,
			     struct fib_result *res,
			     const struct flowi *fl,
			     const struct flowi *oldflp,
			     struct net_device *dev_out,
			     unsigned flags)
{
	struct rtable *rth = NULL;
	int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
	unsigned hash;
	if (err == 0) {
		hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
			       rt_genid(dev_net(dev_out)));
		err = rt_intern_hash(hash, rth, rp);
	}

	return err;
}
2458 * Major route resolver routine.
2461 static int ip_route_output_slow(struct net *net, struct rtable **rp,
2462 const struct flowi *oldflp)
2464 u32 tos = RT_FL_TOS(oldflp);
2465 struct flowi fl = { .nl_u = { .ip4_u =
2466 { .daddr = oldflp->fl4_dst,
2467 .saddr = oldflp->fl4_src,
2468 .tos = tos & IPTOS_RT_MASK,
2469 .scope = ((tos & RTO_ONLINK) ?
2473 .mark = oldflp->mark,
2474 .iif = net->loopback_dev->ifindex,
2475 .oif = oldflp->oif };
2476 struct fib_result res;
2478 struct net_device *dev_out = NULL;
	res.fi		= NULL;
#ifdef CONFIG_IP_MULTIPLE_TABLES
	res.r		= NULL;
#endif

	if (oldflp->fl4_src) {
		err = -EINVAL;
		if (ipv4_is_multicast(oldflp->fl4_src) ||
		    ipv4_is_lbcast(oldflp->fl4_src) ||
		    ipv4_is_zeronet(oldflp->fl4_src))
			goto out;

		/* I removed the check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return the wrong iface, if
		      saddr is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (oldflp->oif == 0
		    && (ipv4_is_multicast(oldflp->fl4_dst) ||
			oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = ip_dev_find(net, oldflp->fl4_src);
			if (dev_out == NULL)
				goto out;

			/* Special hack: user can direct multicasts
			   and limited broadcast via the necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind the socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are
			   broken, because we are not allowed to build a
			   multicast path with a loopback source addr (look,
			   the routing cache cannot know that ttl is zero, so
			   that the packet will not leave this host and the
			   route is valid).
			   Luckily, this hack is a good workaround; see the
			   sketch below.
			 */

			fl.oif = dev_out->ifindex;
			goto make_route;
		}
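		/* Illustrative sketch (not from the original source) of the
		 * userspace pattern the hack above serves; the addresses and
		 * buffer names are made-up assumptions:
		 *
		 *	int s = socket(AF_INET, SOCK_DGRAM, 0);
		 *	struct sockaddr_in local = {
		 *		.sin_family = AF_INET,
		 *		.sin_addr.s_addr = inet_addr("192.0.2.1"),
		 *	};
		 *	bind(s, (struct sockaddr *)&local, sizeof(local));
		 *	struct sockaddr_in grp = {
		 *		.sin_family = AF_INET,
		 *		.sin_port = htons(5004),
		 *		.sin_addr.s_addr = inet_addr("224.2.2.2"),
		 *	};
		 *	// no IP_MULTICAST_IF: the kernel picks the device
		 *	// owning the bound source address (ip_dev_find).
		 *	sendto(s, buf, len, 0,
		 *	       (struct sockaddr *)&grp, sizeof(grp));
		 */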
		if (!(oldflp->flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = ip_dev_find(net, oldflp->fl4_src);
			if (dev_out == NULL)
				goto out;
		}
		if (dev_out)
			dev_put(dev_out);
		dev_out = NULL;
	}

	if (oldflp->oif) {
		dev_out = dev_get_by_index(net, oldflp->oif);
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (__in_dev_get_rtnl(dev_out) == NULL) {
			dev_put(dev_out);
			goto out;	/* Wrong error code */
		}

		if (ipv4_is_local_multicast(oldflp->fl4_dst) ||
		    oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
			if (!fl.fl4_src)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl.fl4_src) {
			if (ipv4_is_multicast(oldflp->fl4_dst))
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      fl.fl4_scope);
			else if (!oldflp->fl4_dst)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl.fl4_dst) {
		fl.fl4_dst = fl.fl4_src;
		if (!fl.fl4_dst)
			fl.fl4_dst = fl.fl4_src = htonl(INADDR_LOOPBACK);
		if (dev_out)
			dev_put(dev_out);
		dev_out = net->loopback_dev;
		dev_hold(dev_out);
		fl.oif = net->loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}
	if (fib_lookup(net, &fl, &res)) {
		res.fi = NULL;
		if (oldflp->oif) {
			/* Apparently, routing tables are wrong. Assume,
			   that the destination is on link.

			   WHY? DW.
			   Because we are allowed to send to iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send packet, ignoring both routing tables
			   and ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl.fl4_src == 0)
				fl.fl4_src = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		if (dev_out)
			dev_put(dev_out);
		err = -ENETUNREACH;
		goto out;
	}
	free_res = 1;
	if (res.type == RTN_LOCAL) {
		if (!fl.fl4_src)
			fl.fl4_src = fl.fl4_dst;
		if (dev_out)
			dev_put(dev_out);
		dev_out = net->loopback_dev;
		dev_hold(dev_out);
		fl.oif = dev_out->ifindex;
		if (res.fi)
			fib_info_put(res.fi);
		res.fi = NULL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl.oif == 0)
		fib_select_multipath(&fl, &res);
	else
#endif
	if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
		fib_select_default(net, &fl, &res);

	if (!fl.fl4_src)
		fl.fl4_src = FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = FIB_RES_DEV(res);
	dev_hold(dev_out);
	fl.oif = dev_out->ifindex;

make_route:
	err = ip_mkroute_output(rp, &res, &fl, oldflp, dev_out, flags);

	if (free_res)
		fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:	return err;
}
int __ip_route_output_key(struct net *net, struct rtable **rp,
			  const struct flowi *flp)
{
	unsigned hash;
	struct rtable *rth;

	if (!rt_caching(net))
		goto slow_output;

	hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif, rt_genid(net));

	rcu_read_lock_bh();
	for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
		rth = rcu_dereference(rth->u.dst.rt_next)) {
		if (rth->fl.fl4_dst == flp->fl4_dst &&
		    rth->fl.fl4_src == flp->fl4_src &&
		    rth->fl.iif == 0 &&
		    rth->fl.oif == flp->oif &&
		    rth->fl.mark == flp->mark &&
		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
			    (IPTOS_RT_MASK | RTO_ONLINK)) &&
		    net_eq(dev_net(rth->u.dst.dev), net) &&
		    !rt_is_expired(rth)) {
			dst_use(&rth->u.dst, jiffies);
			RT_CACHE_STAT_INC(out_hit);
			rcu_read_unlock_bh();
			*rp = rth;
			return 0;
		}
		RT_CACHE_STAT_INC(out_hlist_search);
	}
	rcu_read_unlock_bh();

slow_output:
	return ip_route_output_slow(net, rp, flp);
}

EXPORT_SYMBOL_GPL(__ip_route_output_key);
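/*
 * Illustrative sketch (not from the original source): a minimal caller of
 * __ip_route_output_key(); "net" and "daddr" here are made-up assumptions.
 *
 *	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = daddr } } };
 *	struct rtable *rt;
 *
 *	if (__ip_route_output_key(net, &rt, &fl) == 0) {
 *		// cache hit or slow-path result; rt->u.dst is usable.
 *		ip_rt_put(rt);	// drop the reference when done
 *	}
 */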
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	/* Intentionally empty: a blackhole route ignores PMTU updates. */
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			=	AF_INET,
	.protocol		=	__constant_htons(ETH_P_IP),
	.destroy		=	ipv4_dst_destroy,
	.check			=	ipv4_dst_check,
	.update_pmtu		=	ipv4_rt_blackhole_update_pmtu,
	.entry_size		=	sizeof(struct rtable),
	.entries		=	ATOMIC_INIT(0),
};
static int ipv4_dst_blackhole(struct net *net, struct rtable **rp, struct flowi *flp)
{
	struct rtable *ort = *rp;
	struct rtable *rt = (struct rtable *)
		dst_alloc(&ipv4_dst_blackhole_ops);

	if (rt) {
		struct dst_entry *new = &rt->u.dst;

		atomic_set(&new->__refcnt, 1);
		new->__use = 1;
		/* Blackhole: both directions silently discard packets. */
		new->input = dst_discard;
		new->output = dst_discard;
		memcpy(new->metrics, ort->u.dst.metrics, RTAX_MAX*sizeof(u32));

		new->dev = ort->u.dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->fl = ort->fl;

		rt->idev = ort->idev;
		if (rt->idev)
			in_dev_hold(rt->idev);
		rt->rt_genid = rt_genid(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_dst = ort->rt_dst;
		rt->rt_src = ort->rt_src;
		rt->rt_iif = ort->rt_iif;
		rt->rt_gateway = ort->rt_gateway;
		rt->rt_spec_dst = ort->rt_spec_dst;
		rt->peer = ort->peer;
		if (rt->peer)
			atomic_inc(&rt->peer->refcnt);

		dst_free(new);
	}

	dst_release(&(*rp)->u.dst);
	*rp = rt;
	return (rt ? 0 : -ENOMEM);
}
int ip_route_output_flow(struct net *net, struct rtable **rp, struct flowi *flp,
			 struct sock *sk, int flags)
{
	int err;

	if ((err = __ip_route_output_key(net, rp, flp)) != 0)
		return err;

	if (flp->proto) {
		if (!flp->fl4_src)
			flp->fl4_src = (*rp)->rt_src;
		if (!flp->fl4_dst)
			flp->fl4_dst = (*rp)->rt_dst;
		err = __xfrm_lookup((struct dst_entry **)rp, flp, sk,
				    flags ? XFRM_LOOKUP_WAIT : 0);
		if (err == -EREMOTE)
			err = ipv4_dst_blackhole(net, rp, flp);

		return err;
	}

	return 0;
}

EXPORT_SYMBOL_GPL(ip_route_output_flow);
int ip_route_output_key(struct net *net, struct rtable **rp, struct flowi *flp)
{
	return ip_route_output_flow(net, rp, flp, NULL, 0);
}
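/*
 * Illustrative sketch (not from the original source): resolving a route for
 * a concrete flow, e.g. a UDP transmit path; "net", "daddr" and "sk" are
 * made-up assumptions.
 *
 *	struct flowi fl = {
 *		.oif = 0,
 *		.nl_u = { .ip4_u = { .daddr = daddr, .saddr = 0 } },
 *		.proto = IPPROTO_UDP,
 *	};
 *	struct rtable *rt;
 *	int err = ip_route_output_flow(net, &rt, &fl, sk, 1);
 *
 *	// On success fl.fl4_src has been back-filled from the route, and
 *	// the dst may have been swapped for an xfrm bundle (or a blackhole
 *	// entry if the SA lookup returned -EREMOTE).  A nonzero last
 *	// argument makes the xfrm lookup wait for SA resolution.
 */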
static int rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
			int nowait, unsigned int flags)
{
	struct rtable *rt = skb->rtable;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;
	u32 id = 0, ts = 0, tsage = 0, error;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= rt->fl.fl4_tos;
	r->rtm_table	= RT_TABLE_MAIN;
	NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);

	if (rt->fl.fl4_src) {
		r->rtm_src_len = 32;
		NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
	}
	if (rt->u.dst.dev)
		NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
#ifdef CONFIG_NET_CLS_ROUTE
	if (rt->u.dst.tclassid)
		NLA_PUT_U32(skb, RTA_FLOW, rt->u.dst.tclassid);
#endif
	if (rt->fl.iif)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
	else if (rt->rt_src != rt->fl.fl4_src)
		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);

	if (rt->rt_dst != rt->rt_gateway)
		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);

	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto nla_put_failure;

	error = rt->u.dst.error;
	expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
	if (rt->peer) {
		id = rt->peer->ip_id_count;
		if (rt->peer->tcp_ts_stamp) {
			ts = rt->peer->tcp_ts;
			tsage = get_seconds() - rt->peer->tcp_ts_stamp;
		}
	}

	if (rt->fl.iif) {
#ifdef CONFIG_IP_MROUTE
		__be32 dst = rt->rt_dst;

		if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
		    IPV4_DEVCONF_ALL(&init_net, MC_FORWARDING)) {
			int err = ipmr_get_route(skb, r, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
					error = err;
				}
			}
		} else
#endif
			NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
	}

	if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
			       expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers, this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol	= htons(ETH_P_IP);
		skb->dev	= dev;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb->rtable;
		if (err == 0 && rt->u.dst.error)
			err = -rt->u.dst.error;
	} else {
		struct flowi fl = {
			.nl_u = {
				.ip4_u = {
					.daddr = dst,
					.saddr = src,
					.tos = rtm->rtm_tos,
				},
			},
			.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
		};
		err = ip_route_output_key(net, &rt, &fl);
	}

	if (err)
		goto errout_free;

	skb->rtable = rt;
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}
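/*
 * Illustrative sketch (not from the original source): the userspace side of
 * this handler, a minimal RTM_GETROUTE query like "ip route get 192.0.2.1".
 * All values here are made-up assumptions.
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct rtmsg rtm;
 *		struct rtattr rta;
 *		__be32 dst;
 *	} req = {
 *		.nlh = {
 *			.nlmsg_len = sizeof(req),
 *			.nlmsg_type = RTM_GETROUTE,
 *			.nlmsg_flags = NLM_F_REQUEST,
 *		},
 *		.rtm = { .rtm_family = AF_INET },
 *		.rta = {
 *			.rta_len = RTA_LENGTH(sizeof(__be32)),
 *			.rta_type = RTA_DST,
 *		},
 *		.dst = inet_addr("192.0.2.1"),
 *	};
 *	int s = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	send(s, &req, sizeof(req), 0);
 *	// the RTM_NEWROUTE reply built by rt_fill_info() arrives via recv().
 */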
int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtable *rt;
	int h, s_h;
	int idx, s_idx;
	struct net *net;

	net = sock_net(skb->sk);

	s_h = cb->args[0];
	if (s_h < 0)
		s_h = 0;
	s_idx = idx = cb->args[1];
	for (h = s_h; h <= rt_hash_mask; h++, s_idx = 0) {
		if (!rt_hash_table[h].chain)
			continue;
		rcu_read_lock_bh();
		for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
		     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
			if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
				continue;
			if (rt_is_expired(rt))
				continue;
			skb->dst = dst_clone(&rt->u.dst);
			if (rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					 1, NLM_F_MULTI) <= 0) {
				dst_release(xchg(&skb->dst, NULL));
				rcu_read_unlock_bh();
				goto done;
			}
			dst_release(xchg(&skb->dst, NULL));
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}
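/*
 * Illustrative note (not from the original source): the dump above is
 * resumable.  cb->args[0] remembers the hash bucket and cb->args[1] the
 * index within that bucket's chain, so if the skb fills up mid-dump at,
 * say, (h = 42, idx = 3), the next callback invocation skips straight to
 * bucket 42 and ignores the first 3 entries before emitting again.
 */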
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev), 0);
}
#ifdef CONFIG_SYSCTL
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
					struct file *filp, void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	if (write) {
		int flush_delay;
		ctl_table ctl;
		struct net *net;

		/* Parse the written value through a private ctl_table copy,
		   so the delay lands in flush_delay rather than kernel data. */
		memcpy(&ctl, __ctl, sizeof(ctl));
		ctl.data = &flush_delay;
		proc_dointvec(&ctl, write, filp, buffer, lenp, ppos);

		net = (struct net *)__ctl->extra1;
		rt_cache_flush(net, flush_delay);
		return 0;
	}

	return -EINVAL;
}
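/*
 * Illustrative sketch (not from the original source): triggering the flush
 * from userspace by writing a delay (in seconds) to the sysctl file:
 *
 *	int fd = open("/proc/sys/net/ipv4/route/flush", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "0\n", 2);	// flush immediately
 *		close(fd);
 *	}
 */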
static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
						void __user *oldval,
						size_t __user *oldlenp,
						void __user *newval,
						size_t newlen)
{
	int delay;
	struct net *net;
	if (newlen != sizeof(int))
		return -EINVAL;
	if (get_user(delay, (int __user *)newval))
		return -EFAULT;
	net = (struct net *)table->extra1;
	rt_cache_flush(net, delay);
	return 0;
}
static void rt_secret_reschedule(int old)
{
	struct net *net;
	int new = ip_rt_secret_interval;
	int diff = new - old;

	if (!diff)
		return;

	rtnl_lock();
	for_each_net(net) {
		int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);

		if (!new)
			continue;

		if (deleted) {
			long time = net->ipv4.rt_secret_timer.expires - jiffies;

			if (time <= 0 || (time += diff) <= 0)
				time = 0;

			net->ipv4.rt_secret_timer.expires = time;
		} else
			net->ipv4.rt_secret_timer.expires = new;

		net->ipv4.rt_secret_timer.expires += jiffies;
		add_timer(&net->ipv4.rt_secret_timer);
	}
	rtnl_unlock();
}
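/*
 * Illustrative worked example (not from the original source): suppose the
 * interval is lowered from old = 600s to new = 300s, so diff = -300s.  A
 * pending timer that still had 500s to run is pulled in to 500 - 300 =
 * 200s; one that had only 100s left would go negative, is clamped to 0,
 * and therefore fires (almost) immediately after being re-added.
 */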
static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
					  struct file *filp,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int old = ip_rt_secret_interval;
	int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos);

	rt_secret_reschedule(old);

	return ret;
}

static int ipv4_sysctl_rt_secret_interval_strategy(ctl_table *table,
						   void __user *oldval,
						   size_t __user *oldlenp,
						   void __user *newval,
						   size_t newlen)
{
	int old = ip_rt_secret_interval;
	int ret = sysctl_jiffies(table, oldval, oldlenp, newval, newlen);

	rt_secret_reschedule(old);

	return ret;
}
static ctl_table ipv4_route_table[] = {
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_THRESH,
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MAX_SIZE,
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */

		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL,
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS,
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_ms_jiffies,
		.strategy	= &sysctl_ms_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_TIMEOUT,
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_INTERVAL,
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_LOAD,
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_NUMBER,
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_REDIRECT_SILENCE,
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_COST,
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_ERROR_BURST,
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_GC_ELASTICITY,
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MTU_EXPIRES,
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_jiffies,
		.strategy	= &sysctl_jiffies,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_PMTU,
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_MIN_ADVMSS,
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_ROUTE_SECRET_INTERVAL,
		.procname	= "secret_interval",
		.data		= &ip_rt_secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &ipv4_sysctl_rt_secret_interval,
		.strategy	= &ipv4_sysctl_rt_secret_interval_strategy,
	},
	{ .ctl_name = 0 }
};
static struct ctl_table empty[1];

static struct ctl_table ipv4_skeleton[] =
{
	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE,
	  .mode = 0555, .child = ipv4_route_table},
	{ .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
	  .mode = 0555, .child = empty},
	{ }
};

static __net_initdata struct ctl_path ipv4_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
	{ },
};
static struct ctl_table ipv4_route_flush_table[] = {
	{
		.ctl_name	= NET_IPV4_ROUTE_FLUSH,
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= &ipv4_sysctl_rtcache_flush,
		.strategy	= &ipv4_sysctl_rtcache_flush_strategy,
	},
	{ .ctl_name = 0 },
};

static __net_initdata struct ctl_path ipv4_route_path[] = {
	{ .procname = "net", .ctl_name = CTL_NET, },
	{ .procname = "ipv4", .ctl_name = NET_IPV4, },
	{ .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
	{ },
};
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (net != &init_net) {
		/* Each non-initial namespace gets its own writable copy. */
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr =
		register_net_sysctl_table(net, ipv4_route_path, tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
static __net_init int rt_secret_timer_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid,
			(int) ((num_physpages ^ (num_physpages>>8)) ^
			(jiffies ^ (jiffies >> 7))));

	net->ipv4.rt_secret_timer.function = rt_secret_rebuild;
	net->ipv4.rt_secret_timer.data = (unsigned long)net;
	init_timer_deferrable(&net->ipv4.rt_secret_timer);

	if (ip_rt_secret_interval) {
		net->ipv4.rt_secret_timer.expires =
			jiffies + net_random() % ip_rt_secret_interval +
			ip_rt_secret_interval;
		add_timer(&net->ipv4.rt_secret_timer);
	}
	return 0;
}

static __net_exit void rt_secret_timer_exit(struct net *net)
{
	del_timer_sync(&net->ipv4.rt_secret_timer);
}

static __net_initdata struct pernet_operations rt_secret_timer_ops = {
	.init = rt_secret_timer_init,
	.exit = rt_secret_timer_exit,
};
#ifdef CONFIG_NET_CLS_ROUTE
struct ip_rt_acct *ip_rt_acct __read_mostly;
#endif /* CONFIG_NET_CLS_ROUTE */

static __initdata unsigned long rhash_entries;
static int __init set_rhash_entries(char *str)
{
	if (!str)
		return 0;
	rhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("rhash_entries=", set_rhash_entries);
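/*
 * Illustrative note (not from the original source): the hash size can be
 * pinned on the kernel command line, e.g. booting with
 *
 *	rhash_entries=262144
 *
 * sizes the route cache hash to 2^18 buckets instead of letting
 * alloc_large_system_hash() scale it from available memory.
 */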
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_NET_CLS_ROUTE
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	rt_hash_table = (struct rt_hash_bucket *)
		alloc_large_system_hash("IP route cache",
					sizeof(struct rt_hash_bucket),
					rhash_entries,
					(num_physpages >= 128 * 1024) ?
					15 : 17,
					0,
					&rt_hash_log,
					&rt_hash_mask,
					rhash_entries ? 0 : 512 * 1024);
	memset(rt_hash_table, 0, (rt_hash_mask + 1) * sizeof(struct rt_hash_bucket));
	rt_hash_lock_init();

	ipv4_dst_ops.gc_thresh = (rt_hash_mask + 1);
	ip_rt_max_size = (rt_hash_mask + 1) * 16;
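	/*
	 * Illustrative worked example (not from the original source): on a
	 * machine where the hash ends up with 2^17 = 131072 buckets,
	 * gc_thresh becomes 131072 entries and ip_rt_max_size allows up to
	 * 131072 * 16 = 2097152 cached routes before allocation fails.
	 */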
	devinet_init();
	ip_fib_init();

	/* All the timers, started at system startup tend
	   to synchronize. Perturb it a bit.
	 */
	schedule_delayed_work(&expires_work,
		net_random() % ip_rt_gc_interval + ip_rt_gc_interval);

	if (register_pernet_subsys(&rt_secret_timer_ops))
		printk(KERN_ERR "Unable to setup rt_secret_timer\n");

	if (ip_rt_proc_init())
		printk(KERN_ERR "Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	return rc;
}
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_sysctl_paths(ipv4_path, ipv4_skeleton);
}
#endif

EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);