ipv4: Dump route exceptions if requested
[linux-2.6-microblaze.git] net/ipv4/route.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * INET         An implementation of the TCP/IP protocol suite for the LINUX
4  *              operating system.  INET is implemented using the  BSD Socket
5  *              interface as the means of communication with the user level.
6  *
7  *              ROUTE - implementation of the IP router.
8  *
9  * Authors:     Ross Biro
10  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
11  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
12  *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
13  *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
14  *
15  * Fixes:
16  *              Alan Cox        :       Verify area fixes.
17  *              Alan Cox        :       cli() protects routing changes
18  *              Rui Oliveira    :       ICMP routing table updates
19  *              (rco@di.uminho.pt)      Routing table insertion and update
20  *              Linus Torvalds  :       Rewrote bits to be sensible
21  *              Alan Cox        :       Added BSD route gw semantics
22  *              Alan Cox        :       Super /proc >4K
23  *              Alan Cox        :       MTU in route table
24  *              Alan Cox        :       MSS actually. Also added the window
25  *                                      clamper.
26  *              Sam Lantinga    :       Fixed route matching in rt_del()
27  *              Alan Cox        :       Routing cache support.
28  *              Alan Cox        :       Removed compatibility cruft.
29  *              Alan Cox        :       RTF_REJECT support.
30  *              Alan Cox        :       TCP irtt support.
31  *              Jonathan Naylor :       Added Metric support.
32  *      Miquel van Smoorenburg  :       BSD API fixes.
33  *      Miquel van Smoorenburg  :       Metrics.
34  *              Alan Cox        :       Use __u32 properly
35  *              Alan Cox        :       Aligned routing errors more closely with BSD,
36  *                                      though our system is still very different.
37  *              Alan Cox        :       Faster /proc handling
38  *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
39  *                                      routing caches and better behaviour.
40  *
41  *              Olaf Erb        :       irtt wasn't being copied right.
42  *              Bjorn Ekwall    :       Kerneld route support.
43  *              Alan Cox        :       Multicast fixed (I hope)
44  *              Pavel Krauz     :       Limited broadcast fixed
45  *              Mike McLagan    :       Routing by source
46  *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
47  *                                      route.c and rewritten from scratch.
48  *              Andi Kleen      :       Load-limit warning messages.
49  *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
50  *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
51  *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
52  *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
53  *              Marc Boucher    :       routing by fwmark
54  *      Robert Olsson           :       Added rt_cache statistics
55  *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
56  *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
57  *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
58  *      Ilia Sotnikov           :       Removed TOS from hash calculations
59  */
60
61 #define pr_fmt(fmt) "IPv4: " fmt
62
63 #include <linux/module.h>
64 #include <linux/uaccess.h>
65 #include <linux/bitops.h>
66 #include <linux/types.h>
67 #include <linux/kernel.h>
68 #include <linux/mm.h>
69 #include <linux/string.h>
70 #include <linux/socket.h>
71 #include <linux/sockios.h>
72 #include <linux/errno.h>
73 #include <linux/in.h>
74 #include <linux/inet.h>
75 #include <linux/netdevice.h>
76 #include <linux/proc_fs.h>
77 #include <linux/init.h>
78 #include <linux/skbuff.h>
79 #include <linux/inetdevice.h>
80 #include <linux/igmp.h>
81 #include <linux/pkt_sched.h>
82 #include <linux/mroute.h>
83 #include <linux/netfilter_ipv4.h>
84 #include <linux/random.h>
85 #include <linux/rcupdate.h>
86 #include <linux/times.h>
87 #include <linux/slab.h>
88 #include <linux/jhash.h>
89 #include <net/dst.h>
90 #include <net/dst_metadata.h>
91 #include <net/net_namespace.h>
92 #include <net/protocol.h>
93 #include <net/ip.h>
94 #include <net/route.h>
95 #include <net/inetpeer.h>
96 #include <net/sock.h>
97 #include <net/ip_fib.h>
98 #include <net/nexthop.h>
99 #include <net/arp.h>
100 #include <net/tcp.h>
101 #include <net/icmp.h>
102 #include <net/xfrm.h>
103 #include <net/lwtunnel.h>
104 #include <net/netevent.h>
105 #include <net/rtnetlink.h>
106 #ifdef CONFIG_SYSCTL
107 #include <linux/sysctl.h>
108 #endif
109 #include <net/secure_seq.h>
110 #include <net/ip_tunnels.h>
111 #include <net/l3mdev.h>
112
113 #include "fib_lookup.h"
114
115 #define RT_FL_TOS(oldflp4) \
116         ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
117
118 #define RT_GC_TIMEOUT (300*HZ)
119
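/* Default values for the IPv4 routing behaviour knobs below (most of them
 * are exposed through the net.ipv4.route.* sysctls): redirect rate limiting,
 * ICMP error cost/burst, PMTU expiry and the minimum accepted PMTU.
 */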
120 static int ip_rt_max_size;
121 static int ip_rt_redirect_number __read_mostly  = 9;
122 static int ip_rt_redirect_load __read_mostly    = HZ / 50;
123 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
124 static int ip_rt_error_cost __read_mostly       = HZ;
125 static int ip_rt_error_burst __read_mostly      = 5 * HZ;
126 static int ip_rt_mtu_expires __read_mostly      = 10 * 60 * HZ;
127 static u32 ip_rt_min_pmtu __read_mostly         = 512 + 20 + 20;
128 static int ip_rt_min_advmss __read_mostly       = 256;
129
130 static int ip_rt_gc_timeout __read_mostly       = RT_GC_TIMEOUT;
131
132 /*
133  *      Interface to generic destination cache.
134  */
135
136 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
137 static unsigned int      ipv4_default_advmss(const struct dst_entry *dst);
138 static unsigned int      ipv4_mtu(const struct dst_entry *dst);
139 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
140 static void              ipv4_link_failure(struct sk_buff *skb);
141 static void              ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
142                                            struct sk_buff *skb, u32 mtu);
143 static void              ip_do_redirect(struct dst_entry *dst, struct sock *sk,
144                                         struct sk_buff *skb);
145 static void             ipv4_dst_destroy(struct dst_entry *dst);
146
147 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
148 {
149         WARN_ON(1);
150         return NULL;
151 }
152
153 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
154                                            struct sk_buff *skb,
155                                            const void *daddr);
156 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
157
158 static struct dst_ops ipv4_dst_ops = {
159         .family =               AF_INET,
160         .check =                ipv4_dst_check,
161         .default_advmss =       ipv4_default_advmss,
162         .mtu =                  ipv4_mtu,
163         .cow_metrics =          ipv4_cow_metrics,
164         .destroy =              ipv4_dst_destroy,
165         .negative_advice =      ipv4_negative_advice,
166         .link_failure =         ipv4_link_failure,
167         .update_pmtu =          ip_rt_update_pmtu,
168         .redirect =             ip_do_redirect,
169         .local_out =            __ip_local_out,
170         .neigh_lookup =         ipv4_neigh_lookup,
171         .confirm_neigh =        ipv4_confirm_neigh,
172 };
173
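/* ip_tos2prio maps the four IPv4 TOS "type of service" bits to a default
 * packet scheduler priority (TC_PRIO_*).  Entries built with ECN_OR_COST()
 * cover the TOS values whose low bit (historically "minimise cost") is set
 * and currently resolve to the same priority as their base class.
 */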
174 #define ECN_OR_COST(class)      TC_PRIO_##class
175
176 const __u8 ip_tos2prio[16] = {
177         TC_PRIO_BESTEFFORT,
178         ECN_OR_COST(BESTEFFORT),
179         TC_PRIO_BESTEFFORT,
180         ECN_OR_COST(BESTEFFORT),
181         TC_PRIO_BULK,
182         ECN_OR_COST(BULK),
183         TC_PRIO_BULK,
184         ECN_OR_COST(BULK),
185         TC_PRIO_INTERACTIVE,
186         ECN_OR_COST(INTERACTIVE),
187         TC_PRIO_INTERACTIVE,
188         ECN_OR_COST(INTERACTIVE),
189         TC_PRIO_INTERACTIVE_BULK,
190         ECN_OR_COST(INTERACTIVE_BULK),
191         TC_PRIO_INTERACTIVE_BULK,
192         ECN_OR_COST(INTERACTIVE_BULK)
193 };
194 EXPORT_SYMBOL(ip_tos2prio);
195
196 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
197 #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
198
199 #ifdef CONFIG_PROC_FS
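/* The IPv4 routing cache is gone, but /proc/net/rt_cache is kept for
 * compatibility: the seq_file handlers below only ever emit the legacy
 * header line and never list any entries.
 */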
200 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
201 {
202         if (*pos)
203                 return NULL;
204         return SEQ_START_TOKEN;
205 }
206
207 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
208 {
209         ++*pos;
210         return NULL;
211 }
212
213 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
214 {
215 }
216
217 static int rt_cache_seq_show(struct seq_file *seq, void *v)
218 {
219         if (v == SEQ_START_TOKEN)
220                 seq_printf(seq, "%-127s\n",
221                            "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
222                            "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
223                            "HHUptod\tSpecDst");
224         return 0;
225 }
226
227 static const struct seq_operations rt_cache_seq_ops = {
228         .start  = rt_cache_seq_start,
229         .next   = rt_cache_seq_next,
230         .stop   = rt_cache_seq_stop,
231         .show   = rt_cache_seq_show,
232 };
233
234 static int rt_cache_seq_open(struct inode *inode, struct file *file)
235 {
236         return seq_open(file, &rt_cache_seq_ops);
237 }
238
239 static const struct file_operations rt_cache_seq_fops = {
240         .open    = rt_cache_seq_open,
241         .read    = seq_read,
242         .llseek  = seq_lseek,
243         .release = seq_release,
244 };
245
246
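/* /proc/net/stat/rt_cache: one row of per-cpu rt_cache_stat counters per
 * possible CPU.  *pos encodes "cpu index + 1" so that position 0 can carry
 * the header token; counters of the removed garbage collector are printed
 * as zeroes to keep the column layout stable.
 */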
247 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
248 {
249         int cpu;
250
251         if (*pos == 0)
252                 return SEQ_START_TOKEN;
253
254         for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
255                 if (!cpu_possible(cpu))
256                         continue;
257                 *pos = cpu+1;
258                 return &per_cpu(rt_cache_stat, cpu);
259         }
260         return NULL;
261 }
262
263 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
264 {
265         int cpu;
266
267         for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
268                 if (!cpu_possible(cpu))
269                         continue;
270                 *pos = cpu+1;
271                 return &per_cpu(rt_cache_stat, cpu);
272         }
273         return NULL;
274
275 }
276
277 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
278 {
279
280 }
281
282 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
283 {
284         struct rt_cache_stat *st = v;
285
286         if (v == SEQ_START_TOKEN) {
287                 seq_printf(seq, "entries  in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src  out_hit out_slow_tot out_slow_mc  gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
288                 return 0;
289         }
290
291         seq_printf(seq,"%08x  %08x %08x %08x %08x %08x %08x %08x "
292                    " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
293                    dst_entries_get_slow(&ipv4_dst_ops),
294                    0, /* st->in_hit */
295                    st->in_slow_tot,
296                    st->in_slow_mc,
297                    st->in_no_route,
298                    st->in_brd,
299                    st->in_martian_dst,
300                    st->in_martian_src,
301
302                    0, /* st->out_hit */
303                    st->out_slow_tot,
304                    st->out_slow_mc,
305
306                    0, /* st->gc_total */
307                    0, /* st->gc_ignored */
308                    0, /* st->gc_goal_miss */
309                    0, /* st->gc_dst_overflow */
310                    0, /* st->in_hlist_search */
311                    0  /* st->out_hlist_search */
312                 );
313         return 0;
314 }
315
316 static const struct seq_operations rt_cpu_seq_ops = {
317         .start  = rt_cpu_seq_start,
318         .next   = rt_cpu_seq_next,
319         .stop   = rt_cpu_seq_stop,
320         .show   = rt_cpu_seq_show,
321 };
322
323
324 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
325 {
326         return seq_open(file, &rt_cpu_seq_ops);
327 }
328
329 static const struct file_operations rt_cpu_seq_fops = {
330         .open    = rt_cpu_seq_open,
331         .read    = seq_read,
332         .llseek  = seq_lseek,
333         .release = seq_release,
334 };
335
336 #ifdef CONFIG_IP_ROUTE_CLASSID
337 static int rt_acct_proc_show(struct seq_file *m, void *v)
338 {
339         struct ip_rt_acct *dst, *src;
340         unsigned int i, j;
341
342         dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
343         if (!dst)
344                 return -ENOMEM;
345
346         for_each_possible_cpu(i) {
347                 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
348                 for (j = 0; j < 256; j++) {
349                         dst[j].o_bytes   += src[j].o_bytes;
350                         dst[j].o_packets += src[j].o_packets;
351                         dst[j].i_bytes   += src[j].i_bytes;
352                         dst[j].i_packets += src[j].i_packets;
353                 }
354         }
355
356         seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
357         kfree(dst);
358         return 0;
359 }
360 #endif
361
362 static int __net_init ip_rt_do_proc_init(struct net *net)
363 {
364         struct proc_dir_entry *pde;
365
366         pde = proc_create("rt_cache", 0444, net->proc_net,
367                           &rt_cache_seq_fops);
368         if (!pde)
369                 goto err1;
370
371         pde = proc_create("rt_cache", 0444,
372                           net->proc_net_stat, &rt_cpu_seq_fops);
373         if (!pde)
374                 goto err2;
375
376 #ifdef CONFIG_IP_ROUTE_CLASSID
377         pde = proc_create_single("rt_acct", 0, net->proc_net,
378                         rt_acct_proc_show);
379         if (!pde)
380                 goto err3;
381 #endif
382         return 0;
383
384 #ifdef CONFIG_IP_ROUTE_CLASSID
385 err3:
386         remove_proc_entry("rt_cache", net->proc_net_stat);
387 #endif
388 err2:
389         remove_proc_entry("rt_cache", net->proc_net);
390 err1:
391         return -ENOMEM;
392 }
393
394 static void __net_exit ip_rt_do_proc_exit(struct net *net)
395 {
396         remove_proc_entry("rt_cache", net->proc_net_stat);
397         remove_proc_entry("rt_cache", net->proc_net);
398 #ifdef CONFIG_IP_ROUTE_CLASSID
399         remove_proc_entry("rt_acct", net->proc_net);
400 #endif
401 }
402
403 static struct pernet_operations ip_rt_proc_ops __net_initdata =  {
404         .init = ip_rt_do_proc_init,
405         .exit = ip_rt_do_proc_exit,
406 };
407
408 static int __init ip_rt_proc_init(void)
409 {
410         return register_pernet_subsys(&ip_rt_proc_ops);
411 }
412
413 #else
414 static inline int ip_rt_proc_init(void)
415 {
416         return 0;
417 }
418 #endif /* CONFIG_PROC_FS */
419
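/* Cached routes are invalidated lazily: rt_cache_flush() only bumps the
 * per-netns generation id, and rt_is_expired() compares a route's recorded
 * genid against the current one on every lookup/validation.
 */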
420 static inline bool rt_is_expired(const struct rtable *rth)
421 {
422         return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
423 }
424
425 void rt_cache_flush(struct net *net)
426 {
427         rt_genid_bump_ipv4(net);
428 }
429
430 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
431                                            struct sk_buff *skb,
432                                            const void *daddr)
433 {
434         const struct rtable *rt = container_of(dst, struct rtable, dst);
435         struct net_device *dev = dst->dev;
436         struct neighbour *n;
437
438         rcu_read_lock_bh();
439
440         if (likely(rt->rt_gw_family == AF_INET)) {
441                 n = ip_neigh_gw4(dev, rt->rt_gw4);
442         } else if (rt->rt_gw_family == AF_INET6) {
443                 n = ip_neigh_gw6(dev, &rt->rt_gw6);
444         } else {
445                 __be32 pkey;
446
447                 pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
448                 n = ip_neigh_gw4(dev, pkey);
449         }
450
451         if (n && !refcount_inc_not_zero(&n->refcnt))
452                 n = NULL;
453
454         rcu_read_unlock_bh();
455
456         return n;
457 }
458
459 static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
460 {
461         const struct rtable *rt = container_of(dst, struct rtable, dst);
462         struct net_device *dev = dst->dev;
463         const __be32 *pkey = daddr;
464
465         if (rt->rt_gw_family == AF_INET) {
466                 pkey = (const __be32 *)&rt->rt_gw4;
467         } else if (rt->rt_gw_family == AF_INET6) {
468                 return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
469         } else if (!daddr ||
470                  (rt->rt_flags &
471                   (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
472                 return;
473         }
474         __ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
475 }
476
477 #define IP_IDENTS_SZ 2048u
478
479 static atomic_t *ip_idents __read_mostly;
480 static u32 *ip_tstamps __read_mostly;
481
482 /* In order to protect privacy, we add a perturbation to identifiers
483  * if one generator is seldom used. This makes it hard for an attacker
484  * to infer how many packets were sent between two points in time.
485  */
486 u32 ip_idents_reserve(u32 hash, int segs)
487 {
488         u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
489         atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
490         u32 old = READ_ONCE(*p_tstamp);
491         u32 now = (u32)jiffies;
492         u32 new, delta = 0;
493
494         if (old != now && cmpxchg(p_tstamp, old, now) == old)
495                 delta = prandom_u32_max(now - old);
496
497         /* Do not use atomic_add_return() as it makes UBSAN unhappy */
498         do {
499                 old = (u32)atomic_read(p_id);
500                 new = old + delta + segs;
501         } while (atomic_cmpxchg(p_id, old, new) != old);
502
503         return new - segs;
504 }
505 EXPORT_SYMBOL(ip_idents_reserve);
506
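/* Pick the IP ID bucket by hashing (daddr, saddr, protocol) with a
 * per-namespace siphash key (initialised lazily on first use), then
 * reserve 'segs' consecutive identifiers from that bucket.
 */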
507 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
508 {
509         u32 hash, id;
510
511         /* Note: the lazy key init below is racy, but a lost race is harmless. */
512         if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
513                 get_random_bytes(&net->ipv4.ip_id_key,
514                                  sizeof(net->ipv4.ip_id_key));
515
516         hash = siphash_3u32((__force u32)iph->daddr,
517                             (__force u32)iph->saddr,
518                             iph->protocol,
519                             &net->ipv4.ip_id_key);
520         id = ip_idents_reserve(hash, segs);
521         iph->id = htons(id);
522 }
523 EXPORT_SYMBOL(__ip_select_ident);
524
525 static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
526                              const struct sock *sk,
527                              const struct iphdr *iph,
528                              int oif, u8 tos,
529                              u8 prot, u32 mark, int flow_flags)
530 {
531         if (sk) {
532                 const struct inet_sock *inet = inet_sk(sk);
533
534                 oif = sk->sk_bound_dev_if;
535                 mark = sk->sk_mark;
536                 tos = RT_CONN_FLAGS(sk);
537                 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
538         }
539         flowi4_init_output(fl4, oif, mark, tos,
540                            RT_SCOPE_UNIVERSE, prot,
541                            flow_flags,
542                            iph->daddr, iph->saddr, 0, 0,
543                            sock_net_uid(net, sk));
544 }
545
546 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
547                                const struct sock *sk)
548 {
549         const struct net *net = dev_net(skb->dev);
550         const struct iphdr *iph = ip_hdr(skb);
551         int oif = skb->dev->ifindex;
552         u8 tos = RT_TOS(iph->tos);
553         u8 prot = iph->protocol;
554         u32 mark = skb->mark;
555
556         __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
557 }
558
559 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
560 {
561         const struct inet_sock *inet = inet_sk(sk);
562         const struct ip_options_rcu *inet_opt;
563         __be32 daddr = inet->inet_daddr;
564
565         rcu_read_lock();
566         inet_opt = rcu_dereference(inet->inet_opt);
567         if (inet_opt && inet_opt->opt.srr)
568                 daddr = inet_opt->opt.faddr;
569         flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
570                            RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
571                            inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
572                            inet_sk_flowi_flags(sk),
573                            daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
574         rcu_read_unlock();
575 }
576
577 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
578                                  const struct sk_buff *skb)
579 {
580         if (skb)
581                 build_skb_flow_key(fl4, skb, sk);
582         else
583                 build_sk_flow_key(fl4, sk);
584 }
585
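/* Nexthop exceptions (fib_nh_exception) record per-destination state learned
 * from ICMP redirects and PMTU updates.  They live in a small per-nexthop
 * hash table: lookups are RCU-protected, while insertions, updates and
 * removals are serialised by fnhe_lock.
 */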
586 static DEFINE_SPINLOCK(fnhe_lock);
587
588 static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
589 {
590         struct rtable *rt;
591
592         rt = rcu_dereference(fnhe->fnhe_rth_input);
593         if (rt) {
594                 RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
595                 dst_dev_put(&rt->dst);
596                 dst_release(&rt->dst);
597         }
598         rt = rcu_dereference(fnhe->fnhe_rth_output);
599         if (rt) {
600                 RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
601                 dst_dev_put(&rt->dst);
602                 dst_release(&rt->dst);
603         }
604 }
605
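/* Recycle the exception with the oldest fnhe_stamp in this bucket, flushing
 * any routes it still caches; used when a chain grows past
 * FNHE_RECLAIM_DEPTH in update_or_create_fnhe().
 */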
606 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
607 {
608         struct fib_nh_exception *fnhe, *oldest;
609
610         oldest = rcu_dereference(hash->chain);
611         for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
612              fnhe = rcu_dereference(fnhe->fnhe_next)) {
613                 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
614                         oldest = fnhe;
615         }
616         fnhe_flush_routes(oldest);
617         return oldest;
618 }
619
620 static inline u32 fnhe_hashfun(__be32 daddr)
621 {
622         static u32 fnhe_hashrnd __read_mostly;
623         u32 hval;
624
625         net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
626         hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
627         return hash_32(hval, FNHE_HASH_SHIFT);
628 }
629
630 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
631 {
632         rt->rt_pmtu = fnhe->fnhe_pmtu;
633         rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
634         rt->dst.expires = fnhe->fnhe_expires;
635
636         if (fnhe->fnhe_gw) {
637                 rt->rt_flags |= RTCF_REDIRECTED;
638                 rt->rt_gw_family = AF_INET;
639                 rt->rt_gw4 = fnhe->fnhe_gw;
640         }
641 }
642
643 static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
644                                   __be32 gw, u32 pmtu, bool lock,
645                                   unsigned long expires)
646 {
647         struct fnhe_hash_bucket *hash;
648         struct fib_nh_exception *fnhe;
649         struct rtable *rt;
650         u32 genid, hval;
651         unsigned int i;
652         int depth;
653
654         genid = fnhe_genid(dev_net(nhc->nhc_dev));
655         hval = fnhe_hashfun(daddr);
656
657         spin_lock_bh(&fnhe_lock);
658
659         hash = rcu_dereference(nhc->nhc_exceptions);
660         if (!hash) {
661                 hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
662                 if (!hash)
663                         goto out_unlock;
664                 rcu_assign_pointer(nhc->nhc_exceptions, hash);
665         }
666
667         hash += hval;
668
669         depth = 0;
670         for (fnhe = rcu_dereference(hash->chain); fnhe;
671              fnhe = rcu_dereference(fnhe->fnhe_next)) {
672                 if (fnhe->fnhe_daddr == daddr)
673                         break;
674                 depth++;
675         }
676
677         if (fnhe) {
678                 if (fnhe->fnhe_genid != genid)
679                         fnhe->fnhe_genid = genid;
680                 if (gw)
681                         fnhe->fnhe_gw = gw;
682                 if (pmtu) {
683                         fnhe->fnhe_pmtu = pmtu;
684                         fnhe->fnhe_mtu_locked = lock;
685                 }
686                 fnhe->fnhe_expires = max(1UL, expires);
687                 /* Update all cached dsts too */
688                 rt = rcu_dereference(fnhe->fnhe_rth_input);
689                 if (rt)
690                         fill_route_from_fnhe(rt, fnhe);
691                 rt = rcu_dereference(fnhe->fnhe_rth_output);
692                 if (rt)
693                         fill_route_from_fnhe(rt, fnhe);
694         } else {
695                 if (depth > FNHE_RECLAIM_DEPTH)
696                         fnhe = fnhe_oldest(hash);
697                 else {
698                         fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
699                         if (!fnhe)
700                                 goto out_unlock;
701
702                         fnhe->fnhe_next = hash->chain;
703                         rcu_assign_pointer(hash->chain, fnhe);
704                 }
705                 fnhe->fnhe_genid = genid;
706                 fnhe->fnhe_daddr = daddr;
707                 fnhe->fnhe_gw = gw;
708                 fnhe->fnhe_pmtu = pmtu;
709                 fnhe->fnhe_mtu_locked = lock;
710                 fnhe->fnhe_expires = max(1UL, expires);
711
712                 /* Exception created; mark the cached routes for the nexthop
713                  * stale, so anyone using a cached copy rechecks whether this
714                  * exception applies to them.
715                  */
716                 rt = rcu_dereference(nhc->nhc_rth_input);
717                 if (rt)
718                         rt->dst.obsolete = DST_OBSOLETE_KILL;
719
720                 for_each_possible_cpu(i) {
721                         struct rtable __rcu **prt;
722                         prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
723                         rt = rcu_dereference(*prt);
724                         if (rt)
725                                 rt->dst.obsolete = DST_OBSOLETE_KILL;
726                 }
727         }
728
729         fnhe->fnhe_stamp = jiffies;
730
731 out_unlock:
732         spin_unlock_bh(&fnhe_lock);
733 }
734
735 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
736                              bool kill_route)
737 {
738         __be32 new_gw = icmp_hdr(skb)->un.gateway;
739         __be32 old_gw = ip_hdr(skb)->saddr;
740         struct net_device *dev = skb->dev;
741         struct in_device *in_dev;
742         struct fib_result res;
743         struct neighbour *n;
744         struct net *net;
745
746         switch (icmp_hdr(skb)->code & 7) {
747         case ICMP_REDIR_NET:
748         case ICMP_REDIR_NETTOS:
749         case ICMP_REDIR_HOST:
750         case ICMP_REDIR_HOSTTOS:
751                 break;
752
753         default:
754                 return;
755         }
756
757         if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
758                 return;
759
760         in_dev = __in_dev_get_rcu(dev);
761         if (!in_dev)
762                 return;
763
764         net = dev_net(dev);
765         if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
766             ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
767             ipv4_is_zeronet(new_gw))
768                 goto reject_redirect;
769
770         if (!IN_DEV_SHARED_MEDIA(in_dev)) {
771                 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
772                         goto reject_redirect;
773                 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
774                         goto reject_redirect;
775         } else {
776                 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
777                         goto reject_redirect;
778         }
779
780         n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
781         if (!n)
782                 n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
783         if (!IS_ERR(n)) {
784                 if (!(n->nud_state & NUD_VALID)) {
785                         neigh_event_send(n, NULL);
786                 } else {
787                         if (fib_lookup(net, fl4, &res, 0) == 0) {
788                                 struct fib_nh_common *nhc = FIB_RES_NHC(res);
789
790                                 update_or_create_fnhe(nhc, fl4->daddr, new_gw,
791                                                 0, false,
792                                                 jiffies + ip_rt_gc_timeout);
793                         }
794                         if (kill_route)
795                                 rt->dst.obsolete = DST_OBSOLETE_KILL;
796                         call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
797                 }
798                 neigh_release(n);
799         }
800         return;
801
802 reject_redirect:
803 #ifdef CONFIG_IP_ROUTE_VERBOSE
804         if (IN_DEV_LOG_MARTIANS(in_dev)) {
805                 const struct iphdr *iph = (const struct iphdr *) skb->data;
806                 __be32 daddr = iph->daddr;
807                 __be32 saddr = iph->saddr;
808
809                 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
810                                      "  Advised path = %pI4 -> %pI4\n",
811                                      &old_gw, dev->name, &new_gw,
812                                      &saddr, &daddr);
813         }
814 #endif
815         ;
816 }
817
818 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
819 {
820         struct rtable *rt;
821         struct flowi4 fl4;
822         const struct iphdr *iph = (const struct iphdr *) skb->data;
823         struct net *net = dev_net(skb->dev);
824         int oif = skb->dev->ifindex;
825         u8 tos = RT_TOS(iph->tos);
826         u8 prot = iph->protocol;
827         u32 mark = skb->mark;
828
829         rt = (struct rtable *) dst;
830
831         __build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
832         __ip_do_redirect(rt, skb, &fl4, true);
833 }
834
835 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
836 {
837         struct rtable *rt = (struct rtable *)dst;
838         struct dst_entry *ret = dst;
839
840         if (rt) {
841                 if (dst->obsolete > 0) {
842                         ip_rt_put(rt);
843                         ret = NULL;
844                 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
845                            rt->dst.expires) {
846                         ip_rt_put(rt);
847                         ret = NULL;
848                 }
849         }
850         return ret;
851 }
852
853 /*
854  * Algorithm:
855  *      1. The first ip_rt_redirect_number redirects are sent
856  *         with exponential backoff, then we stop sending them at all,
857  *         assuming that the host ignores our redirects.
858  *      2. If we did not see packets requiring redirects
859  *         during ip_rt_redirect_silence, we assume that the host
860  *         forgot the redirected route and start sending redirects again.
861  *
862  * This algorithm is much cheaper and more intelligent than dumb load limiting
863  * in icmp.c.
864  *
865  * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
866  * and "frag. need" (breaks PMTU discovery) in icmp.c.
867  */
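/* With the defaults above (ip_rt_redirect_number = 9, ip_rt_redirect_load =
 * HZ/50), the k-th ignored redirect is followed by a back-off of
 * (HZ/50) << k jiffies (~40 ms, 80 ms, 160 ms, ...) before the next one,
 * and nothing more is sent once 9 redirects have been ignored.
 */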
868
869 void ip_rt_send_redirect(struct sk_buff *skb)
870 {
871         struct rtable *rt = skb_rtable(skb);
872         struct in_device *in_dev;
873         struct inet_peer *peer;
874         struct net *net;
875         int log_martians;
876         int vif;
877
878         rcu_read_lock();
879         in_dev = __in_dev_get_rcu(rt->dst.dev);
880         if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
881                 rcu_read_unlock();
882                 return;
883         }
884         log_martians = IN_DEV_LOG_MARTIANS(in_dev);
885         vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
886         rcu_read_unlock();
887
888         net = dev_net(rt->dst.dev);
889         peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
890         if (!peer) {
891                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
892                           rt_nexthop(rt, ip_hdr(skb)->daddr));
893                 return;
894         }
895
896         /* No redirected packets during ip_rt_redirect_silence;
897          * reset the algorithm.
898          */
899         if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
900                 peer->rate_tokens = 0;
901                 peer->n_redirects = 0;
902         }
903
904         /* Too many ignored redirects; do not send anything and
905          * set dst.rate_last to the last seen redirected packet.
906          */
907         if (peer->n_redirects >= ip_rt_redirect_number) {
908                 peer->rate_last = jiffies;
909                 goto out_put_peer;
910         }
911
912         /* Check for load limit; set rate_last to the latest sent
913          * redirect.
914          */
915         if (peer->rate_tokens == 0 ||
916             time_after(jiffies,
917                        (peer->rate_last +
918                         (ip_rt_redirect_load << peer->rate_tokens)))) {
919                 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
920
921                 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
922                 peer->rate_last = jiffies;
923                 ++peer->rate_tokens;
924                 ++peer->n_redirects;
925 #ifdef CONFIG_IP_ROUTE_VERBOSE
926                 if (log_martians &&
927                     peer->rate_tokens == ip_rt_redirect_number)
928                         net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
929                                              &ip_hdr(skb)->saddr, inet_iif(skb),
930                                              &ip_hdr(skb)->daddr, &gw);
931 #endif
932         }
933 out_put_peer:
934         inet_putpeer(peer);
935 }
936
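/* ICMP errors generated here are rate limited with a per-peer token bucket:
 * tokens accrue one per jiffy up to ip_rt_error_burst (5 * HZ) and each
 * error costs ip_rt_error_cost (HZ), i.e. roughly one ICMP_DEST_UNREACH per
 * second per source with bursts of up to five.
 */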
937 static int ip_error(struct sk_buff *skb)
938 {
939         struct rtable *rt = skb_rtable(skb);
940         struct net_device *dev = skb->dev;
941         struct in_device *in_dev;
942         struct inet_peer *peer;
943         unsigned long now;
944         struct net *net;
945         bool send;
946         int code;
947
948         if (netif_is_l3_master(skb->dev)) {
949                 dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
950                 if (!dev)
951                         goto out;
952         }
953
954         in_dev = __in_dev_get_rcu(dev);
955
956         /* IP on this device is disabled. */
957         if (!in_dev)
958                 goto out;
959
960         net = dev_net(rt->dst.dev);
961         if (!IN_DEV_FORWARD(in_dev)) {
962                 switch (rt->dst.error) {
963                 case EHOSTUNREACH:
964                         __IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
965                         break;
966
967                 case ENETUNREACH:
968                         __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
969                         break;
970                 }
971                 goto out;
972         }
973
974         switch (rt->dst.error) {
975         case EINVAL:
976         default:
977                 goto out;
978         case EHOSTUNREACH:
979                 code = ICMP_HOST_UNREACH;
980                 break;
981         case ENETUNREACH:
982                 code = ICMP_NET_UNREACH;
983                 __IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
984                 break;
985         case EACCES:
986                 code = ICMP_PKT_FILTERED;
987                 break;
988         }
989
990         peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
991                                l3mdev_master_ifindex(skb->dev), 1);
992
993         send = true;
994         if (peer) {
995                 now = jiffies;
996                 peer->rate_tokens += now - peer->rate_last;
997                 if (peer->rate_tokens > ip_rt_error_burst)
998                         peer->rate_tokens = ip_rt_error_burst;
999                 peer->rate_last = now;
1000                 if (peer->rate_tokens >= ip_rt_error_cost)
1001                         peer->rate_tokens -= ip_rt_error_cost;
1002                 else
1003                         send = false;
1004                 inet_putpeer(peer);
1005         }
1006         if (send)
1007                 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
1008
1009 out:    kfree_skb(skb);
1010         return 0;
1011 }
1012
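/* Record a learned path MTU as a nexthop exception.  An advertised MTU below
 * ip_rt_min_pmtu (552 by default) is clamped to min(old_mtu, ip_rt_min_pmtu)
 * and locked; the exception expires after ip_rt_mtu_expires (10 minutes by
 * default).
 */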
1013 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1014 {
1015         struct dst_entry *dst = &rt->dst;
1016         u32 old_mtu = ipv4_mtu(dst);
1017         struct fib_result res;
1018         bool lock = false;
1019
1020         if (ip_mtu_locked(dst))
1021                 return;
1022
1023         if (old_mtu < mtu)
1024                 return;
1025
1026         if (mtu < ip_rt_min_pmtu) {
1027                 lock = true;
1028                 mtu = min(old_mtu, ip_rt_min_pmtu);
1029         }
1030
1031         if (rt->rt_pmtu == mtu && !lock &&
1032             time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
1033                 return;
1034
1035         rcu_read_lock();
1036         if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
1037                 struct fib_nh_common *nhc = FIB_RES_NHC(res);
1038
1039                 update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
1040                                       jiffies + ip_rt_mtu_expires);
1041         }
1042         rcu_read_unlock();
1043 }
1044
1045 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
1046                               struct sk_buff *skb, u32 mtu)
1047 {
1048         struct rtable *rt = (struct rtable *) dst;
1049         struct flowi4 fl4;
1050
1051         ip_rt_build_flow_key(&fl4, sk, skb);
1052         __ip_rt_update_pmtu(rt, &fl4, mtu);
1053 }
1054
1055 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1056                       int oif, u8 protocol)
1057 {
1058         const struct iphdr *iph = (const struct iphdr *) skb->data;
1059         struct flowi4 fl4;
1060         struct rtable *rt;
1061         u32 mark = IP4_REPLY_MARK(net, skb->mark);
1062
1063         __build_flow_key(net, &fl4, NULL, iph, oif,
1064                          RT_TOS(iph->tos), protocol, mark, 0);
1065         rt = __ip_route_output_key(net, &fl4);
1066         if (!IS_ERR(rt)) {
1067                 __ip_rt_update_pmtu(rt, &fl4, mtu);
1068                 ip_rt_put(rt);
1069         }
1070 }
1071 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
1072
1073 static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1074 {
1075         const struct iphdr *iph = (const struct iphdr *) skb->data;
1076         struct flowi4 fl4;
1077         struct rtable *rt;
1078
1079         __build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);
1080
1081         if (!fl4.flowi4_mark)
1082                 fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1083
1084         rt = __ip_route_output_key(sock_net(sk), &fl4);
1085         if (!IS_ERR(rt)) {
1086                 __ip_rt_update_pmtu(rt, &fl4, mtu);
1087                 ip_rt_put(rt);
1088         }
1089 }
1090
1091 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1092 {
1093         const struct iphdr *iph = (const struct iphdr *) skb->data;
1094         struct flowi4 fl4;
1095         struct rtable *rt;
1096         struct dst_entry *odst = NULL;
1097         bool new = false;
1098         struct net *net = sock_net(sk);
1099
1100         bh_lock_sock(sk);
1101
1102         if (!ip_sk_accept_pmtu(sk))
1103                 goto out;
1104
1105         odst = sk_dst_get(sk);
1106
1107         if (sock_owned_by_user(sk) || !odst) {
1108                 __ipv4_sk_update_pmtu(skb, sk, mtu);
1109                 goto out;
1110         }
1111
1112         __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1113
1114         rt = (struct rtable *)odst;
1115         if (odst->obsolete && !odst->ops->check(odst, 0)) {
1116                 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1117                 if (IS_ERR(rt))
1118                         goto out;
1119
1120                 new = true;
1121         }
1122
1123         __ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);
1124
1125         if (!dst_check(&rt->dst, 0)) {
1126                 if (new)
1127                         dst_release(&rt->dst);
1128
1129                 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1130                 if (IS_ERR(rt))
1131                         goto out;
1132
1133                 new = true;
1134         }
1135
1136         if (new)
1137                 sk_dst_set(sk, &rt->dst);
1138
1139 out:
1140         bh_unlock_sock(sk);
1141         dst_release(odst);
1142 }
1143 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1144
1145 void ipv4_redirect(struct sk_buff *skb, struct net *net,
1146                    int oif, u8 protocol)
1147 {
1148         const struct iphdr *iph = (const struct iphdr *) skb->data;
1149         struct flowi4 fl4;
1150         struct rtable *rt;
1151
1152         __build_flow_key(net, &fl4, NULL, iph, oif,
1153                          RT_TOS(iph->tos), protocol, 0, 0);
1154         rt = __ip_route_output_key(net, &fl4);
1155         if (!IS_ERR(rt)) {
1156                 __ip_do_redirect(rt, skb, &fl4, false);
1157                 ip_rt_put(rt);
1158         }
1159 }
1160 EXPORT_SYMBOL_GPL(ipv4_redirect);
1161
1162 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1163 {
1164         const struct iphdr *iph = (const struct iphdr *) skb->data;
1165         struct flowi4 fl4;
1166         struct rtable *rt;
1167         struct net *net = sock_net(sk);
1168
1169         __build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
1170         rt = __ip_route_output_key(net, &fl4);
1171         if (!IS_ERR(rt)) {
1172                 __ip_do_redirect(rt, skb, &fl4, false);
1173                 ip_rt_put(rt);
1174         }
1175 }
1176 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1177
1178 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1179 {
1180         struct rtable *rt = (struct rtable *) dst;
1181
1182         /* All IPV4 dsts are created with ->obsolete set to the value
1183          * DST_OBSOLETE_FORCE_CHK, which forces every validation call
1184          * down into this function.
1185          *
1186          * When a PMTU/redirect information update invalidates a route,
1187          * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1188          * DST_OBSOLETE_DEAD.
1189          */
1190         if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1191                 return NULL;
1192         return dst;
1193 }
1194
1195 static void ipv4_send_dest_unreach(struct sk_buff *skb)
1196 {
1197         struct ip_options opt;
1198         int res;
1199
1200         /* Recompile ip options since IPCB may not be valid anymore.
1201          * Also check we have a reasonable ipv4 header.
1202          */
1203         if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
1204             ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
1205                 return;
1206
1207         memset(&opt, 0, sizeof(opt));
1208         if (ip_hdr(skb)->ihl > 5) {
1209                 if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
1210                         return;
1211                 opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);
1212
1213                 rcu_read_lock();
1214                 res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
1215                 rcu_read_unlock();
1216
1217                 if (res)
1218                         return;
1219         }
1220         __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
1221 }
1222
1223 static void ipv4_link_failure(struct sk_buff *skb)
1224 {
1225         struct rtable *rt;
1226
1227         ipv4_send_dest_unreach(skb);
1228
1229         rt = skb_rtable(skb);
1230         if (rt)
1231                 dst_set_expires(&rt->dst, 0);
1232 }
1233
1234 static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
1235 {
1236         pr_debug("%s: %pI4 -> %pI4, %s\n",
1237                  __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1238                  skb->dev ? skb->dev->name : "?");
1239         kfree_skb(skb);
1240         WARN_ON(1);
1241         return 0;
1242 }
1243
1244 /*
1245    We do not cache the source address of the outgoing interface,
1246    because it is used only by the IP RR, TS and SRR options,
1247    so it is out of the fast path.
1248
1249    BTW remember: "addr" is allowed to be unaligned
1250    in IP options!
1251  */
1252
1253 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1254 {
1255         __be32 src;
1256
1257         if (rt_is_output_route(rt))
1258                 src = ip_hdr(skb)->saddr;
1259         else {
1260                 struct fib_result res;
1261                 struct iphdr *iph = ip_hdr(skb);
1262                 struct flowi4 fl4 = {
1263                         .daddr = iph->daddr,
1264                         .saddr = iph->saddr,
1265                         .flowi4_tos = RT_TOS(iph->tos),
1266                         .flowi4_oif = rt->dst.dev->ifindex,
1267                         .flowi4_iif = skb->dev->ifindex,
1268                         .flowi4_mark = skb->mark,
1269                 };
1270
1271                 rcu_read_lock();
1272                 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1273                         src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
1274                 else
1275                         src = inet_select_addr(rt->dst.dev,
1276                                                rt_nexthop(rt, iph->daddr),
1277                                                RT_SCOPE_UNIVERSE);
1278                 rcu_read_unlock();
1279         }
1280         memcpy(addr, &src, 4);
1281 }
1282
1283 #ifdef CONFIG_IP_ROUTE_CLASSID
1284 static void set_class_tag(struct rtable *rt, u32 tag)
1285 {
1286         if (!(rt->dst.tclassid & 0xFFFF))
1287                 rt->dst.tclassid |= tag & 0xFFFF;
1288         if (!(rt->dst.tclassid & 0xFFFF0000))
1289                 rt->dst.tclassid |= tag & 0xFFFF0000;
1290 }
1291 #endif
1292
1293 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1294 {
1295         unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
1296         unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
1297                                     ip_rt_min_advmss);
1298
1299         return min(advmss, IPV4_MAX_PMTU - header_size);
1300 }
1301
1302 static unsigned int ipv4_mtu(const struct dst_entry *dst)
1303 {
1304         const struct rtable *rt = (const struct rtable *) dst;
1305         unsigned int mtu = rt->rt_pmtu;
1306
1307         if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1308                 mtu = dst_metric_raw(dst, RTAX_MTU);
1309
1310         if (mtu)
1311                 return mtu;
1312
1313         mtu = READ_ONCE(dst->dev->mtu);
1314
1315         if (unlikely(ip_mtu_locked(dst))) {
1316                 if (rt->rt_gw_family && mtu > 576)
1317                         mtu = 576;
1318         }
1319
1320         mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
1321
1322         return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
1323 }
1324
1325 static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
1326 {
1327         struct fnhe_hash_bucket *hash;
1328         struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1329         u32 hval = fnhe_hashfun(daddr);
1330
1331         spin_lock_bh(&fnhe_lock);
1332
1333         hash = rcu_dereference_protected(nhc->nhc_exceptions,
1334                                          lockdep_is_held(&fnhe_lock));
1335         hash += hval;
1336
1337         fnhe_p = &hash->chain;
1338         fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1339         while (fnhe) {
1340                 if (fnhe->fnhe_daddr == daddr) {
1341                         rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1342                                 fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1343                         /* set fnhe_daddr to 0 to ensure it won't bind with
1344                          * new dsts in rt_bind_exception().
1345                          */
1346                         fnhe->fnhe_daddr = 0;
1347                         fnhe_flush_routes(fnhe);
1348                         kfree_rcu(fnhe, rcu);
1349                         break;
1350                 }
1351                 fnhe_p = &fnhe->fnhe_next;
1352                 fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1353                                                  lockdep_is_held(&fnhe_lock));
1354         }
1355
1356         spin_unlock_bh(&fnhe_lock);
1357 }
1358
1359 static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
1360                                                __be32 daddr)
1361 {
1362         struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
1363         struct fib_nh_exception *fnhe;
1364         u32 hval;
1365
1366         if (!hash)
1367                 return NULL;
1368
1369         hval = fnhe_hashfun(daddr);
1370
1371         for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1372              fnhe = rcu_dereference(fnhe->fnhe_next)) {
1373                 if (fnhe->fnhe_daddr == daddr) {
1374                         if (fnhe->fnhe_expires &&
1375                             time_after(jiffies, fnhe->fnhe_expires)) {
1376                                 ip_del_fnhe(nhc, daddr);
1377                                 break;
1378                         }
1379                         return fnhe;
1380                 }
1381         }
1382         return NULL;
1383 }
1384
1385 /* MTU selection:
1386  * 1. mtu on route is locked - use it
1387  * 2. mtu from nexthop exception
1388  * 3. mtu from egress device
1389  */
1390
1391 u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
1392 {
1393         struct fib_nh_common *nhc = res->nhc;
1394         struct net_device *dev = nhc->nhc_dev;
1395         struct fib_info *fi = res->fi;
1396         u32 mtu = 0;
1397
1398         if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
1399             fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
1400                 mtu = fi->fib_mtu;
1401
1402         if (likely(!mtu)) {
1403                 struct fib_nh_exception *fnhe;
1404
1405                 fnhe = find_exception(nhc, daddr);
1406                 if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
1407                         mtu = fnhe->fnhe_pmtu;
1408         }
1409
1410         if (likely(!mtu))
1411                 mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
1412
1413         return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
1414 }
1415
1416 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1417                               __be32 daddr, const bool do_cache)
1418 {
1419         bool ret = false;
1420
1421         spin_lock_bh(&fnhe_lock);
1422
1423         if (daddr == fnhe->fnhe_daddr) {
1424                 struct rtable __rcu **porig;
1425                 struct rtable *orig;
1426                 int genid = fnhe_genid(dev_net(rt->dst.dev));
1427
1428                 if (rt_is_input_route(rt))
1429                         porig = &fnhe->fnhe_rth_input;
1430                 else
1431                         porig = &fnhe->fnhe_rth_output;
1432                 orig = rcu_dereference(*porig);
1433
1434                 if (fnhe->fnhe_genid != genid) {
1435                         fnhe->fnhe_genid = genid;
1436                         fnhe->fnhe_gw = 0;
1437                         fnhe->fnhe_pmtu = 0;
1438                         fnhe->fnhe_expires = 0;
1439                         fnhe->fnhe_mtu_locked = false;
1440                         fnhe_flush_routes(fnhe);
1441                         orig = NULL;
1442                 }
1443                 fill_route_from_fnhe(rt, fnhe);
1444                 if (!rt->rt_gw4) {
1445                         rt->rt_gw4 = daddr;
1446                         rt->rt_gw_family = AF_INET;
1447                 }
1448
1449                 if (do_cache) {
1450                         dst_hold(&rt->dst);
1451                         rcu_assign_pointer(*porig, rt);
1452                         if (orig) {
1453                                 dst_dev_put(&orig->dst);
1454                                 dst_release(&orig->dst);
1455                         }
1456                         ret = true;
1457                 }
1458
1459                 fnhe->fnhe_stamp = jiffies;
1460         }
1461         spin_unlock_bh(&fnhe_lock);
1462
1463         return ret;
1464 }
1465
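/* Try to cache this route on the nexthop: input routes go into
 * nhc_rth_input, output routes into the per-cpu nhc_pcpu_rth_output slot.
 * The cmpxchg() keeps only one winner; if another CPU got there first the
 * new route is simply not cached and the extra reference is dropped.
 */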
1466 static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
1467 {
1468         struct rtable *orig, *prev, **p;
1469         bool ret = true;
1470
1471         if (rt_is_input_route(rt)) {
1472                 p = (struct rtable **)&nhc->nhc_rth_input;
1473         } else {
1474                 p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
1475         }
1476         orig = *p;
1477
1478         /* hold dst before doing cmpxchg() to avoid race condition
1479          * on this dst
1480          */
1481         dst_hold(&rt->dst);
1482         prev = cmpxchg(p, orig, rt);
1483         if (prev == orig) {
1484                 if (orig) {
1485                         dst_dev_put(&orig->dst);
1486                         dst_release(&orig->dst);
1487                 }
1488         } else {
1489                 dst_release(&rt->dst);
1490                 ret = false;
1491         }
1492
1493         return ret;
1494 }
1495
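/* Routes that could not be cached on a nexthop are kept on a per-cpu
 * "uncached" list so that rt_flush_dev() can still find them and re-point
 * their dst at the loopback device when the underlying device goes away.
 */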
1496 struct uncached_list {
1497         spinlock_t              lock;
1498         struct list_head        head;
1499 };
1500
1501 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1502
1503 void rt_add_uncached_list(struct rtable *rt)
1504 {
1505         struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1506
1507         rt->rt_uncached_list = ul;
1508
1509         spin_lock_bh(&ul->lock);
1510         list_add_tail(&rt->rt_uncached, &ul->head);
1511         spin_unlock_bh(&ul->lock);
1512 }
1513
1514 void rt_del_uncached_list(struct rtable *rt)
1515 {
1516         if (!list_empty(&rt->rt_uncached)) {
1517                 struct uncached_list *ul = rt->rt_uncached_list;
1518
1519                 spin_lock_bh(&ul->lock);
1520                 list_del(&rt->rt_uncached);
1521                 spin_unlock_bh(&ul->lock);
1522         }
1523 }
1524
1525 static void ipv4_dst_destroy(struct dst_entry *dst)
1526 {
1527         struct rtable *rt = (struct rtable *)dst;
1528
1529         ip_dst_metrics_put(dst);
1530         rt_del_uncached_list(rt);
1531 }
1532
1533 void rt_flush_dev(struct net_device *dev)
1534 {
1535         struct net *net = dev_net(dev);
1536         struct rtable *rt;
1537         int cpu;
1538
1539         for_each_possible_cpu(cpu) {
1540                 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1541
1542                 spin_lock_bh(&ul->lock);
1543                 list_for_each_entry(rt, &ul->head, rt_uncached) {
1544                         if (rt->dst.dev != dev)
1545                                 continue;
1546                         rt->dst.dev = net->loopback_dev;
1547                         dev_hold(rt->dst.dev);
1548                         dev_put(dev);
1549                 }
1550                 spin_unlock_bh(&ul->lock);
1551         }
1552 }
1553
1554 static bool rt_cache_valid(const struct rtable *rt)
1555 {
1556         return  rt &&
1557                 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1558                 !rt_is_expired(rt);
1559 }
1560
1561 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1562                            const struct fib_result *res,
1563                            struct fib_nh_exception *fnhe,
1564                            struct fib_info *fi, u16 type, u32 itag,
1565                            const bool do_cache)
1566 {
1567         bool cached = false;
1568
1569         if (fi) {
1570                 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1571
1572                 if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
1573                         rt->rt_gw_family = nhc->nhc_gw_family;
1574                         /* only INET and INET6 are supported */
1575                         if (likely(nhc->nhc_gw_family == AF_INET))
1576                                 rt->rt_gw4 = nhc->nhc_gw.ipv4;
1577                         else
1578                                 rt->rt_gw6 = nhc->nhc_gw.ipv6;
1579                 }
1580
1581                 ip_dst_init_metrics(&rt->dst, fi->fib_metrics);
1582
1583 #ifdef CONFIG_IP_ROUTE_CLASSID
1584                 if (nhc->nhc_family == AF_INET) {
1585                         struct fib_nh *nh;
1586
1587                         nh = container_of(nhc, struct fib_nh, nh_common);
1588                         rt->dst.tclassid = nh->nh_tclassid;
1589                 }
1590 #endif
1591                 rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
1592                 if (unlikely(fnhe))
1593                         cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
1594                 else if (do_cache)
1595                         cached = rt_cache_route(nhc, rt);
1596                 if (unlikely(!cached)) {
1597                         /* Routes we intend to cache in nexthop exception or
1598                          * FIB nexthop have the DST_NOCACHE bit clear.
1599                          * However, if we are unsuccessful at storing this
1600                          * route into the cache we really need to set it.
1601                          */
1602                         if (!rt->rt_gw4) {
1603                                 rt->rt_gw_family = AF_INET;
1604                                 rt->rt_gw4 = daddr;
1605                         }
1606                         rt_add_uncached_list(rt);
1607                 }
1608         } else
1609                 rt_add_uncached_list(rt);
1610
1611 #ifdef CONFIG_IP_ROUTE_CLASSID
1612 #ifdef CONFIG_IP_MULTIPLE_TABLES
1613         set_class_tag(rt, res->tclassid);
1614 #endif
1615         set_class_tag(rt, itag);
1616 #endif
1617 }
1618
1619 struct rtable *rt_dst_alloc(struct net_device *dev,
1620                             unsigned int flags, u16 type,
1621                             bool nopolicy, bool noxfrm, bool will_cache)
1622 {
1623         struct rtable *rt;
1624
1625         rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1626                        (will_cache ? 0 : DST_HOST) |
1627                        (nopolicy ? DST_NOPOLICY : 0) |
1628                        (noxfrm ? DST_NOXFRM : 0));
1629
1630         if (rt) {
1631                 rt->rt_genid = rt_genid_ipv4(dev_net(dev));
1632                 rt->rt_flags = flags;
1633                 rt->rt_type = type;
1634                 rt->rt_is_input = 0;
1635                 rt->rt_iif = 0;
1636                 rt->rt_pmtu = 0;
1637                 rt->rt_mtu_locked = 0;
1638                 rt->rt_gw_family = 0;
1639                 rt->rt_gw4 = 0;
1640                 INIT_LIST_HEAD(&rt->rt_uncached);
1641
1642                 rt->dst.output = ip_output;
1643                 if (flags & RTCF_LOCAL)
1644                         rt->dst.input = ip_local_deliver;
1645         }
1646
1647         return rt;
1648 }
1649 EXPORT_SYMBOL(rt_dst_alloc);
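/* Illustrative, hedged usage sketch (not part of the original file): a
 * caller allocating an output route much like __mkroute_output() below.
 * "dev_out", "flags", "type", "in_dev" and "do_cache" stand for whatever
 * the caller derived from its fib_result; error handling is the caller's
 * responsibility.
 *
 *	struct rtable *rth;
 *
 *	rth = rt_dst_alloc(dev_out, flags, type,
 *			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
 *			   IN_DEV_CONF_GET(in_dev, NOXFRM),
 *			   do_cache);
 *	if (!rth)
 *		return ERR_PTR(-ENOBUFS);
 *	// rt_genid, rt_flags, rt_type etc. are already initialized;
 *	// the caller only overrides what differs (rt_iif, dst.input/output, ...).
 */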
1650
1651 /* called in rcu_read_lock() section */
1652 int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1653                           u8 tos, struct net_device *dev,
1654                           struct in_device *in_dev, u32 *itag)
1655 {
1656         int err;
1657
1658         /* Primary sanity checks. */
1659         if (!in_dev)
1660                 return -EINVAL;
1661
1662         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1663             skb->protocol != htons(ETH_P_IP))
1664                 return -EINVAL;
1665
1666         if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
1667                 return -EINVAL;
1668
1669         if (ipv4_is_zeronet(saddr)) {
1670                 if (!ipv4_is_local_multicast(daddr) &&
1671                     ip_hdr(skb)->protocol != IPPROTO_IGMP)
1672                         return -EINVAL;
1673         } else {
1674                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1675                                           in_dev, itag);
1676                 if (err < 0)
1677                         return err;
1678         }
1679         return 0;
1680 }
1681
1682 /* called in rcu_read_lock() section */
1683 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1684                              u8 tos, struct net_device *dev, int our)
1685 {
1686         struct in_device *in_dev = __in_dev_get_rcu(dev);
1687         unsigned int flags = RTCF_MULTICAST;
1688         struct rtable *rth;
1689         u32 itag = 0;
1690         int err;
1691
1692         err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
1693         if (err)
1694                 return err;
1695
1696         if (our)
1697                 flags |= RTCF_LOCAL;
1698
1699         rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
1700                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1701         if (!rth)
1702                 return -ENOBUFS;
1703
1704 #ifdef CONFIG_IP_ROUTE_CLASSID
1705         rth->dst.tclassid = itag;
1706 #endif
1707         rth->dst.output = ip_rt_bug;
1708         rth->rt_is_input = 1;
1709
1710 #ifdef CONFIG_IP_MROUTE
1711         if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1712                 rth->dst.input = ip_mr_input;
1713 #endif
1714         RT_CACHE_STAT_INC(in_slow_mc);
1715
1716         skb_dst_set(skb, &rth->dst);
1717         return 0;
1718 }
1719
1720
1721 static void ip_handle_martian_source(struct net_device *dev,
1722                                      struct in_device *in_dev,
1723                                      struct sk_buff *skb,
1724                                      __be32 daddr,
1725                                      __be32 saddr)
1726 {
1727         RT_CACHE_STAT_INC(in_martian_src);
1728 #ifdef CONFIG_IP_ROUTE_VERBOSE
1729         if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1730                 /*
1731                  *      RFC1812 recommendation, if source is martian,
1732                  *      the only hint is MAC header.
1733                  */
1734                 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1735                         &daddr, &saddr, dev->name);
1736                 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1737                         print_hex_dump(KERN_WARNING, "ll header: ",
1738                                        DUMP_PREFIX_OFFSET, 16, 1,
1739                                        skb_mac_header(skb),
1740                                        dev->hard_header_len, false);
1741                 }
1742         }
1743 #endif
1744 }
1745
1746 /* called in rcu_read_lock() section */
1747 static int __mkroute_input(struct sk_buff *skb,
1748                            const struct fib_result *res,
1749                            struct in_device *in_dev,
1750                            __be32 daddr, __be32 saddr, u32 tos)
1751 {
1752         struct fib_nh_common *nhc = FIB_RES_NHC(*res);
1753         struct net_device *dev = nhc->nhc_dev;
1754         struct fib_nh_exception *fnhe;
1755         struct rtable *rth;
1756         int err;
1757         struct in_device *out_dev;
1758         bool do_cache;
1759         u32 itag = 0;
1760
1761         /* get a working reference to the output device */
1762         out_dev = __in_dev_get_rcu(dev);
1763         if (!out_dev) {
1764                 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1765                 return -EINVAL;
1766         }
1767
1768         err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1769                                   in_dev->dev, in_dev, &itag);
1770         if (err < 0) {
1771                 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1772                                          saddr);
1773
1774                 goto cleanup;
1775         }
1776
1777         do_cache = res->fi && !itag;
1778         if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1779             skb->protocol == htons(ETH_P_IP)) {
1780                 __be32 gw;
1781
1782                 gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
1783                 if (IN_DEV_SHARED_MEDIA(out_dev) ||
1784                     inet_addr_onlink(out_dev, saddr, gw))
1785                         IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1786         }
1787
1788         if (skb->protocol != htons(ETH_P_IP)) {
1789                 /* Not IP (i.e. ARP). Do not create a route if it is
1790                  * invalid for proxy arp. DNAT routes are always valid.
1791                  *
1792                  * The proxy arp feature has been extended to allow ARP
1793                  * replies back to the same interface, to support
1794                  * Private VLAN switch technologies. See arp.c.
1795                  */
1796                 if (out_dev == in_dev &&
1797                     IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1798                         err = -EINVAL;
1799                         goto cleanup;
1800                 }
1801         }
1802
1803         fnhe = find_exception(nhc, daddr);
1804         if (do_cache) {
1805                 if (fnhe)
1806                         rth = rcu_dereference(fnhe->fnhe_rth_input);
1807                 else
1808                         rth = rcu_dereference(nhc->nhc_rth_input);
1809                 if (rt_cache_valid(rth)) {
1810                         skb_dst_set_noref(skb, &rth->dst);
1811                         goto out;
1812                 }
1813         }
1814
1815         rth = rt_dst_alloc(out_dev->dev, 0, res->type,
1816                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
1817                            IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1818         if (!rth) {
1819                 err = -ENOBUFS;
1820                 goto cleanup;
1821         }
1822
1823         rth->rt_is_input = 1;
1824         RT_CACHE_STAT_INC(in_slow_tot);
1825
1826         rth->dst.input = ip_forward;
1827
1828         rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
1829                        do_cache);
1830         lwtunnel_set_redirect(&rth->dst);
1831         skb_dst_set(skb, &rth->dst);
1832 out:
1833         err = 0;
1834  cleanup:
1835         return err;
1836 }
1837
1838 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1839 /* To make ICMP packets follow the right flow, the multipath hash is
1840  * calculated from the inner IP addresses.
1841  */
1842 static void ip_multipath_l3_keys(const struct sk_buff *skb,
1843                                  struct flow_keys *hash_keys)
1844 {
1845         const struct iphdr *outer_iph = ip_hdr(skb);
1846         const struct iphdr *key_iph = outer_iph;
1847         const struct iphdr *inner_iph;
1848         const struct icmphdr *icmph;
1849         struct iphdr _inner_iph;
1850         struct icmphdr _icmph;
1851
1852         if (likely(outer_iph->protocol != IPPROTO_ICMP))
1853                 goto out;
1854
1855         if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1856                 goto out;
1857
1858         icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1859                                    &_icmph);
1860         if (!icmph)
1861                 goto out;
1862
1863         if (icmph->type != ICMP_DEST_UNREACH &&
1864             icmph->type != ICMP_REDIRECT &&
1865             icmph->type != ICMP_TIME_EXCEEDED &&
1866             icmph->type != ICMP_PARAMETERPROB)
1867                 goto out;
1868
1869         inner_iph = skb_header_pointer(skb,
1870                                        outer_iph->ihl * 4 + sizeof(_icmph),
1871                                        sizeof(_inner_iph), &_inner_iph);
1872         if (!inner_iph)
1873                 goto out;
1874
1875         key_iph = inner_iph;
1876 out:
1877         hash_keys->addrs.v4addrs.src = key_iph->saddr;
1878         hash_keys->addrs.v4addrs.dst = key_iph->daddr;
1879 }
1880
1881 /* if skb is set it will be used and fl4 can be NULL */
1882 int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
1883                        const struct sk_buff *skb, struct flow_keys *flkeys)
1884 {
1885         u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
1886         struct flow_keys hash_keys;
1887         u32 mhash;
1888
1889         switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
1890         case 0:
1891                 memset(&hash_keys, 0, sizeof(hash_keys));
1892                 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1893                 if (skb) {
1894                         ip_multipath_l3_keys(skb, &hash_keys);
1895                 } else {
1896                         hash_keys.addrs.v4addrs.src = fl4->saddr;
1897                         hash_keys.addrs.v4addrs.dst = fl4->daddr;
1898                 }
1899                 break;
1900         case 1:
1901                 /* skb is currently provided only when forwarding */
1902                 if (skb) {
1903                         unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
1904                         struct flow_keys keys;
1905
1906                         /* short-circuit if we already have L4 hash present */
1907                         if (skb->l4_hash)
1908                                 return skb_get_hash_raw(skb) >> 1;
1909
1910                         memset(&hash_keys, 0, sizeof(hash_keys));
1911
1912                         if (!flkeys) {
1913                                 skb_flow_dissect_flow_keys(skb, &keys, flag);
1914                                 flkeys = &keys;
1915                         }
1916
1917                         hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1918                         hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
1919                         hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
1920                         hash_keys.ports.src = flkeys->ports.src;
1921                         hash_keys.ports.dst = flkeys->ports.dst;
1922                         hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
1923                 } else {
1924                         memset(&hash_keys, 0, sizeof(hash_keys));
1925                         hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1926                         hash_keys.addrs.v4addrs.src = fl4->saddr;
1927                         hash_keys.addrs.v4addrs.dst = fl4->daddr;
1928                         hash_keys.ports.src = fl4->fl4_sport;
1929                         hash_keys.ports.dst = fl4->fl4_dport;
1930                         hash_keys.basic.ip_proto = fl4->flowi4_proto;
1931                 }
1932                 break;
1933         case 2:
1934                 memset(&hash_keys, 0, sizeof(hash_keys));
1935                 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
1936                 /* skb is currently provided only when forwarding */
1937                 if (skb) {
1938                         struct flow_keys keys;
1939
1940                         skb_flow_dissect_flow_keys(skb, &keys, 0);
1941
1942                         hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
1943                         hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
1944                 } else {
1945                         /* Same as case 0 */
1946                         hash_keys.addrs.v4addrs.src = fl4->saddr;
1947                         hash_keys.addrs.v4addrs.dst = fl4->daddr;
1948                 }
1949                 break;
1950         }
1951         mhash = flow_hash_from_keys(&hash_keys);
1952
1953         if (multipath_hash)
1954                 mhash = jhash_2words(mhash, multipath_hash, 0);
1955
1956         return mhash >> 1;
1957 }
1958 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
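/* Illustrative, hedged sketch (not part of the original file): how the
 * multipath hash computed above is consumed.  For forwarded traffic the
 * skb is passed in (so ICMP errors hash on the inner header, see
 * ip_multipath_l3_keys()); for locally generated traffic only the flowi4
 * is available.  A caller then narrows the fib_result to one path:
 *
 *	if (res->fi && fib_info_num_path(res->fi) > 1) {
 *		int h = fib_multipath_hash(net, fl4, skb, NULL);
 *
 *		fib_select_multipath(res, h);
 *	}
 *
 * This mirrors ip_mkroute_input() below and the output-path selection done
 * in fib_select_path(); variable names are only for the sketch.
 */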
1959
1960 static int ip_mkroute_input(struct sk_buff *skb,
1961                             struct fib_result *res,
1962                             struct in_device *in_dev,
1963                             __be32 daddr, __be32 saddr, u32 tos,
1964                             struct flow_keys *hkeys)
1965 {
1966 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1967         if (res->fi && fib_info_num_path(res->fi) > 1) {
1968                 int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);
1969
1970                 fib_select_multipath(res, h);
1971         }
1972 #endif
1973
1974         /* create a routing cache entry */
1975         return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1976 }
1977
1978 /*
1979  *      NOTE. We drop all packets that have a local source
1980  *      address, because every properly looped-back packet
1981  *      must already have the correct destination attached by the output routine.
1982  *
1983  *      This approach solves two big problems:
1984  *      1. Non-simplex devices are handled properly.
1985  *      2. IP spoofing attempts are filtered with a 100% guarantee.
1986  *      called with rcu_read_lock()
1987  */
1988
1989 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1990                                u8 tos, struct net_device *dev,
1991                                struct fib_result *res)
1992 {
1993         struct in_device *in_dev = __in_dev_get_rcu(dev);
1994         struct flow_keys *flkeys = NULL, _flkeys;
1995         struct net    *net = dev_net(dev);
1996         struct ip_tunnel_info *tun_info;
1997         int             err = -EINVAL;
1998         unsigned int    flags = 0;
1999         u32             itag = 0;
2000         struct rtable   *rth;
2001         struct flowi4   fl4;
2002         bool do_cache = true;
2003
2004         /* IP on this device is disabled. */
2005
2006         if (!in_dev)
2007                 goto out;
2008
2009         /* Check for the weirdest martians, which cannot be detected
2010            by fib_lookup.
2011          */
2012
2013         tun_info = skb_tunnel_info(skb);
2014         if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2015                 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
2016         else
2017                 fl4.flowi4_tun_key.tun_id = 0;
2018         skb_dst_drop(skb);
2019
2020         if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2021                 goto martian_source;
2022
2023         res->fi = NULL;
2024         res->table = NULL;
2025         if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2026                 goto brd_input;
2027
2028         /* Accept zero addresses only for limited broadcast;
2029          * I do not even know whether to fix this or not. Waiting for complaints :-)
2030          */
2031         if (ipv4_is_zeronet(saddr))
2032                 goto martian_source;
2033
2034         if (ipv4_is_zeronet(daddr))
2035                 goto martian_destination;
2036
2037         /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET()
2038          * more than once, and only when daddr and/or saddr is a loopback address
2039          */
2040         if (ipv4_is_loopback(daddr)) {
2041                 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2042                         goto martian_destination;
2043         } else if (ipv4_is_loopback(saddr)) {
2044                 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2045                         goto martian_source;
2046         }
2047
2048         /*
2049          *      Now we are ready to route packet.
2050          */
2051         fl4.flowi4_oif = 0;
2052         fl4.flowi4_iif = dev->ifindex;
2053         fl4.flowi4_mark = skb->mark;
2054         fl4.flowi4_tos = tos;
2055         fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2056         fl4.flowi4_flags = 0;
2057         fl4.daddr = daddr;
2058         fl4.saddr = saddr;
2059         fl4.flowi4_uid = sock_net_uid(net, NULL);
2060
2061         if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2062                 flkeys = &_flkeys;
2063         } else {
2064                 fl4.flowi4_proto = 0;
2065                 fl4.fl4_sport = 0;
2066                 fl4.fl4_dport = 0;
2067         }
2068
2069         err = fib_lookup(net, &fl4, res, 0);
2070         if (err != 0) {
2071                 if (!IN_DEV_FORWARD(in_dev))
2072                         err = -EHOSTUNREACH;
2073                 goto no_route;
2074         }
2075
2076         if (res->type == RTN_BROADCAST) {
2077                 if (IN_DEV_BFORWARD(in_dev))
2078                         goto make_route;
2079                 /* do not cache if bc_forwarding is enabled */
2080                 if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
2081                         do_cache = false;
2082                 goto brd_input;
2083         }
2084
2085         if (res->type == RTN_LOCAL) {
2086                 err = fib_validate_source(skb, saddr, daddr, tos,
2087                                           0, dev, in_dev, &itag);
2088                 if (err < 0)
2089                         goto martian_source;
2090                 goto local_input;
2091         }
2092
2093         if (!IN_DEV_FORWARD(in_dev)) {
2094                 err = -EHOSTUNREACH;
2095                 goto no_route;
2096         }
2097         if (res->type != RTN_UNICAST)
2098                 goto martian_destination;
2099
2100 make_route:
2101         err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2102 out:    return err;
2103
2104 brd_input:
2105         if (skb->protocol != htons(ETH_P_IP))
2106                 goto e_inval;
2107
2108         if (!ipv4_is_zeronet(saddr)) {
2109                 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2110                                           in_dev, &itag);
2111                 if (err < 0)
2112                         goto martian_source;
2113         }
2114         flags |= RTCF_BROADCAST;
2115         res->type = RTN_BROADCAST;
2116         RT_CACHE_STAT_INC(in_brd);
2117
2118 local_input:
2119         do_cache &= res->fi && !itag;
2120         if (do_cache) {
2121                 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2122
2123                 rth = rcu_dereference(nhc->nhc_rth_input);
2124                 if (rt_cache_valid(rth)) {
2125                         skb_dst_set_noref(skb, &rth->dst);
2126                         err = 0;
2127                         goto out;
2128                 }
2129         }
2130
2131         rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
2132                            flags | RTCF_LOCAL, res->type,
2133                            IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
2134         if (!rth)
2135                 goto e_nobufs;
2136
2137         rth->dst.output = ip_rt_bug;
2138 #ifdef CONFIG_IP_ROUTE_CLASSID
2139         rth->dst.tclassid = itag;
2140 #endif
2141         rth->rt_is_input = 1;
2142
2143         RT_CACHE_STAT_INC(in_slow_tot);
2144         if (res->type == RTN_UNREACHABLE) {
2145                 rth->dst.input = ip_error;
2146                 rth->dst.error = -err;
2147                 rth->rt_flags   &= ~RTCF_LOCAL;
2148         }
2149
2150         if (do_cache) {
2151                 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2152
2153                 rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2154                 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2155                         WARN_ON(rth->dst.input == lwtunnel_input);
2156                         rth->dst.lwtstate->orig_input = rth->dst.input;
2157                         rth->dst.input = lwtunnel_input;
2158                 }
2159
2160                 if (unlikely(!rt_cache_route(nhc, rth)))
2161                         rt_add_uncached_list(rth);
2162         }
2163         skb_dst_set(skb, &rth->dst);
2164         err = 0;
2165         goto out;
2166
2167 no_route:
2168         RT_CACHE_STAT_INC(in_no_route);
2169         res->type = RTN_UNREACHABLE;
2170         res->fi = NULL;
2171         res->table = NULL;
2172         goto local_input;
2173
2174         /*
2175          *      Do not cache martian addresses: they should be logged (RFC1812)
2176          */
2177 martian_destination:
2178         RT_CACHE_STAT_INC(in_martian_dst);
2179 #ifdef CONFIG_IP_ROUTE_VERBOSE
2180         if (IN_DEV_LOG_MARTIANS(in_dev))
2181                 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2182                                      &daddr, &saddr, dev->name);
2183 #endif
2184
2185 e_inval:
2186         err = -EINVAL;
2187         goto out;
2188
2189 e_nobufs:
2190         err = -ENOBUFS;
2191         goto out;
2192
2193 martian_source:
2194         ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2195         goto out;
2196 }
2197
2198 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2199                          u8 tos, struct net_device *dev)
2200 {
2201         struct fib_result res;
2202         int err;
2203
2204         tos &= IPTOS_RT_MASK;
2205         rcu_read_lock();
2206         err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2207         rcu_read_unlock();
2208
2209         return err;
2210 }
2211 EXPORT_SYMBOL(ip_route_input_noref);
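/* Illustrative, hedged usage sketch (not part of the original file): the
 * typical receive-path call, as made for example from the IPv4 receive
 * path (ip_rcv_finish() and friends), routing an skb that does not yet
 * carry a dst:
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	int err;
 *
 *	err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 *				   iph->tos, skb->dev);
 *	if (err)
 *		goto drop;	// hypothetical error label
 *	// on success skb_dst(skb) is set (no extra reference is held)
 */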
2212
2213 /* called with rcu_read_lock held */
2214 int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2215                        u8 tos, struct net_device *dev, struct fib_result *res)
2216 {
2217         /* Multicast recognition logic is moved from the route cache to here.
2218            The problem was that too many Ethernet cards have broken/missing
2219            hardware multicast filters :-( As a result, a host on a multicast
2220            network acquires a lot of useless route cache entries, e.g. from
2221            SDR messages from all over the world. Now we try to get rid of them.
2222            Really, provided the software IP multicast filter is organized
2223            reasonably (at least, hashed), it does not result in a slowdown
2224            compared with route cache reject entries.
2225            Note that multicast routers are not affected, because a
2226            route cache entry is created eventually.
2227          */
2228         if (ipv4_is_multicast(daddr)) {
2229                 struct in_device *in_dev = __in_dev_get_rcu(dev);
2230                 int our = 0;
2231                 int err = -EINVAL;
2232
2233                 if (!in_dev)
2234                         return err;
2235                 our = ip_check_mc_rcu(in_dev, daddr, saddr,
2236                                       ip_hdr(skb)->protocol);
2237
2238                 /* check l3 master if no match yet */
2239                 if (!our && netif_is_l3_slave(dev)) {
2240                         struct in_device *l3_in_dev;
2241
2242                         l3_in_dev = __in_dev_get_rcu(skb->dev);
2243                         if (l3_in_dev)
2244                                 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2245                                                       ip_hdr(skb)->protocol);
2246                 }
2247
2248                 if (our
2249 #ifdef CONFIG_IP_MROUTE
2250                         ||
2251                     (!ipv4_is_local_multicast(daddr) &&
2252                      IN_DEV_MFORWARD(in_dev))
2253 #endif
2254                    ) {
2255                         err = ip_route_input_mc(skb, daddr, saddr,
2256                                                 tos, dev, our);
2257                 }
2258                 return err;
2259         }
2260
2261         return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2262 }
2263
2264 /* called with rcu_read_lock() */
2265 static struct rtable *__mkroute_output(const struct fib_result *res,
2266                                        const struct flowi4 *fl4, int orig_oif,
2267                                        struct net_device *dev_out,
2268                                        unsigned int flags)
2269 {
2270         struct fib_info *fi = res->fi;
2271         struct fib_nh_exception *fnhe;
2272         struct in_device *in_dev;
2273         u16 type = res->type;
2274         struct rtable *rth;
2275         bool do_cache;
2276
2277         in_dev = __in_dev_get_rcu(dev_out);
2278         if (!in_dev)
2279                 return ERR_PTR(-EINVAL);
2280
2281         if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2282                 if (ipv4_is_loopback(fl4->saddr) &&
2283                     !(dev_out->flags & IFF_LOOPBACK) &&
2284                     !netif_is_l3_master(dev_out))
2285                         return ERR_PTR(-EINVAL);
2286
2287         if (ipv4_is_lbcast(fl4->daddr))
2288                 type = RTN_BROADCAST;
2289         else if (ipv4_is_multicast(fl4->daddr))
2290                 type = RTN_MULTICAST;
2291         else if (ipv4_is_zeronet(fl4->daddr))
2292                 return ERR_PTR(-EINVAL);
2293
2294         if (dev_out->flags & IFF_LOOPBACK)
2295                 flags |= RTCF_LOCAL;
2296
2297         do_cache = true;
2298         if (type == RTN_BROADCAST) {
2299                 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2300                 fi = NULL;
2301         } else if (type == RTN_MULTICAST) {
2302                 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2303                 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2304                                      fl4->flowi4_proto))
2305                         flags &= ~RTCF_LOCAL;
2306                 else
2307                         do_cache = false;
2308                 /* If a multicast route does not exist, use
2309                  * the default one, but do not gateway in this case.
2310                  * Yes, it is a hack.
2311                  */
2312                 if (fi && res->prefixlen < 4)
2313                         fi = NULL;
2314         } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2315                    (orig_oif != dev_out->ifindex)) {
2316                 /* For local routes that require a particular output interface
2317                  * we do not want to cache the result.  Caching the result
2318                  * causes incorrect behaviour when there are multiple source
2319                  * addresses on the interface: the end result is that if the
2320                  * intended recipient is waiting on that interface for the
2321                  * packet, it won't be received, because it will be delivered on
2322                  * the loopback interface and the IP_PKTINFO ipi_ifindex will
2323                  * be set to the loopback interface as well.
2324                  */
2325                 do_cache = false;
2326         }
2327
2328         fnhe = NULL;
2329         do_cache &= fi != NULL;
2330         if (fi) {
2331                 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2332                 struct rtable __rcu **prth;
2333
2334                 fnhe = find_exception(nhc, fl4->daddr);
2335                 if (!do_cache)
2336                         goto add;
2337                 if (fnhe) {
2338                         prth = &fnhe->fnhe_rth_output;
2339                 } else {
2340                         if (unlikely(fl4->flowi4_flags &
2341                                      FLOWI_FLAG_KNOWN_NH &&
2342                                      !(nhc->nhc_gw_family &&
2343                                        nhc->nhc_scope == RT_SCOPE_LINK))) {
2344                                 do_cache = false;
2345                                 goto add;
2346                         }
2347                         prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
2348                 }
2349                 rth = rcu_dereference(*prth);
2350                 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2351                         return rth;
2352         }
2353
2354 add:
2355         rth = rt_dst_alloc(dev_out, flags, type,
2356                            IN_DEV_CONF_GET(in_dev, NOPOLICY),
2357                            IN_DEV_CONF_GET(in_dev, NOXFRM),
2358                            do_cache);
2359         if (!rth)
2360                 return ERR_PTR(-ENOBUFS);
2361
2362         rth->rt_iif = orig_oif;
2363
2364         RT_CACHE_STAT_INC(out_slow_tot);
2365
2366         if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2367                 if (flags & RTCF_LOCAL &&
2368                     !(dev_out->flags & IFF_LOOPBACK)) {
2369                         rth->dst.output = ip_mc_output;
2370                         RT_CACHE_STAT_INC(out_slow_mc);
2371                 }
2372 #ifdef CONFIG_IP_MROUTE
2373                 if (type == RTN_MULTICAST) {
2374                         if (IN_DEV_MFORWARD(in_dev) &&
2375                             !ipv4_is_local_multicast(fl4->daddr)) {
2376                                 rth->dst.input = ip_mr_input;
2377                                 rth->dst.output = ip_mc_output;
2378                         }
2379                 }
2380 #endif
2381         }
2382
2383         rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2384         lwtunnel_set_redirect(&rth->dst);
2385
2386         return rth;
2387 }
2388
2389 /*
2390  * Major route resolver routine.
2391  */
2392
2393 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2394                                         const struct sk_buff *skb)
2395 {
2396         __u8 tos = RT_FL_TOS(fl4);
2397         struct fib_result res = {
2398                 .type           = RTN_UNSPEC,
2399                 .fi             = NULL,
2400                 .table          = NULL,
2401                 .tclassid       = 0,
2402         };
2403         struct rtable *rth;
2404
2405         fl4->flowi4_iif = LOOPBACK_IFINDEX;
2406         fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2407         fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2408                          RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2409
2410         rcu_read_lock();
2411         rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2412         rcu_read_unlock();
2413
2414         return rth;
2415 }
2416 EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
2417
2418 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2419                                             struct fib_result *res,
2420                                             const struct sk_buff *skb)
2421 {
2422         struct net_device *dev_out = NULL;
2423         int orig_oif = fl4->flowi4_oif;
2424         unsigned int flags = 0;
2425         struct rtable *rth;
2426         int err = -ENETUNREACH;
2427
2428         if (fl4->saddr) {
2429                 rth = ERR_PTR(-EINVAL);
2430                 if (ipv4_is_multicast(fl4->saddr) ||
2431                     ipv4_is_lbcast(fl4->saddr) ||
2432                     ipv4_is_zeronet(fl4->saddr))
2433                         goto out;
2434
2435                 /* I removed check for oif == dev_out->oif here.
2436                    It was wrong for two reasons:
2437                    1. ip_dev_find(net, saddr) can return wrong iface, if saddr
2438                       is assigned to multiple interfaces.
2439                    2. Moreover, we are allowed to send packets with saddr
2440                       of another iface. --ANK
2441                  */
2442
2443                 if (fl4->flowi4_oif == 0 &&
2444                     (ipv4_is_multicast(fl4->daddr) ||
2445                      ipv4_is_lbcast(fl4->daddr))) {
2446                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2447                         dev_out = __ip_dev_find(net, fl4->saddr, false);
2448                         if (!dev_out)
2449                                 goto out;
2450
2451                         /* Special hack: the user can direct multicasts
2452                            and limited broadcast via the necessary interface
2453                            without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2454                            This hack is not just for fun, it allows
2455                            vic, vat and friends to work.
2456                            They bind a socket to loopback, set ttl to zero
2457                            and expect that it will work.
2458                            From the viewpoint of the routing cache they are broken,
2459                            because we are not allowed to build a multicast path
2460                            with a loopback source addr (look, the routing cache
2461                            cannot know that ttl is zero, so that the packet
2462                            will not leave this host and the route is valid).
2463                            Luckily, this hack is a good workaround.
2464                          */
2465
2466                         fl4->flowi4_oif = dev_out->ifindex;
2467                         goto make_route;
2468                 }
2469
2470                 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2471                         /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2472                         if (!__ip_dev_find(net, fl4->saddr, false))
2473                                 goto out;
2474                 }
2475         }
2476
2477
2478         if (fl4->flowi4_oif) {
2479                 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2480                 rth = ERR_PTR(-ENODEV);
2481                 if (!dev_out)
2482                         goto out;
2483
2484                 /* RACE: Check return value of inet_select_addr instead. */
2485                 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2486                         rth = ERR_PTR(-ENETUNREACH);
2487                         goto out;
2488                 }
2489                 if (ipv4_is_local_multicast(fl4->daddr) ||
2490                     ipv4_is_lbcast(fl4->daddr) ||
2491                     fl4->flowi4_proto == IPPROTO_IGMP) {
2492                         if (!fl4->saddr)
2493                                 fl4->saddr = inet_select_addr(dev_out, 0,
2494                                                               RT_SCOPE_LINK);
2495                         goto make_route;
2496                 }
2497                 if (!fl4->saddr) {
2498                         if (ipv4_is_multicast(fl4->daddr))
2499                                 fl4->saddr = inet_select_addr(dev_out, 0,
2500                                                               fl4->flowi4_scope);
2501                         else if (!fl4->daddr)
2502                                 fl4->saddr = inet_select_addr(dev_out, 0,
2503                                                               RT_SCOPE_HOST);
2504                 }
2505         }
2506
2507         if (!fl4->daddr) {
2508                 fl4->daddr = fl4->saddr;
2509                 if (!fl4->daddr)
2510                         fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2511                 dev_out = net->loopback_dev;
2512                 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2513                 res->type = RTN_LOCAL;
2514                 flags |= RTCF_LOCAL;
2515                 goto make_route;
2516         }
2517
2518         err = fib_lookup(net, fl4, res, 0);
2519         if (err) {
2520                 res->fi = NULL;
2521                 res->table = NULL;
2522                 if (fl4->flowi4_oif &&
2523                     (ipv4_is_multicast(fl4->daddr) ||
2524                     !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
2525                         /* Apparently, the routing tables are wrong. Assume
2526                            that the destination is on-link.
2527
2528                            WHY? DW.
2529                            Because we are allowed to send to an iface
2530                            even if it has NO routes and NO assigned
2531                            addresses. When oif is specified, the routing
2532                            tables are looked up with only one purpose:
2533                            to catch whether the destination is gatewayed,
2534                            rather than direct. Moreover, if MSG_DONTROUTE is set,
2535                            we send the packet, ignoring both routing tables
2536                            and ifaddr state. --ANK
2537
2538
2539                            We could do this even if oif is unknown
2540                            (IPv6 likely does), but we do not.
2541                          */
2542
2543                         if (fl4->saddr == 0)
2544                                 fl4->saddr = inet_select_addr(dev_out, 0,
2545                                                               RT_SCOPE_LINK);
2546                         res->type = RTN_UNICAST;
2547                         goto make_route;
2548                 }
2549                 rth = ERR_PTR(err);
2550                 goto out;
2551         }
2552
2553         if (res->type == RTN_LOCAL) {
2554                 if (!fl4->saddr) {
2555                         if (res->fi->fib_prefsrc)
2556                                 fl4->saddr = res->fi->fib_prefsrc;
2557                         else
2558                                 fl4->saddr = fl4->daddr;
2559                 }
2560
2561                 /* L3 master device is the loopback for that domain */
2562                 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2563                         net->loopback_dev;
2564
2565                 /* make sure orig_oif points to fib result device even
2566                  * though packet rx/tx happens over loopback or l3mdev
2567                  */
2568                 orig_oif = FIB_RES_OIF(*res);
2569
2570                 fl4->flowi4_oif = dev_out->ifindex;
2571                 flags |= RTCF_LOCAL;
2572                 goto make_route;
2573         }
2574
2575         fib_select_path(net, res, fl4, skb);
2576
2577         dev_out = FIB_RES_DEV(*res);
2578         fl4->flowi4_oif = dev_out->ifindex;
2579
2580
2581 make_route:
2582         rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2583
2584 out:
2585         return rth;
2586 }
2587
2588 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2589 {
2590         return NULL;
2591 }
2592
2593 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2594 {
2595         unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2596
2597         return mtu ? : dst->dev->mtu;
2598 }
2599
2600 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2601                                           struct sk_buff *skb, u32 mtu)
2602 {
2603 }
2604
2605 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2606                                        struct sk_buff *skb)
2607 {
2608 }
2609
2610 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2611                                           unsigned long old)
2612 {
2613         return NULL;
2614 }
2615
2616 static struct dst_ops ipv4_dst_blackhole_ops = {
2617         .family                 =       AF_INET,
2618         .check                  =       ipv4_blackhole_dst_check,
2619         .mtu                    =       ipv4_blackhole_mtu,
2620         .default_advmss         =       ipv4_default_advmss,
2621         .update_pmtu            =       ipv4_rt_blackhole_update_pmtu,
2622         .redirect               =       ipv4_rt_blackhole_redirect,
2623         .cow_metrics            =       ipv4_rt_blackhole_cow_metrics,
2624         .neigh_lookup           =       ipv4_neigh_lookup,
2625 };
2626
2627 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2628 {
2629         struct rtable *ort = (struct rtable *) dst_orig;
2630         struct rtable *rt;
2631
2632         rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
2633         if (rt) {
2634                 struct dst_entry *new = &rt->dst;
2635
2636                 new->__use = 1;
2637                 new->input = dst_discard;
2638                 new->output = dst_discard_out;
2639
2640                 new->dev = net->loopback_dev;
2641                 if (new->dev)
2642                         dev_hold(new->dev);
2643
2644                 rt->rt_is_input = ort->rt_is_input;
2645                 rt->rt_iif = ort->rt_iif;
2646                 rt->rt_pmtu = ort->rt_pmtu;
2647                 rt->rt_mtu_locked = ort->rt_mtu_locked;
2648
2649                 rt->rt_genid = rt_genid_ipv4(net);
2650                 rt->rt_flags = ort->rt_flags;
2651                 rt->rt_type = ort->rt_type;
2652                 rt->rt_gw_family = ort->rt_gw_family;
2653                 if (rt->rt_gw_family == AF_INET)
2654                         rt->rt_gw4 = ort->rt_gw4;
2655                 else if (rt->rt_gw_family == AF_INET6)
2656                         rt->rt_gw6 = ort->rt_gw6;
2657
2658                 INIT_LIST_HEAD(&rt->rt_uncached);
2659         }
2660
2661         dst_release(dst_orig);
2662
2663         return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2664 }
2665
2666 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2667                                     const struct sock *sk)
2668 {
2669         struct rtable *rt = __ip_route_output_key(net, flp4);
2670
2671         if (IS_ERR(rt))
2672                 return rt;
2673
2674         if (flp4->flowi4_proto)
2675                 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2676                                                         flowi4_to_flowi(flp4),
2677                                                         sk, 0);
2678
2679         return rt;
2680 }
2681 EXPORT_SYMBOL_GPL(ip_route_output_flow);
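/* Illustrative, hedged usage sketch (not part of the original file): an
 * output lookup as a protocol might perform it, filling a flowi4 by hand
 * instead of using one of the flowi4_init_output()/ip_route_connect()
 * style helpers:
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	memset(&fl4, 0, sizeof(fl4));
 *	fl4.daddr = daddr;
 *	fl4.saddr = saddr;		// may be 0; filled in by the lookup
 *	fl4.flowi4_oif = oif;
 *	fl4.flowi4_proto = IPPROTO_UDP;
 *	fl4.flowi4_tos = tos & IPTOS_RT_MASK;
 *
 *	rt = ip_route_output_flow(net, &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);			// drop the reference when done
 *
 * daddr/saddr/oif/tos/sk are placeholders for this sketch.
 */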
2682
2683 /* called with rcu_read_lock held */
2684 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2685                         struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2686                         struct sk_buff *skb, u32 portid, u32 seq)
2687 {
2688         struct rtmsg *r;
2689         struct nlmsghdr *nlh;
2690         unsigned long expires = 0;
2691         u32 error;
2692         u32 metrics[RTAX_MAX];
2693
2694         nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), 0);
2695         if (!nlh)
2696                 return -EMSGSIZE;
2697
2698         r = nlmsg_data(nlh);
2699         r->rtm_family    = AF_INET;
2700         r->rtm_dst_len  = 32;
2701         r->rtm_src_len  = 0;
2702         r->rtm_tos      = fl4 ? fl4->flowi4_tos : 0;
2703         r->rtm_table    = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2704         if (nla_put_u32(skb, RTA_TABLE, table_id))
2705                 goto nla_put_failure;
2706         r->rtm_type     = rt->rt_type;
2707         r->rtm_scope    = RT_SCOPE_UNIVERSE;
2708         r->rtm_protocol = RTPROT_UNSPEC;
2709         r->rtm_flags    = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2710         if (rt->rt_flags & RTCF_NOTIFY)
2711                 r->rtm_flags |= RTM_F_NOTIFY;
2712         if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2713                 r->rtm_flags |= RTCF_DOREDIRECT;
2714
2715         if (nla_put_in_addr(skb, RTA_DST, dst))
2716                 goto nla_put_failure;
2717         if (src) {
2718                 r->rtm_src_len = 32;
2719                 if (nla_put_in_addr(skb, RTA_SRC, src))
2720                         goto nla_put_failure;
2721         }
2722         if (rt->dst.dev &&
2723             nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2724                 goto nla_put_failure;
2725 #ifdef CONFIG_IP_ROUTE_CLASSID
2726         if (rt->dst.tclassid &&
2727             nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2728                 goto nla_put_failure;
2729 #endif
2730         if (fl4 && !rt_is_input_route(rt) &&
2731             fl4->saddr != src) {
2732                 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2733                         goto nla_put_failure;
2734         }
2735         if (rt->rt_gw_family == AF_INET &&
2736             nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
2737                 goto nla_put_failure;
2738         } else if (rt->rt_gw_family == AF_INET6) {
2739                 int alen = sizeof(struct in6_addr);
2740                 struct nlattr *nla;
2741                 struct rtvia *via;
2742
2743                 nla = nla_reserve(skb, RTA_VIA, alen + 2);
2744                 if (!nla)
2745                         goto nla_put_failure;
2746
2747                 via = nla_data(nla);
2748                 via->rtvia_family = AF_INET6;
2749                 memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
2750         }
2751
2752         expires = rt->dst.expires;
2753         if (expires) {
2754                 unsigned long now = jiffies;
2755
2756                 if (time_before(now, expires))
2757                         expires -= now;
2758                 else
2759                         expires = 0;
2760         }
2761
2762         memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2763         if (rt->rt_pmtu && expires)
2764                 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2765         if (rt->rt_mtu_locked && expires)
2766                 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2767         if (rtnetlink_put_metrics(skb, metrics) < 0)
2768                 goto nla_put_failure;
2769
2770         if (fl4) {
2771                 if (fl4->flowi4_mark &&
2772                     nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2773                         goto nla_put_failure;
2774
2775                 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2776                     nla_put_u32(skb, RTA_UID,
2777                                 from_kuid_munged(current_user_ns(),
2778                                                  fl4->flowi4_uid)))
2779                         goto nla_put_failure;
2780
2781                 if (rt_is_input_route(rt)) {
2782 #ifdef CONFIG_IP_MROUTE
2783                         if (ipv4_is_multicast(dst) &&
2784                             !ipv4_is_local_multicast(dst) &&
2785                             IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2786                                 int err = ipmr_get_route(net, skb,
2787                                                          fl4->saddr, fl4->daddr,
2788                                                          r, portid);
2789
2790                                 if (err <= 0) {
2791                                         if (err == 0)
2792                                                 return 0;
2793                                         goto nla_put_failure;
2794                                 }
2795                         } else
2796 #endif
2797                                 if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
2798                                         goto nla_put_failure;
2799                 }
2800         }
2801
2802         error = rt->dst.error;
2803
2804         if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2805                 goto nla_put_failure;
2806
2807         nlmsg_end(skb, nlh);
2808         return 0;
2809
2810 nla_put_failure:
2811         nlmsg_cancel(skb, nlh);
2812         return -EMSGSIZE;
2813 }
2814
2815 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
2816                             struct netlink_callback *cb, u32 table_id,
2817                             struct fnhe_hash_bucket *bucket, int genid,
2818                             int *fa_index, int fa_start)
2819 {
2820         int i;
2821
2822         for (i = 0; i < FNHE_HASH_SIZE; i++) {
2823                 struct fib_nh_exception *fnhe;
2824
2825                 for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
2826                      fnhe = rcu_dereference(fnhe->fnhe_next)) {
2827                         struct rtable *rt;
2828                         int err;
2829
2830                         if (*fa_index < fa_start)
2831                                 goto next;
2832
2833                         if (fnhe->fnhe_genid != genid)
2834                                 goto next;
2835
2836                         if (fnhe->fnhe_expires &&
2837                             time_after(jiffies, fnhe->fnhe_expires))
2838                                 goto next;
2839
2840                         rt = rcu_dereference(fnhe->fnhe_rth_input);
2841                         if (!rt)
2842                                 rt = rcu_dereference(fnhe->fnhe_rth_output);
2843                         if (!rt)
2844                                 goto next;
2845
2846                         err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
2847                                            table_id, NULL, skb,
2848                                            NETLINK_CB(cb->skb).portid,
2849                                            cb->nlh->nlmsg_seq);
2850                         if (err)
2851                                 return err;
2852 next:
2853                         (*fa_index)++;
2854                 }
2855         }
2856
2857         return 0;
2858 }
2859
2860 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
2861                        u32 table_id, struct fib_info *fi,
2862                        int *fa_index, int fa_start)
2863 {
2864         struct net *net = sock_net(cb->skb->sk);
2865         int nhsel, genid = fnhe_genid(net);
2866
2867         for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
2868                 struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
2869                 struct fnhe_hash_bucket *bucket;
2870                 int err;
2871
2872                 if (nhc->nhc_flags & RTNH_F_DEAD)
2873                         continue;
2874
2875                 bucket = rcu_dereference(nhc->nhc_exceptions);
2876                 if (!bucket)
2877                         continue;
2878
2879                 err = fnhe_dump_bucket(net, skb, cb, table_id, bucket, genid,
2880                                        fa_index, fa_start);
2881                 if (err)
2882                         return err;
2883         }
2884
2885         return 0;
2886 }
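/* Illustrative, hedged note (not part of the original file):
 * fib_dump_info_fnhe() is meant to be called from the FIB table dump path,
 * under rcu_read_lock(), when userspace asks for cloned routes (i.e. the
 * per-destination exceptions kept in the fnhe hash).  fa_start/fa_index
 * implement dump resumption: entries below fa_start were already emitted
 * in a previous netlink message, so they are counted but skipped.  A
 * hedged sketch of the caller side:
 *
 *	err = fib_dump_info_fnhe(skb, cb, tb->tb_id, fa->fa_info,
 *				 &fa_index, fa_start);
 *	if (err < 0)
 *		return err;	// resume later from the recorded index
 *
 * "tb", "fa", "fa_index" and "fa_start" are names assumed from the FIB
 * dump code and are not defined in this file.
 */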
2887
2888 static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
2889                                                    u8 ip_proto, __be16 sport,
2890                                                    __be16 dport)
2891 {
2892         struct sk_buff *skb;
2893         struct iphdr *iph;
2894
2895         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2896         if (!skb)
2897                 return NULL;
2898
2899         /* Reserve room for dummy headers; this skb can pass
2900          * through a good chunk of the routing engine.
2901          */
2902         skb_reset_mac_header(skb);
2903         skb_reset_network_header(skb);
2904         skb->protocol = htons(ETH_P_IP);
2905         iph = skb_put(skb, sizeof(struct iphdr));
2906         iph->protocol = ip_proto;
2907         iph->saddr = src;
2908         iph->daddr = dst;
2909         iph->version = 0x4;
2910         iph->frag_off = 0;
2911         iph->ihl = 0x5;
2912         skb_set_transport_header(skb, skb->len);
2913
2914         switch (iph->protocol) {
2915         case IPPROTO_UDP: {
2916                 struct udphdr *udph;
2917
2918                 udph = skb_put_zero(skb, sizeof(struct udphdr));
2919                 udph->source = sport;
2920                 udph->dest = dport;
2921                 udph->len = htons(sizeof(struct udphdr));
2922                 udph->check = 0;
2923                 break;
2924         }
2925         case IPPROTO_TCP: {
2926                 struct tcphdr *tcph;
2927
2928                 tcph = skb_put_zero(skb, sizeof(struct tcphdr));
2929                 tcph->source    = sport;
2930                 tcph->dest      = dport;
2931                 tcph->doff      = sizeof(struct tcphdr) / 4;
2932                 tcph->rst = 1;
2933                 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
2934                                             src, dst, 0);
2935                 break;
2936         }
2937         case IPPROTO_ICMP: {
2938                 struct icmphdr *icmph;
2939
2940                 icmph = skb_put_zero(skb, sizeof(struct icmphdr));
2941                 icmph->type = ICMP_ECHO;
2942                 icmph->code = 0;
2943         }
2944         }
2945
2946         return skb;
2947 }
2948
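/* Validate an RTM_GETROUTE request.  Sockets that did not opt in to
 * strict checking keep the legacy, lenient nlmsg_parse_deprecated()
 * behaviour; for strict sockets, reserved rtm_* header fields,
 * unsupported rtm_flags and unknown attributes are rejected with an
 * extack message.
 */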
2949 static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
2950                                        const struct nlmsghdr *nlh,
2951                                        struct nlattr **tb,
2952                                        struct netlink_ext_ack *extack)
2953 {
2954         struct rtmsg *rtm;
2955         int i, err;
2956
2957         if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
2958                 NL_SET_ERR_MSG(extack,
2959                                "ipv4: Invalid header for route get request");
2960                 return -EINVAL;
2961         }
2962
2963         if (!netlink_strict_get_check(skb))
2964                 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
2965                                               rtm_ipv4_policy, extack);
2966
2967         rtm = nlmsg_data(nlh);
2968         if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
2969             (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
2970             rtm->rtm_table || rtm->rtm_protocol ||
2971             rtm->rtm_scope || rtm->rtm_type) {
2972                 NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
2973                 return -EINVAL;
2974         }
2975
2976         if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
2977                                RTM_F_LOOKUP_TABLE |
2978                                RTM_F_FIB_MATCH)) {
2979                 NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
2980                 return -EINVAL;
2981         }
2982
2983         err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
2984                                             rtm_ipv4_policy, extack);
2985         if (err)
2986                 return err;
2987
2988         if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
2989             (tb[RTA_DST] && !rtm->rtm_dst_len)) {
2990                 NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
2991                 return -EINVAL;
2992         }
2993
2994         for (i = 0; i <= RTA_MAX; i++) {
2995                 if (!tb[i])
2996                         continue;
2997
2998                 switch (i) {
2999                 case RTA_IIF:
3000                 case RTA_OIF:
3001                 case RTA_SRC:
3002                 case RTA_DST:
3003                 case RTA_IP_PROTO:
3004                 case RTA_SPORT:
3005                 case RTA_DPORT:
3006                 case RTA_MARK:
3007                 case RTA_UID:
3008                         break;
3009                 default:
3010                         NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
3011                         return -EINVAL;
3012                 }
3013         }
3014
3015         return 0;
3016 }
3017
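/* doit handler for RTM_GETROUTE: validate the request, build a dummy
 * packet with inet_rtm_getroute_build_skb(), resolve it through the
 * input path (ip_route_input_rcu()) when RTA_IIF is given or through
 * the output path otherwise, and reply with either fib_dump_info()
 * (RTM_F_FIB_MATCH) or rt_fill_info().
 *
 * A rough userspace equivalent, using iproute2 (option spelling is
 * iproute2's and may vary with its version, it is not defined here):
 *
 *   ip route get 192.0.2.1 from 198.51.100.1 iif eth0
 *   ip route get fibmatch 192.0.2.1
 */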
3018 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3019                              struct netlink_ext_ack *extack)
3020 {
3021         struct net *net = sock_net(in_skb->sk);
3022         struct nlattr *tb[RTA_MAX+1];
3023         u32 table_id = RT_TABLE_MAIN;
3024         __be16 sport = 0, dport = 0;
3025         struct fib_result res = {};
3026         u8 ip_proto = IPPROTO_UDP;
3027         struct rtable *rt = NULL;
3028         struct sk_buff *skb;
3029         struct rtmsg *rtm;
3030         struct flowi4 fl4 = {};
3031         __be32 dst = 0;
3032         __be32 src = 0;
3033         kuid_t uid;
3034         u32 iif;
3035         int err;
3036         int mark;
3037
3038         err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
3039         if (err < 0)
3040                 return err;
3041
3042         rtm = nlmsg_data(nlh);
3043         src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
3044         dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
3045         iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
3046         mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
3047         if (tb[RTA_UID])
3048                 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
3049         else
3050                 uid = (iif ? INVALID_UID : current_uid());
3051
3052         if (tb[RTA_IP_PROTO]) {
3053                 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
3054                                                   &ip_proto, AF_INET, extack);
3055                 if (err)
3056                         return err;
3057         }
3058
3059         if (tb[RTA_SPORT])
3060                 sport = nla_get_be16(tb[RTA_SPORT]);
3061
3062         if (tb[RTA_DPORT])
3063                 dport = nla_get_be16(tb[RTA_DPORT]);
3064
3065         skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
3066         if (!skb)
3067                 return -ENOBUFS;
3068
3069         fl4.daddr = dst;
3070         fl4.saddr = src;
3071         fl4.flowi4_tos = rtm->rtm_tos;
3072         fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
3073         fl4.flowi4_mark = mark;
3074         fl4.flowi4_uid = uid;
3075         if (sport)
3076                 fl4.fl4_sport = sport;
3077         if (dport)
3078                 fl4.fl4_dport = dport;
3079         fl4.flowi4_proto = ip_proto;
3080
3081         rcu_read_lock();
3082
3083         if (iif) {
3084                 struct net_device *dev;
3085
3086                 dev = dev_get_by_index_rcu(net, iif);
3087                 if (!dev) {
3088                         err = -ENODEV;
3089                         goto errout_rcu;
3090                 }
3091
3092                 fl4.flowi4_iif = iif; /* for rt_fill_info */
3093                 skb->dev        = dev;
3094                 skb->mark       = mark;
3095                 err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
3096                                          dev, &res);
3097
3098                 rt = skb_rtable(skb);
3099                 if (err == 0 && rt->dst.error)
3100                         err = -rt->dst.error;
3101         } else {
3102                 fl4.flowi4_iif = LOOPBACK_IFINDEX;
3103                 skb->dev = net->loopback_dev;
3104                 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
3105                 err = 0;
3106                 if (IS_ERR(rt))
3107                         err = PTR_ERR(rt);
3108                 else
3109                         skb_dst_set(skb, &rt->dst);
3110         }
3111
3112         if (err)
3113                 goto errout_rcu;
3114
3115         if (rtm->rtm_flags & RTM_F_NOTIFY)
3116                 rt->rt_flags |= RTCF_NOTIFY;
3117
3118         if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
3119                 table_id = res.table ? res.table->tb_id : 0;
3120
3121         /* reset skb for netlink reply msg */
3122         skb_trim(skb, 0);
3123         skb_reset_network_header(skb);
3124         skb_reset_transport_header(skb);
3125         skb_reset_mac_header(skb);
3126
3127         if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
3128                 if (!res.fi) {
3129                         err = fib_props[res.type].error;
3130                         if (!err)
3131                                 err = -EHOSTUNREACH;
3132                         goto errout_rcu;
3133                 }
3134                 err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
3135                                     nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
3136                                     rt->rt_type, res.prefix, res.prefixlen,
3137                                     fl4.flowi4_tos, res.fi, 0);
3138         } else {
3139                 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3140                                    NETLINK_CB(in_skb).portid, nlh->nlmsg_seq);
3141         }
3142         if (err < 0)
3143                 goto errout_rcu;
3144
3145         rcu_read_unlock();
3146
3147         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3148
3149 errout_free:
3150         return err;
3151 errout_rcu:
3152         rcu_read_unlock();
3153         kfree_skb(skb);
3154         goto errout_free;
3155 }
3156
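/* Multicast configuration changed on @in_dev: simply flush the
 * routing cache of the owning netns.
 */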
3157 void ip_rt_multicast_event(struct in_device *in_dev)
3158 {
3159         rt_cache_flush(dev_net(in_dev->dev));
3160 }
3161
3162 #ifdef CONFIG_SYSCTL
3163 static int ip_rt_gc_interval __read_mostly  = 60 * HZ;
3164 static int ip_rt_gc_min_interval __read_mostly  = HZ / 2;
3165 static int ip_rt_gc_elasticity __read_mostly    = 8;
3166 static int ip_min_valid_pmtu __read_mostly      = IPV4_MIN_MTU;
3167
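/* Handler for the write-only "flush" sysctl below: a write flushes the
 * routing cache and bumps the exception genid of the netns stashed in
 * ->extra1, while reads fail with -EINVAL.  Typical usage (a sketch,
 * not part of this file):
 *
 *   sysctl -w net.ipv4.route.flush=1
 */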
3168 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
3169                                         void __user *buffer,
3170                                         size_t *lenp, loff_t *ppos)
3171 {
3172         struct net *net = (struct net *)__ctl->extra1;
3173
3174         if (write) {
3175                 rt_cache_flush(net);
3176                 fnhe_genid_bump(net);
3177                 return 0;
3178         }
3179
3180         return -EINVAL;
3181 }
3182
3183 static struct ctl_table ipv4_route_table[] = {
3184         {
3185                 .procname       = "gc_thresh",
3186                 .data           = &ipv4_dst_ops.gc_thresh,
3187                 .maxlen         = sizeof(int),
3188                 .mode           = 0644,
3189                 .proc_handler   = proc_dointvec,
3190         },
3191         {
3192                 .procname       = "max_size",
3193                 .data           = &ip_rt_max_size,
3194                 .maxlen         = sizeof(int),
3195                 .mode           = 0644,
3196                 .proc_handler   = proc_dointvec,
3197         },
3198         {
3199                 /*  Deprecated. Use gc_min_interval_ms */
3200
3201                 .procname       = "gc_min_interval",
3202                 .data           = &ip_rt_gc_min_interval,
3203                 .maxlen         = sizeof(int),
3204                 .mode           = 0644,
3205                 .proc_handler   = proc_dointvec_jiffies,
3206         },
3207         {
3208                 .procname       = "gc_min_interval_ms",
3209                 .data           = &ip_rt_gc_min_interval,
3210                 .maxlen         = sizeof(int),
3211                 .mode           = 0644,
3212                 .proc_handler   = proc_dointvec_ms_jiffies,
3213         },
3214         {
3215                 .procname       = "gc_timeout",
3216                 .data           = &ip_rt_gc_timeout,
3217                 .maxlen         = sizeof(int),
3218                 .mode           = 0644,
3219                 .proc_handler   = proc_dointvec_jiffies,
3220         },
3221         {
3222                 .procname       = "gc_interval",
3223                 .data           = &ip_rt_gc_interval,
3224                 .maxlen         = sizeof(int),
3225                 .mode           = 0644,
3226                 .proc_handler   = proc_dointvec_jiffies,
3227         },
3228         {
3229                 .procname       = "redirect_load",
3230                 .data           = &ip_rt_redirect_load,
3231                 .maxlen         = sizeof(int),
3232                 .mode           = 0644,
3233                 .proc_handler   = proc_dointvec,
3234         },
3235         {
3236                 .procname       = "redirect_number",
3237                 .data           = &ip_rt_redirect_number,
3238                 .maxlen         = sizeof(int),
3239                 .mode           = 0644,
3240                 .proc_handler   = proc_dointvec,
3241         },
3242         {
3243                 .procname       = "redirect_silence",
3244                 .data           = &ip_rt_redirect_silence,
3245                 .maxlen         = sizeof(int),
3246                 .mode           = 0644,
3247                 .proc_handler   = proc_dointvec,
3248         },
3249         {
3250                 .procname       = "error_cost",
3251                 .data           = &ip_rt_error_cost,
3252                 .maxlen         = sizeof(int),
3253                 .mode           = 0644,
3254                 .proc_handler   = proc_dointvec,
3255         },
3256         {
3257                 .procname       = "error_burst",
3258                 .data           = &ip_rt_error_burst,
3259                 .maxlen         = sizeof(int),
3260                 .mode           = 0644,
3261                 .proc_handler   = proc_dointvec,
3262         },
3263         {
3264                 .procname       = "gc_elasticity",
3265                 .data           = &ip_rt_gc_elasticity,
3266                 .maxlen         = sizeof(int),
3267                 .mode           = 0644,
3268                 .proc_handler   = proc_dointvec,
3269         },
3270         {
3271                 .procname       = "mtu_expires",
3272                 .data           = &ip_rt_mtu_expires,
3273                 .maxlen         = sizeof(int),
3274                 .mode           = 0644,
3275                 .proc_handler   = proc_dointvec_jiffies,
3276         },
3277         {
3278                 .procname       = "min_pmtu",
3279                 .data           = &ip_rt_min_pmtu,
3280                 .maxlen         = sizeof(int),
3281                 .mode           = 0644,
3282                 .proc_handler   = proc_dointvec_minmax,
3283                 .extra1         = &ip_min_valid_pmtu,
3284         },
3285         {
3286                 .procname       = "min_adv_mss",
3287                 .data           = &ip_rt_min_advmss,
3288                 .maxlen         = sizeof(int),
3289                 .mode           = 0644,
3290                 .proc_handler   = proc_dointvec,
3291         },
3292         { }
3293 };
3294
3295 static struct ctl_table ipv4_route_flush_table[] = {
3296         {
3297                 .procname       = "flush",
3298                 .maxlen         = sizeof(int),
3299                 .mode           = 0200,
3300                 .proc_handler   = ipv4_sysctl_rtcache_flush,
3301         },
3302         { },
3303 };
3304
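/* Per-netns registration of the "flush" sysctl.  Namespaces other than
 * init_net get their own kmemdup()'d copy of the table, with the entry
 * hidden (procname cleared) when the netns is owned by a non-initial
 * user namespace; ->extra1 carries the netns for the flush handler.
 */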
3305 static __net_init int sysctl_route_net_init(struct net *net)
3306 {
3307         struct ctl_table *tbl;
3308
3309         tbl = ipv4_route_flush_table;
3310         if (!net_eq(net, &init_net)) {
3311                 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
3312                 if (!tbl)
3313                         goto err_dup;
3314
3315                 /* Don't export sysctls to unprivileged users */
3316                 if (net->user_ns != &init_user_ns)
3317                         tbl[0].procname = NULL;
3318         }
3319         tbl[0].extra1 = net;
3320
3321         net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
3322         if (!net->ipv4.route_hdr)
3323                 goto err_reg;
3324         return 0;
3325
3326 err_reg:
3327         if (tbl != ipv4_route_flush_table)
3328                 kfree(tbl);
3329 err_dup:
3330         return -ENOMEM;
3331 }
3332
3333 static __net_exit void sysctl_route_net_exit(struct net *net)
3334 {
3335         struct ctl_table *tbl;
3336
3337         tbl = net->ipv4.route_hdr->ctl_table_arg;
3338         unregister_net_sysctl_table(net->ipv4.route_hdr);
3339         BUG_ON(tbl == ipv4_route_flush_table);
3340         kfree(tbl);
3341 }
3342
3343 static __net_initdata struct pernet_operations sysctl_route_ops = {
3344         .init = sysctl_route_net_init,
3345         .exit = sysctl_route_net_exit,
3346 };
3347 #endif
3348
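/* Initialise the per-netns generation counters: the route and
 * exception genids start at zero, dev_addr_genid is seeded randomly.
 */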
3349 static __net_init int rt_genid_init(struct net *net)
3350 {
3351         atomic_set(&net->ipv4.rt_genid, 0);
3352         atomic_set(&net->fnhe_genid, 0);
3353         atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
3354         return 0;
3355 }
3356
3357 static __net_initdata struct pernet_operations rt_genid_ops = {
3358         .init = rt_genid_init,
3359 };
3360
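/* Allocate and initialise the per-netns inet_peer_base used by IPv4;
 * the exit handler below invalidates the tree and frees it again.
 */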
3361 static int __net_init ipv4_inetpeer_init(struct net *net)
3362 {
3363         struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3364
3365         if (!bp)
3366                 return -ENOMEM;
3367         inet_peer_base_init(bp);
3368         net->ipv4.peers = bp;
3369         return 0;
3370 }
3371
3372 static void __net_exit ipv4_inetpeer_exit(struct net *net)
3373 {
3374         struct inet_peer_base *bp = net->ipv4.peers;
3375
3376         net->ipv4.peers = NULL;
3377         inetpeer_invalidate_tree(bp);
3378         kfree(bp);
3379 }
3380
3381 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3382         .init   =       ipv4_inetpeer_init,
3383         .exit   =       ipv4_inetpeer_exit,
3384 };
3385
3386 #ifdef CONFIG_IP_ROUTE_CLASSID
3387 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3388 #endif /* CONFIG_IP_ROUTE_CLASSID */
3389
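/* Boot-time initialisation of the IPv4 routing layer: allocate the
 * ip_idents/ip_tstamps arrays and the per-cpu uncached route lists,
 * set up the dst kmem cache and entry counters, initialise devinet,
 * the FIB and (if configured) xfrm, register the RTM_GETROUTE handler
 * and the per-netns sysctl/genid/inetpeer operations.  Allocation
 * failures here panic(), as routing cannot work without them.
 */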
3390 int __init ip_rt_init(void)
3391 {
3392         int cpu;
3393
3394         ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
3395                                   GFP_KERNEL);
3396         if (!ip_idents)
3397                 panic("IP: failed to allocate ip_idents\n");
3398
3399         prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
3400
3401         ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
3402         if (!ip_tstamps)
3403                 panic("IP: failed to allocate ip_tstamps\n");
3404
3405         for_each_possible_cpu(cpu) {
3406                 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3407
3408                 INIT_LIST_HEAD(&ul->head);
3409                 spin_lock_init(&ul->lock);
3410         }
3411 #ifdef CONFIG_IP_ROUTE_CLASSID
3412         ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3413         if (!ip_rt_acct)
3414                 panic("IP: failed to allocate ip_rt_acct\n");
3415 #endif
3416
3417         ipv4_dst_ops.kmem_cachep =
3418                 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3419                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3420
3421         ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3422
3423         if (dst_entries_init(&ipv4_dst_ops) < 0)
3424                 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3425
3426         if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3427                 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3428
3429         ipv4_dst_ops.gc_thresh = ~0;
3430         ip_rt_max_size = INT_MAX;
3431
3432         devinet_init();
3433         ip_fib_init();
3434
3435         if (ip_rt_proc_init())
3436                 pr_err("Unable to create route proc files\n");
3437 #ifdef CONFIG_XFRM
3438         xfrm_init();
3439         xfrm4_init();
3440 #endif
3441         rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3442                       RTNL_FLAG_DOIT_UNLOCKED);
3443
3444 #ifdef CONFIG_SYSCTL
3445         register_pernet_subsys(&sysctl_route_ops);
3446 #endif
3447         register_pernet_subsys(&rt_genid_ops);
3448         register_pernet_subsys(&ipv4_inetpeer_ops);
3449         return 0;
3450 }
3451
3452 #ifdef CONFIG_SYSCTL
3453 /*
3454  * We really need to sanitize the damn ipv4 init order, then all
3455  * this nonsense will go away.
3456  */
3457 void __init ip_static_sysctl_init(void)
3458 {
3459         register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3460 }
3461 #endif