/*
 *	Linux INET6 implementation
 *	FIB front-end.
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *	Ville Nuorvala
 *		Fixed routing subtrees.
 */
#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/rtnh.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/ip.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
static int ip6_rt_type_to_error(u8 fib6_type);

#define CREATE_TRACE_POINTS
#include <trace/events/fib6.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
#undef CREATE_TRACE_POINTS
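/* Neighbour-reachability scores used while selecting a default router:
 * anything negative means the nexthop failed the NUD check, with the
 * exact value telling the caller whether to fail hard, probe the
 * neighbour, or fall back to round-robin selection.
 */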
enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};
static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
static unsigned int	 ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void		ip6_dst_destroy(struct dst_entry *);
static void		ip6_dst_ifdown(struct dst_entry *,
				       struct net_device *dev, int how);
static int		 ip6_dst_gc(struct dst_ops *ops);

static int		ip6_pkt_discard(struct sk_buff *skb);
static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int		ip6_pkt_prohibit(struct sk_buff *skb);
static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void		ip6_link_failure(struct sk_buff *skb);
static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict);
static size_t rt6_nlmsg_size(struct fib6_info *rt);
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct fib6_info *rt6_add_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev,
					    unsigned int pref);
static struct fib6_info *rt6_get_route_info(struct net *net,
					    const struct in6_addr *prefix, int prefixlen,
					    const struct in6_addr *gwaddr,
					    struct net_device *dev);
#endif

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
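/* rt6_uncached_list keeps dst entries that are not linked into the
 * fib6 tree (e.g. the FLOWI_FLAG_KNOWN_NH clones created in
 * ip6_pol_route()) on a per-cpu list, so that
 * rt6_uncached_list_flush_dev() can re-point their device and
 * inet6_dev references at the loopback device when the underlying
 * netdevice goes away.
 */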
void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
		struct net *net = dev_net(rt->dst.dev);

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
		spin_unlock_bh(&ul->lock);
	}
}

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				rt->dst.dev = loopback_dev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}
static inline const void *choose_neigh_daddr(const struct in6_addr *p,
					     struct sk_buff *skb,
					     const void *daddr)
{
	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev,
				   struct sk_buff *skb,
				   const void *daddr)
{
	struct neighbour *n;

	daddr = choose_neigh_daddr(gw, skb, daddr);
	n = __ipv6_neigh_lookup(dev, daddr);
	if (n)
		return n;

	n = neigh_create(&nd_tbl, daddr, dev);
	return IS_ERR(n) ? NULL : n;
}

static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}
static struct dst_ops ip6_dst_ops_template = {
	.family			=	AF_INET6,
	.gc			=	ip6_dst_gc,
	.gc_thresh		=	1024,
	.check			=	ip6_dst_check,
	.default_advmss		=	ip6_default_advmss,
	.mtu			=	ip6_mtu,
	.cow_metrics		=	dst_cow_metrics_generic,
	.destroy		=	ip6_dst_destroy,
	.ifdown			=	ip6_dst_ifdown,
	.negative_advice	=	ip6_negative_advice,
	.link_failure		=	ip6_link_failure,
	.update_pmtu		=	ip6_rt_update_pmtu,
	.redirect		=	rt6_do_redirect,
	.local_out		=	__ip6_local_out,
	.neigh_lookup		=	ip6_dst_neigh_lookup,
	.confirm_neigh		=	ip6_confirm_neigh,
};

static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

static struct dst_ops ip6_dst_blackhole_ops = {
	.family			=	AF_INET6,
	.destroy		=	ip6_dst_destroy,
	.check			=	ip6_dst_check,
	.mtu			=	ip6_blackhole_mtu,
	.default_advmss		=	ip6_default_advmss,
	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
	.redirect		=	ip6_rt_blackhole_redirect,
	.cow_metrics		=	dst_cow_metrics_generic,
	.neigh_lookup		=	ip6_dst_neigh_lookup,
};
static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct fib6_info fib6_null_entry_template = {
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol  = RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,
	.fib6_ref	= REFCOUNT_INIT(1),
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif
static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct fib6_info *from;
	struct inet6_dev *idev;

	ip_dst_metrics_put(dst);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	from = xchg((__force struct fib6_info **)&rt->from, NULL);
	fib6_info_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (idev && idev->dev != loopback_dev) {
		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
		if (loopback_idev) {
			rt->rt6i_idev = loopback_idev;
			in6_dev_put(idev);
		}
	}
}
static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);
	else
		return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (from) {
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
			fib6_check_expired(from);
	}
	return false;
}
void fib6_select_path(const struct net *net, struct fib6_result *res,
		      struct flowi6 *fl6, int oif, bool have_oif_match,
		      const struct sk_buff *skb, int strict)
{
	struct fib6_info *sibling, *next_sibling;
	struct fib6_info *match = res->f6i;

	if (!match->fib6_nsiblings || have_oif_match)
		goto out;

	/* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	if (!fl6->mp_hash)
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
		goto out;

	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
				 fib6_siblings) {
		const struct fib6_nh *nh = sibling->fib6_nh;
		int nh_upper_bound;

		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
		if (fl6->mp_hash > nh_upper_bound)
			continue;
		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

out:
	res->f6i = match;
	res->nh = match->fib6_nh;
}
/*
 *	Route lookup. rcu_read_lock() should be held.
 */

static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
			       const struct in6_addr *saddr, int oif, int flags)
{
	const struct net_device *dev;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		return false;

	dev = nh->fib_nh_dev;
	if (oif) {
		if (dev->ifindex == oif)
			return true;
	} else {
		if (ipv6_chk_addr(net, saddr, dev,
				  flags & RT6_LOOKUP_F_IFACE))
			return true;
	}

	return false;
}

static void rt6_device_match(struct net *net, struct fib6_result *res,
			     const struct in6_addr *saddr, int oif, int flags)
{
	struct fib6_info *f6i = res->f6i;
	struct fib6_info *spf6i;
	struct fib6_nh *nh;

	if (!oif && ipv6_addr_any(saddr)) {
		nh = f6i->fib6_nh;
		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
			goto out;
	}

	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
		nh = spf6i->fib6_nh;
		if (__rt6_device_match(net, nh, saddr, oif, flags)) {
			res->f6i = spf6i;
			goto out;
		}
	}

	if (oif && flags & RT6_LOOKUP_F_IFACE) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
		goto out;
	}

	nh = f6i->fib6_nh;
	if (nh->fib_nh_flags & RTNH_F_DEAD) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
	}
out:
	res->nh = nh;
	res->fib6_type = res->f6i->fib6_type;
	res->fib6_flags = res->f6i->fib6_flags;
}
#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);
	kfree(work);
}
static void rt6_probe(struct fib6_nh *fib6_nh)
{
	struct __rt6_probe_work *work = NULL;
	const struct in6_addr *nh_gw;
	struct neighbour *neigh;
	struct net_device *dev;
	struct inet6_dev *idev;

	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!fib6_nh->fib_nh_gw_family)
		return;

	nh_gw = &fib6_nh->fib_nh_gw6;
	dev = fib6_nh->fib_nh_dev;
	rcu_read_lock_bh();
	idev = __in6_dev_get(dev);
	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID)
			goto out;

		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated + idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else if (time_after(jiffies, fib6_nh->last_probe +
				       idev->cnf.rtr_probe_interval)) {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (work) {
		fib6_nh->last_probe = jiffies;
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = *nh_gw;
		dev_hold(dev);
		work->dev = dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct fib6_nh *fib6_nh)
{
}
#endif
/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
					  &fib6_nh->fib_nh_gw6);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict)
{
	int m = 0;

	if (!oif || nh->fib_nh_dev->ifindex == oif)
		m = 2;

	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
		int n = rt6_check_neigh(nh);
		if (n < 0)
			return n;
	}
	return m;
}
static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
		       int oif, int strict, int *mpri, bool *do_rr)
{
	bool match_do_rr = false;
	bool rc = false;
	int m;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		goto out;

	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	m = rt6_score_route(nh, fib6_flags, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(nh);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		rc = true;
	}
out:
	return rc;
}
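/* Walk the routes starting at @f6i_start that share @metric, scoring
 * each nexthop with find_match(); the best score seen so far lives in
 * *mpri and the corresponding route is recorded in @res. A route with
 * a different metric ends the walk and is handed back via @cont so
 * find_rr_leaf() can resume from it.
 */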
static void __find_rr_leaf(struct fib6_info *f6i_start,
			   struct fib6_info *nomatch, u32 metric,
			   struct fib6_result *res, struct fib6_info **cont,
			   int oif, int strict, bool *do_rr, int *mpri)
{
	struct fib6_info *f6i;

	for (f6i = f6i_start;
	     f6i && f6i != nomatch;
	     f6i = rcu_dereference(f6i->fib6_next)) {
		struct fib6_nh *nh;

		if (cont && f6i->fib6_metric != metric) {
			*cont = f6i;
			return;
		}

		if (fib6_check_expired(f6i))
			continue;

		nh = f6i->fib6_nh;
		if (find_match(nh, f6i->fib6_flags, oif, strict, mpri, do_rr)) {
			res->f6i = f6i;
			res->nh = nh;
			res->fib6_flags = f6i->fib6_flags;
			res->fib6_type = f6i->fib6_type;
		}
	}
}

static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
			 struct fib6_info *rr_head, int oif, int strict,
			 bool *do_rr, struct fib6_result *res)
{
	u32 metric = rr_head->fib6_metric;
	struct fib6_info *cont = NULL;
	int mpri = -1;

	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	if (res->f6i || !cont)
		return;

	__find_rr_leaf(cont, NULL, metric, res, NULL,
		       oif, strict, do_rr, &mpri);
}
static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
		       struct fib6_result *res, int strict)
{
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *rt0;
	bool do_rr = false;
	int key_plen;

	/* make sure this function or its helpers sets f6i */
	res->f6i = NULL;

	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		goto out;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not point to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		goto out;

	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
	if (do_rr) {
		struct fib6_info *next = rcu_dereference(rt0->fib6_next);

		/* no entries matched; do round-robin */
		if (!next || next->fib6_metric != rt0->fib6_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->fib6_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->fib6_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
		}
	}

out:
	if (!res->f6i) {
		res->f6i = net->ipv6.fib6_null_entry;
		res->nh = res->f6i->fib6_nh;
		res->fib6_flags = res->f6i->fib6_flags;
		res->fib6_type = res->f6i->fib6_type;
	}
}
static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
{
	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
	       res->nh->fib_nh_gw_family;
}
#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct fib6_info *rt;

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(net, rt);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			fib6_clean_expires(rt);
		else
			fib6_set_expires(rt, jiffies + HZ * lifetime);

		fib6_info_release(rt);
	}
	return 0;
}
#endif
/*
 *	Misc support functions
 */

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;

	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */
	}

	return dev;
}
static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};

static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}

static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
{
	unsigned short flags = 0;

	if (rt->dst_nocount)
		flags |= DST_NOCOUNT;
	if (rt->dst_nopolicy)
		flags |= DST_NOPOLICY;
	if (rt->dst_host)
		flags |= DST_HOST;

	return flags;
}
static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
{
	rt->dst.error = ip6_rt_type_to_error(fib6_type);

	switch (fib6_type) {
	case RTN_BLACKHOLE:
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}

static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;

	if (res->fib6_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, res->fib6_type);
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
		rt->dst.input = ip6_input;
	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

	if (res->nh->fib_nh_lws) {
		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}
/* Caller must already hold reference to @from */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
	rcu_assign_pointer(rt->from, from);
	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}

/* Caller must already hold reference to f6i in result */
static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	const struct net_device *dev = nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;

	ip6_rt_init_dst(rt, res);

	rt->rt6i_dst = f6i->fib6_dst;
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
	rt->rt6i_flags = res->fib6_flags;
	if (nh->fib_nh_gw_family) {
		rt->rt6i_gateway = nh->fib_nh_gw6;
		rt->rt6i_flags |= RTF_GATEWAY;
	}
	rt6_set_from(rt, f6i);
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = f6i->fib6_src;
#endif
}
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn, *sn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
			fn = fib6_node_lookup(sn, NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
	if (net) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}
/* called with rcu_lock held */
static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;
	unsigned short flags;
	struct rt6_info *nrt;

	if (!fib6_info_hold_safe(f6i))
		goto fallback;

	flags = fib6_info_dst_flags(f6i);
	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
	if (!nrt) {
		fib6_info_release(f6i);
		goto fallback;
	}

	ip6_rt_copy_init(nrt, res);
	return nrt;

fallback:
	nrt = dev_net(dev)->ipv6.ip6_null_entry;
	dst_hold(&nrt->dst);
	return nrt;
}
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct fib6_result res = {};
	struct fib6_node *fn;
	struct rt6_info *rt;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	res.f6i = rcu_dereference(fn->leaf);
	if (!res.f6i)
		res.f6i = net->ipv6.fib6_null_entry;
	else
		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
				 flags);

	if (res.f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;

		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
		goto out;
	}

	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
			 fl6->flowi6_oif != 0, skb, flags);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt))
			dst_use_noref(&rt->dst, jiffies);
	} else {
		rt = ip6_create_rt_rcu(&res);
	}

out:
	trace_fib6_table_lookup(net, &res, table, fl6);

	rcu_read_unlock();

	return rt;
}
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);
/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason,
 * the route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, extack);
	spin_unlock_bh(&table->tb6_lock);

	return err;
}

int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
	struct nl_info info = { .nl_net = net, };

	return __ip6_ins_rt(rt, &info, NULL);
}
static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct fib6_info *f6i = res->f6i;
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	dev = ip6_rt_get_dev_rcu(res);
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
	if (!rt) {
		fib6_info_release(f6i);
		return NULL;
	}

	ip6_rt_copy_init(rt, res);
	rt->rt6i_flags |= RTF_CACHE;
	rt->dst.flags |= DST_HOST;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(res)) {
		if (f6i->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}
static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;
	unsigned short flags = fib6_info_dst_flags(f6i);
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(res);
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
	rcu_read_unlock();
	if (!pcpu_rt) {
		fib6_info_release(f6i);
		return NULL;
	}
	ip6_rt_copy_init(pcpu_rt, res);
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt, **p;

	p = this_cpu_ptr(res->nh->rt6i_pcpu);
	pcpu_rt = *p;

	if (pcpu_rt)
		ip6_hold_safe(NULL, &pcpu_rt);

	return pcpu_rt;
}

static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(res);
	if (!pcpu_rt) {
		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	dst_hold(&pcpu_rt->dst);
	p = this_cpu_ptr(res->nh->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
	BUG_ON(prev);

	if (res->f6i->fib6_destroying) {
		struct fib6_info *from;

		from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
		fib6_info_release(from);
	}

	return pcpu_rt;
}
/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);
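/* Each fib6_nh can hang an array of FIB6_EXCEPTION_BUCKET_SIZE hash
 * buckets off rt6i_exception_bucket, holding RTF_CACHE clones (PMTU
 * and redirect exceptions) indexed by rt6_exception_hash(). Writers
 * serialize on rt6_exception_lock; readers walk the chains under RCU.
 */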
/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct fib6_info *from;
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	net = dev_net(rt6_ex->rt6i->dst.dev);
	net->ipv6.rt6_stats->fib_rt_cache--;

	/* purge completely the exception to allow releasing the held resources:
	 * some [sk] cache may keep the dst around for unlimited time
	 */
	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
	fib6_info_release(from);
	dst_dev_put(&rt6_ex->rt6i->dst);

	hlist_del_rcu(&rt6_ex->hlist);
	dst_release(&rt6_ex->rt6i->dst);
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
}
/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *rt6_ex, *oldest = NULL;

	if (!bucket)
		return;

	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
			oldest = rt6_ex;
	}
	rt6_remove_exception(bucket, oldest);
}
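/* Fold the destination (and, under CONFIG_IPV6_SUBTREES, the source)
 * address into a bucket index; e.g. a destination-only lookup is
 * hash_32(jhash(dst, seed), FIB6_EXCEPTION_BUCKET_SIZE_SHIFT).
 */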
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static u32 seed __read_mostly;
	u32 val;

	net_get_random_once(&seed, sizeof(seed));
	val = jhash(dst, sizeof(*dst), seed);

#ifdef CONFIG_IPV6_SUBTREES
	if (src)
		val = jhash(src, sizeof(*src), val);
#endif
	return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}
static unsigned int fib6_mtu(const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	unsigned int mtu;

	if (res->f6i->fib6_pmtu) {
		mtu = res->f6i->fib6_pmtu;
	} else {
		struct net_device *dev = nh->fib_nh_dev;
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		mtu = idev->cnf.mtu6;
		rcu_read_unlock();
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}
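/* The exception bucket pointer is at least word aligned, so its low
 * bit is free to serve as a "flushed" marker; once set (see
 * fib6_nh_excptn_bucket_set_flushed()), rt6_insert_exception() must
 * not repopulate the table.
 */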
#define FIB6_EXCEPTION_BUCKET_FLUSHED  0x1UL

/* used when the flushed bit is not relevant, only access to the bucket
 * (ie., all bucket users except rt6_insert_exception);
 *
 * called under rcu lock; sometimes called with rt6_exception_lock held
 */
static
struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
						       spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;

	if (lock)
		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
						   lockdep_is_held(lock));
	else
		bucket = rcu_dereference(nh->rt6i_exception_bucket);

	/* remove bucket flushed bit if set */
	if (bucket) {
		unsigned long p = (unsigned long)bucket;

		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
		bucket = (struct rt6_exception_bucket *)p;
	}

	return bucket;
}

static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
{
	unsigned long p = (unsigned long)bucket;

	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
}

/* called with rt6_exception_lock held */
static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
					      spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;
	unsigned long p;

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(lock));

	p = (unsigned long)bucket;
	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
	bucket = (struct rt6_exception_bucket *)p;
	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
}
static int rt6_insert_exception(struct rt6_info *nrt,
				const struct fib6_result *res)
{
	struct net *net = dev_net(nrt->dst.dev);
	struct rt6_exception_bucket *bucket;
	struct fib6_info *f6i = res->f6i;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct fib6_nh *nh = res->nh;
	int err = 0;

	spin_lock_bh(&rt6_exception_lock);

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(&rt6_exception_lock));
	if (!bucket) {
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
	} else if (fib6_nh_excptn_bucket_flushed(bucket)) {
		err = -EINVAL;
		goto out;
	}

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only fib6_dst.
	 */
	if (f6i->fib6_src.plen)
		src_key = &nrt->rt6i_src.addr;
#endif
	/* rt6_mtu_change() might lower mtu on f6i.
	 * Only insert this exception route if its mtu
	 * is less than f6i's mtu value.
	 */
	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
		err = -EINVAL;
		goto out;
	}

	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
	net->ipv6.rt6_stats->fib_rt_cache++;

	if (bucket->depth > FIB6_MAX_DEPTH)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
	if (!err) {
		spin_lock_bh(&f6i->fib6_table->tb6_lock);
		fib6_update_sernum(net, f6i);
		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
		fib6_force_start_gc(net);
	}

	return err;
}
static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	spin_lock_bh(&rt6_exception_lock);

	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (!bucket)
		goto out;

	/* Prevent rt6_insert_exception() from recreating the bucket list */
	if (!from)
		fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
			if (!from ||
			    rcu_access_pointer(rt6_ex->rt6i->from) == from)
				rt6_remove_exception(bucket, rt6_ex);
		}
		WARN_ON_ONCE(!from && bucket->depth);
		bucket++;
	}
out:
	spin_unlock_bh(&rt6_exception_lock);
}

void rt6_flush_exceptions(struct fib6_info *f6i)
{
	fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
}
/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct rt6_info *ret = NULL;

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6i_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * However, the src addr used to create the hash
	 * might not be exactly the passed in saddr which
	 * is a /128 addr from the flow.
	 * So we need to use f6i->fib6_src to redo lookup
	 * if the passed in saddr does not find anything.
	 * (See the logic in ip6_rt_cache_alloc() on how
	 * rt->rt6i_src is updated.)
	 */
	if (res->f6i->fib6_src.plen)
		src_key = saddr;
find_ex:
#endif
	bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		ret = rt6_ex->rt6i;

#ifdef CONFIG_IPV6_SUBTREES
	/* Use fib6_src as src_key and redo lookup */
	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
		src_key = &res->f6i->fib6_src.addr;
		goto find_ex;
	}
#endif

	return ret;
}
/* Remove the passed in cached rt from the hash table that contains it */
static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
				    const struct rt6_info *rt)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int err;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return -ENOENT;

	spin_lock_bh(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex) {
		rt6_remove_exception(bucket, rt6_ex);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&rt6_exception_lock);
	return err;
}

static int rt6_remove_exception_rt(struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);
	if (!from || !(rt->rt6i_flags & RTF_CACHE))
		return -EINVAL;

	return fib6_nh_remove_exception(from->fib6_nh,
					from->fib6_src.plen, rt);
}
/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
				     const struct rt6_info *rt)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;

	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
	if (rt6_ex)
		rt6_ex->stamp = jiffies;
}

static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
	struct fib6_info *from;

	rcu_read_lock();

	from = rcu_dereference(rt->from);
	if (!from || !(rt->rt6i_flags & RTF_CACHE))
		goto unlock;

	fib6_nh_update_exception(from->fib6_nh, from->fib6_src.plen, rt);
unlock:
	rcu_read_unlock();
}
static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
					 struct rt6_info *rt, int mtu)
{
	/* If the new MTU is lower than the route PMTU, this new MTU will be the
	 * lowest MTU in the path: always allow updating the route PMTU to
	 * reflect PMTU decreases.
	 *
	 * If the new MTU is higher, and the route PMTU is equal to the local
	 * MTU, this means the old MTU is the lowest in the path, so allow
	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
	 * handle this.
	 */

	if (dst_mtu(&rt->dst) >= mtu)
		return true;

	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
		return true;

	return false;
}

static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
				       const struct fib6_nh *nh, int mtu)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (!bucket)
		return;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
			 * route), the metrics of its rt->from have already
			 * been updated.
			 */
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
		}
		bucket++;
	}
}
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
					    const struct in6_addr *gateway)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return;

	spin_lock_bh(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				struct rt6_info *entry = rt6_ex->rt6i;

				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
				    RTF_CACHE_GATEWAY &&
				    ipv6_addr_equal(gateway,
						    &entry->rt6i_gateway)) {
					rt6_remove_exception(bucket, rt6_ex);
				}
			}
			bucket++;
		}
	}

	spin_unlock_bh(&rt6_exception_lock);
}
static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

	/* we are pruning and obsoleting aged-out and non gateway exceptions
	 * even if others still have references to them, so that on next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones - are pruned when
	 * expired, independently of their aging, as per RFC 8201 section 4
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;
		__u8 neigh_flags = 0;

		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
		if (neigh)
			neigh_flags = neigh->flags;

		if (!(neigh_flags & NTF_ROUTER)) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}

	gc_args->more++;
}
static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
				   struct fib6_gc_args *gc_args,
				   unsigned long now)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return;

	rcu_read_lock_bh();
	spin_lock(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				rt6_age_examine_exception(bucket, rt6_ex,
							  gc_args, now);
			}
			bucket++;
		}
	}
	spin_unlock(&rt6_exception_lock);
	rcu_read_unlock_bh();
}

void rt6_age_exceptions(struct fib6_info *f6i,
			struct fib6_gc_args *gc_args,
			unsigned long now)
{
	fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
}
/* must be called with rcu lock held */
int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
		      struct flowi6 *fl6, struct fib6_result *res, int strict)
{
	struct fib6_node *fn, *saved_fn;

	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt6_select(net, fn, oif, res, strict);
	if (res->f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	trace_fib6_table_lookup(net, res, table, fl6);

	return 0;
}
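/* Policy routing lookup. Depending on the flow and the route found,
 * the dst returned is one of:
 *  - an RTF_CACHE exception from the nexthop's exception table,
 *  - an uncached RTF_CACHE clone (FLOWI_FLAG_KNOWN_NH, no gateway),
 *  - or the per-cpu copy of the fib6 entry, created on first use.
 */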
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
{
	struct fib6_result res = {};
	struct rt6_info *rt;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	fib6_table_lookup(net, table, oif, fl6, &res, strict);
	if (res.f6i == net->ipv6.fib6_null_entry) {
		rt = net->ipv6.ip6_null_entry;
		rcu_read_unlock();
		dst_hold(&rt->dst);
		return rt;
	}

	fib6_select_path(net, &res, fl6, oif, false, skb, strict);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt))
			dst_use_noref(&rt->dst, jiffies);

		rcu_read_unlock();
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !res.nh->fib_nh_gw_family)) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */
		struct rt6_info *uncached_rt;

		uncached_rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);

		rcu_read_unlock();

		if (uncached_rt) {
			/* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
			 * No need for another dst_hold()
			 */
			rt6_uncached_list_add(uncached_rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
		} else {
			uncached_rt = net->ipv6.ip6_null_entry;
			dst_hold(&uncached_rt->dst);
		}

		return uncached_rt;
	} else {
		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

		local_bh_disable();
		pcpu_rt = rt6_get_pcpu_route(&res);

		if (!pcpu_rt)
			pcpu_rt = rt6_make_pcpu_route(net, &res);

		local_bh_enable();
		rcu_read_unlock();

		return pcpu_rt;
	}
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
static struct rt6_info *ip6_pol_route_input(struct net *net,
					    struct fib6_table *table,
					    struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6,
					 const struct sk_buff *skb,
					 int flags)
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  struct flow_keys *flkeys)
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	struct flow_keys *_flkeys = flkeys;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;
	struct icmp6hdr _icmph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_icmph), &_icmph);
	if (!icmph)
		goto out;

	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
	    icmph->icmp6_type != ICMPV6_PARAMPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) + sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
	_flkeys = NULL;
out:
	if (_flkeys) {
		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;
	} else {
		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
		keys->tags.flow_label = ip6_flowlabel(key_iph);
		keys->basic.ip_proto = key_iph->nexthdr;
	}
}
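/* Hash policy 0 uses L3 information only (addresses, flow label and
 * protocol, taken from the inner packet for ICMPv6 errors); policy 1
 * additionally mixes in the L4 ports from the dissected flow keys.
 */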
/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	struct flow_keys hash_keys;
	u32 mhash;

	switch (ip6_multipath_hash_policy(net)) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
		} else {
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	case 1:
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.ports.src = fl6->fl6_sport;
			hash_keys.ports.dst = fl6->fl6_dport;
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	return mhash >> 1;
}
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};
	struct flow_keys *flkeys = NULL, _flkeys;

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
		flkeys = &_flkeys;

	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
	skb_dst_drop(skb);
	skb_dst_set(skb,
		    ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags));
}
static struct rt6_info *ip6_pol_route_output(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
}

struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (ipv6_addr_type(&fl6->daddr) &
	    (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
		struct dst_entry *dst;

		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

		new = &rt->dst;
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}
/*
 *	Destination cache support functions
 */

static bool fib6_check(struct fib6_info *f6i, u32 cookie)
{
	u32 rt_cookie = 0;

	if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
		return false;

	if (fib6_check_expired(f6i))
		return false;

	return true;
}

static struct dst_entry *rt6_check(struct rt6_info *rt,
				   struct fib6_info *from,
				   u32 cookie)
{
	u32 rt_cookie = 0;

	if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
	    rt_cookie != cookie)
		return NULL;

	if (rt6_check_expired(rt))
		return NULL;

	return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
					    struct fib6_info *from,
					    u32 cookie)
{
	if (!__rt6_check_expired(rt) &&
	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
	    fib6_check(from, cookie))
		return &rt->dst;
	else
		return NULL;
}
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct dst_entry *dst_ret;
	struct fib6_info *from;
	struct rt6_info *rt;

	rt = container_of(dst, struct rt6_info, dst);

	rcu_read_lock();

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	from = rcu_dereference(rt->from);

	if (from && (rt->rt6i_flags & RTF_PCPU ||
	    unlikely(!list_empty(&rt->rt6i_uncached))))
		dst_ret = rt6_dst_from_check(rt, from, cookie);
	else
		dst_ret = rt6_check(rt, from, cookie);

	rcu_read_unlock();

	return dst_ret;
}
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			rcu_read_lock();
			if (rt6_check_expired(rt)) {
				rt6_remove_exception_rt(rt);
				dst = NULL;
			}
			rcu_read_unlock();
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		rcu_read_lock();
		if (rt->rt6i_flags & RTF_CACHE) {
			rt6_remove_exception_rt(rt);
		} else {
			struct fib6_info *from;
			struct fib6_node *fn;

			from = rcu_dereference(rt->from);
			if (from) {
				fn = rcu_dereference(from->fib6_node);
				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
					fn->fn_sernum = -1;
			}
		}
		rcu_read_unlock();
	}
}
static void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
		struct fib6_info *from;

		rcu_read_lock();
		from = rcu_dereference(rt0->from);
		if (from)
			rt0->dst.expires = from->expires;
		rcu_read_unlock();
	}

	dst_set_expires(&rt0->dst, timeout);
	rt0->rt6i_flags |= RTF_EXPIRES;
}

static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
	rt->rt6i_flags |= RTF_MODIFIED;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
	return !(rt->rt6i_flags & RTF_CACHE) &&
		(rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
}
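/* Apply a PMTU update either in place, for RTF_CACHE clones (also
 * refreshing their exception stamp) and for dsts no longer backed by
 * a fib6_info, or by inserting a fresh RTF_CACHE exception clone that
 * carries the reduced MTU for percpu and fib6-backed routes.
 */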
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	dst_confirm_neigh(dst, daddr);
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
		/* update rt6_ex->stamp for cache */
		if (rt6->rt6i_flags & RTF_CACHE)
			rt6_update_exception_stamp_rt(rt6);
	} else if (daddr) {
		struct fib6_result res = {};
		struct rt6_info *nrt6;

		rcu_read_lock();
		res.f6i = rcu_dereference(rt6->from);
		if (!res.f6i) {
			rcu_read_unlock();
			return;
		}
		res.nh = res.f6i->fib6_nh;
		res.fib6_flags = res.f6i->fib6_flags;
		res.fib6_type = res.f6i->fib6_type;

		nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
			if (rt6_insert_exception(nrt6, &res))
				dst_release_immediate(&nrt6->dst);
		}
		rcu_read_unlock();
	}
}

static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
{
	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
}
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	int oif = sk->sk_bound_dev_if;
	struct dst_entry *dst;

	if (!oif && skb->dev)
		oif = l3mdev_master_ifindex(skb->dev);

	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
			   const struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_SUBTREES
	struct ipv6_pinfo *np = inet6_sk(sk);
#endif

	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
		      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
		      &np->saddr :
#endif
		      NULL);
}
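
/* The helpers below validate an incoming redirect. Per RFC 4861 a
 * redirect is only acceptable from the router currently used as the
 * next hop for the destination, so the match is done against the
 * nexthop gateway and, failing that, against any cached exception
 * route whose gateway was already rewritten by an earlier redirect.
 */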
static bool ip6_redirect_nh_match(const struct fib6_result *res,
				  struct flowi6 *fl6,
				  const struct in6_addr *gw,
				  struct rt6_info **ret)
{
	const struct fib6_nh *nh = res->nh;

	if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
	    fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
		return false;

	/* rt_cache's gateway might be different from its 'parent'
	 * in the case of an ip redirect.
	 * So we keep searching in the exception table if the gateway
	 * is different.
	 */
	if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
		struct rt6_info *rt_cache;

		rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
		if (rt_cache &&
		    ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
			*ret = rt_cache;
			return true;
		}
		return false;
	}
	return true;
}
/* Handle redirects */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};
static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *ret = NULL;
	struct fib6_result res = {};
	struct fib6_info *rt;
	struct fib6_node *fn;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for_each_fib6_node_rt_rcu(fn) {
		res.f6i = rt;
		res.nh = rt->fib6_nh;

		if (fib6_check_expired(rt))
			continue;
		if (rt->fib6_flags & RTF_REJECT)
			break;
		if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway, &ret))
			goto out;
	}

	if (!rt)
		rt = net->ipv6.fib6_null_entry;
	else if (rt->fib6_flags & RTF_REJECT) {
		ret = net->ipv6.ip6_null_entry;
		goto out;
	}

	if (rt == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

	res.f6i = rt;
	res.nh = rt->fib6_nh;
out:
	if (ret) {
		ip6_hold_safe(net, &ret);
	} else {
		res.fib6_flags = res.f6i->fib6_flags;
		res.fib6_type = res.f6i->fib6_type;
		ret = ip6_create_rt_rcu(&res);
	}

	rcu_read_unlock();

	trace_fib6_table_lookup(net, &res, table, fl6);
	return ret;
}
static struct dst_entry *ip6_route_redirect(struct net *net,
					    const struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    const struct in6_addr *gateway)
{
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.fl6 = *fl6;
	rdfl.gateway = *gateway;

	return fib6_rule_lookup(net, &rdfl.fl6, skb,
				flags, __ip6_route_redirect);
}
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.flowi6_mark = mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.daddr = msg->dest,
		.saddr = iph->daddr,
		.flowi6_uid = sock_net_uid(net, NULL),
	};

	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);
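
/* Worked example for the advmss computation below: with a 1500 byte
 * device MTU, 1500 - 40 (IPv6 header) - 20 (base TCP header) = 1440
 * bytes is advertised as the MSS, subject to the ip6_rt_min_advmss
 * floor and the IPV6_MAXPLEN cap.
 */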
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	struct inet6_dev *idev;
	unsigned int mtu;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 *
 * based on ip6_dst_mtu_forward and exception logic of
 * rt6_find_cached_rt; called with rcu_read_lock
 */
u32 ip6_mtu_from_fib6(const struct fib6_result *res,
		      const struct in6_addr *daddr,
		      const struct in6_addr *saddr)
{
	const struct fib6_nh *nh = res->nh;
	struct fib6_info *f6i = res->f6i;
	struct inet6_dev *idev;
	struct rt6_info *rt;
	u32 mtu = 0;

	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
		mtu = f6i->fib6_pmtu;
		if (mtu)
			goto out;
	}

	rt = rt6_find_cached_rt(res, daddr, saddr);
	if (unlikely(rt)) {
		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
	} else {
		struct net_device *dev = nh->fib_nh_dev;

		mtu = IPV6_MIN_MTU;
		idev = __in6_dev_get(dev);
		if (idev && idev->cnf.mtu6 > mtu)
			mtu = idev->cnf.mtu6;
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
out:
	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}
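
/* icmp6_dst_alloc() below builds a one-off dst for an ICMPv6 reply.
 * The route is deliberately never inserted into the FIB; it only goes
 * on the uncached list so that device teardown can still find and
 * release it.
 */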
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_disable_ip() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);
	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}
static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout >> 1;
out:
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire >> rt_elasticity;
	return entries > rt_max_size;
}
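
/* Note on the gc heuristics above: collection is skipped while the entry
 * count stays under ip6_rt_max_size and the minimum interval has not
 * elapsed. ip6_rt_gc_expire then decays by (expire >> elasticity) on
 * every call; with the default elasticity of 9 that sheds roughly 0.2%
 * (1/512) of the accumulated aggressiveness each time.
 */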
static struct rt6_info *ip6_nh_lookup_table(struct net *net,
					    struct fib6_config *cfg,
					    const struct in6_addr *gw_addr,
					    u32 tbid, int flags)
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
	struct rt6_info *rt;

	table = fib6_get_table(net, tbid);
	if (!table)
		return NULL;

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
	rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);

	/* if table lookup failed, fall back to full lookup */
	if (rt == net->ipv6.ip6_null_entry) {
		ip6_rt_put(rt);
		rt = NULL;
	}

	return rt;
}
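
/* The onlink check below enforces RTNH_F_ONLINK semantics: the gateway
 * is asserted by the user to be directly reachable on the given device,
 * so a gateway that resolves to a local/anycast/reject route, or through
 * a different egress device, is refused.
 */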
static int ip6_route_check_nh_onlink(struct net *net,
				     struct fib6_config *cfg,
				     const struct net_device *dev,
				     struct netlink_ext_ack *extack)
{
	u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
	struct fib6_info *from;
	struct rt6_info *grt;
	int err;

	err = 0;
	grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
	if (grt) {
		rcu_read_lock();
		from = rcu_dereference(grt->from);
		if (!grt->dst.error &&
		    /* ignore match if it is the default route */
		    from && !ipv6_addr_any(&from->fib6_dst.addr) &&
		    (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop has invalid gateway or device mismatch");
			err = -EINVAL;
		}
		rcu_read_unlock();

		ip6_rt_put(grt);
	}

	return err;
}
2918 static int ip6_route_check_nh(struct net *net,
2919 struct fib6_config *cfg,
2920 struct net_device **_dev,
2921 struct inet6_dev **idev)
2923 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2924 struct net_device *dev = _dev ? *_dev : NULL;
2925 struct rt6_info *grt = NULL;
2926 int err = -EHOSTUNREACH;
2928 if (cfg->fc_table) {
2929 int flags = RT6_LOOKUP_F_IFACE;
2931 grt = ip6_nh_lookup_table(net, cfg, gw_addr,
2932 cfg->fc_table, flags);
2934 if (grt->rt6i_flags & RTF_GATEWAY ||
2935 (dev && dev != grt->dst.dev)) {
2943 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);
2949 if (dev != grt->dst.dev) {
2954 *_dev = dev = grt->dst.dev;
2955 *idev = grt->rt6i_idev;
2957 in6_dev_hold(grt->rt6i_idev);
2960 if (!(grt->rt6i_flags & RTF_GATEWAY))
2969 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
2970 struct net_device **_dev, struct inet6_dev **idev,
2971 struct netlink_ext_ack *extack)
2973 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2974 int gwa_type = ipv6_addr_type(gw_addr);
2975 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
2976 const struct net_device *dev = *_dev;
2977 bool need_addr_check = !dev;
	/* if gw_addr is local we will fail to detect this in case the
	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
	 * will return the already-added prefix route via the interface
	 * the prefix route was assigned to, which might be non-loopback.
	 */
	if (dev &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}
	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
		/* IPv6 strictly inhibits using non-link-local
		 * addresses as nexthop address.
		 * Otherwise, the router will not be able to send redirects.
		 * It is very good, but in some (rare!) circumstances
		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
		 * some exceptions. --ANK
		 * We allow IPv4-mapped nexthops to support RFC4798-type
		 * addressing.
		 */
		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}

		if (cfg->fc_flags & RTNH_F_ONLINK)
			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
		else
			err = ip6_route_check_nh(net, cfg, _dev, idev);
		if (err)
			goto out;
	}
	/* reload in case device was changed */
	dev = *_dev;

	err = -EINVAL;
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Egress device not specified");
		goto out;
	} else if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack,
			       "Egress device can not be loopback device for this route");
		goto out;
	}

	/* if we did not check gw_addr above, do so now that the
	 * egress device has been resolved.
	 */
	if (need_addr_check &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	err = 0;
out:
	return err;
}
static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
{
	if ((flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(flags & RTF_LOCAL)))
		return true;

	return false;
}
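
/* fib6_nh_init() below assembles a nexthop in stages: resolve the egress
 * device, honour RTNH_F_ONLINK, demote impossible loopback routes to
 * reject routes, validate any gateway, allocate the per-cpu route cache
 * and attach lightweight-tunnel state. Errors unwind through
 * fib6_nh_release().
 */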
3053 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3054 struct fib6_config *cfg, gfp_t gfp_flags,
3055 struct netlink_ext_ack *extack)
3057 struct net_device *dev = NULL;
3058 struct inet6_dev *idev = NULL;
3062 fib6_nh->fib_nh_family = AF_INET6;
3065 if (cfg->fc_ifindex) {
3066 dev = dev_get_by_index(net, cfg->fc_ifindex);
3069 idev = in6_dev_get(dev);
3074 if (cfg->fc_flags & RTNH_F_ONLINK) {
3076 NL_SET_ERR_MSG(extack,
3077 "Nexthop device required for onlink");
3081 if (!(dev->flags & IFF_UP)) {
3082 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3087 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3090 fib6_nh->fib_nh_weight = 1;
	/* We cannot add true routes via loopback here,
	 * they would result in kernel looping; promote them to reject routes
	 */
	addr_type = ipv6_addr_type(&cfg->fc_dst);
3096 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3097 /* hold loopback dev/idev if we haven't done so. */
3098 if (dev != net->loopback_dev) {
3103 dev = net->loopback_dev;
3105 idev = in6_dev_get(dev);
3114 if (cfg->fc_flags & RTF_GATEWAY) {
3115 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3119 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3120 fib6_nh->fib_nh_gw_family = AF_INET6;
3127 if (idev->cnf.disable_ipv6) {
3128 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3133 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3134 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3139 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3140 !netif_carrier_ok(dev))
3141 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3143 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3144 if (!fib6_nh->rt6i_pcpu) {
3149 err = fib_nh_common_init(&fib6_nh->nh_common, cfg->fc_encap,
3150 cfg->fc_encap_type, cfg, gfp_flags, extack);
3154 fib6_nh->fib_nh_dev = dev;
3155 fib6_nh->fib_nh_oif = dev->ifindex;
3162 lwtstate_put(fib6_nh->fib_nh_lws);
3163 fib6_nh->fib_nh_lws = NULL;
3171 void fib6_nh_release(struct fib6_nh *fib6_nh)
3173 struct rt6_exception_bucket *bucket;
3177 fib6_nh_flush_exceptions(fib6_nh, NULL);
3178 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3180 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3186 if (fib6_nh->rt6i_pcpu) {
3189 for_each_possible_cpu(cpu) {
3190 struct rt6_info **ppcpu_rt;
3191 struct rt6_info *pcpu_rt;
3193 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3194 pcpu_rt = *ppcpu_rt;
3196 dst_dev_put(&pcpu_rt->dst);
3197 dst_release(&pcpu_rt->dst);
3202 free_percpu(fib6_nh->rt6i_pcpu);
	fib_nh_common_release(&fib6_nh->nh_common);
}
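
/* ip6_route_info_create() below is the common construction path for both
 * netlink and ioctl requests. As a sketch (the command is illustrative
 * only),
 *
 *	ip -6 route add 2001:db8::/64 via fe80::1 dev eth0
 *
 * arrives as RTM_NEWROUTE, is translated by rtm_to_fib6_config() into a
 * struct fib6_config (fc_dst/fc_dst_len, fc_gateway, fc_ifindex, ...)
 * and materialised here as a fib6_info before insertion.
 */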
3208 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3210 struct netlink_ext_ack *extack)
3212 struct net *net = cfg->fc_nlinfo.nl_net;
3213 struct fib6_info *rt = NULL;
3214 struct fib6_table *table;
3218 /* RTF_PCPU is an internal flag; can not be set by userspace */
3219 if (cfg->fc_flags & RTF_PCPU) {
3220 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3224 /* RTF_CACHE is an internal flag; can not be set by userspace */
3225 if (cfg->fc_flags & RTF_CACHE) {
3226 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3230 if (cfg->fc_type > RTN_MAX) {
3231 NL_SET_ERR_MSG(extack, "Invalid route type");
3235 if (cfg->fc_dst_len > 128) {
3236 NL_SET_ERR_MSG(extack, "Invalid prefix length");
3239 if (cfg->fc_src_len > 128) {
3240 NL_SET_ERR_MSG(extack, "Invalid source address length");
3243 #ifndef CONFIG_IPV6_SUBTREES
3244 if (cfg->fc_src_len) {
3245 NL_SET_ERR_MSG(extack,
3246 "Specifying source address requires IPV6_SUBTREES to be enabled");
3252 if (cfg->fc_nlinfo.nlh &&
3253 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3254 table = fib6_get_table(net, cfg->fc_table);
3256 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3257 table = fib6_new_table(net, cfg->fc_table);
3260 table = fib6_new_table(net, cfg->fc_table);
3267 rt = fib6_info_alloc(gfp_flags, true);
3271 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3273 if (IS_ERR(rt->fib6_metrics)) {
3274 err = PTR_ERR(rt->fib6_metrics);
3275 /* Do not leave garbage there. */
3276 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
3280 if (cfg->fc_flags & RTF_ADDRCONF)
3281 rt->dst_nocount = true;
3283 if (cfg->fc_flags & RTF_EXPIRES)
3284 fib6_set_expires(rt, jiffies +
3285 clock_t_to_jiffies(cfg->fc_expires));
3287 fib6_clean_expires(rt);
3289 if (cfg->fc_protocol == RTPROT_UNSPEC)
3290 cfg->fc_protocol = RTPROT_BOOT;
3291 rt->fib6_protocol = cfg->fc_protocol;
3293 rt->fib6_table = table;
3294 rt->fib6_metric = cfg->fc_metric;
3295 rt->fib6_type = cfg->fc_type;
3296 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3298 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3299 rt->fib6_dst.plen = cfg->fc_dst_len;
3300 if (rt->fib6_dst.plen == 128)
3301 rt->dst_host = true;
3303 #ifdef CONFIG_IPV6_SUBTREES
3304 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3305 rt->fib6_src.plen = cfg->fc_src_len;
3307 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
	/* We cannot add true routes via loopback here,
	 * they would result in kernel looping; promote them to reject routes
	 */
	addr_type = ipv6_addr_type(&cfg->fc_dst);
3315 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev, addr_type))
3316 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3318 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3319 struct net_device *dev = fib6_info_nh_dev(rt);
3321 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3322 NL_SET_ERR_MSG(extack, "Invalid source address");
3326 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3327 rt->fib6_prefsrc.plen = 128;
3329 rt->fib6_prefsrc.plen = 0;
3333 fib6_info_release(rt);
3334 return ERR_PTR(err);
3337 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3338 struct netlink_ext_ack *extack)
3340 struct fib6_info *rt;
3343 rt = ip6_route_info_create(cfg, gfp_flags, extack);
3347 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3348 fib6_info_release(rt);
3353 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3355 struct net *net = info->nl_net;
3356 struct fib6_table *table;
3359 if (rt == net->ipv6.fib6_null_entry) {
3364 table = rt->fib6_table;
3365 spin_lock_bh(&table->tb6_lock);
3366 err = fib6_del(rt, info);
3367 spin_unlock_bh(&table->tb6_lock);
3370 fib6_info_release(rt);
3374 int ip6_del_rt(struct net *net, struct fib6_info *rt)
3376 struct nl_info info = { .nl_net = net };
3378 return __ip6_del_rt(rt, &info);
3381 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3383 struct nl_info *info = &cfg->fc_nlinfo;
3384 struct net *net = info->nl_net;
3385 struct sk_buff *skb = NULL;
3386 struct fib6_table *table;
3389 if (rt == net->ipv6.fib6_null_entry)
3391 table = rt->fib6_table;
3392 spin_lock_bh(&table->tb6_lock);
3394 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3395 struct fib6_info *sibling, *next_sibling;
3397 /* prefer to send a single notification with all hops */
3398 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3400 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3402 if (rt6_fill_node(net, skb, rt, NULL,
3403 NULL, NULL, 0, RTM_DELROUTE,
3404 info->portid, seq, 0) < 0) {
3408 info->skip_notify = 1;
3411 list_for_each_entry_safe(sibling, next_sibling,
3414 err = fib6_del(sibling, info);
3420 err = fib6_del(rt, info);
3422 spin_unlock_bh(&table->tb6_lock);
3424 fib6_info_release(rt);
3427 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3428 info->nlh, gfp_any());
3433 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3437 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3440 if (cfg->fc_flags & RTF_GATEWAY &&
3441 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3444 rc = rt6_remove_exception_rt(rt);
3449 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3452 struct fib6_result res = {
3456 struct rt6_info *rt_cache;
3458 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
3460 return __ip6_del_cached_rt(rt_cache, cfg);
3465 static int ip6_route_del(struct fib6_config *cfg,
3466 struct netlink_ext_ack *extack)
3468 struct fib6_table *table;
3469 struct fib6_info *rt;
3470 struct fib6_node *fn;
3473 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3475 NL_SET_ERR_MSG(extack, "FIB table does not exist");
3481 fn = fib6_locate(&table->tb6_root,
3482 &cfg->fc_dst, cfg->fc_dst_len,
3483 &cfg->fc_src, cfg->fc_src_len,
3484 !(cfg->fc_flags & RTF_CACHE));
3487 for_each_fib6_node_rt_rcu(fn) {
3491 if (cfg->fc_flags & RTF_CACHE) {
3494 rc = ip6_del_cached_rt(cfg, rt, nh);
3502 if (cfg->fc_ifindex &&
3504 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
3506 if (cfg->fc_flags & RTF_GATEWAY &&
3507 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
3509 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3511 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
3513 if (!fib6_info_hold_safe(rt))
			/* if a gateway was specified, only delete the one hop */
3518 if (cfg->fc_flags & RTF_GATEWAY)
3519 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3521 return __ip6_del_rt_siblings(rt, cfg);
3529 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
3531 struct netevent_redirect netevent;
3532 struct rt6_info *rt, *nrt = NULL;
3533 struct fib6_result res = {};
3534 struct ndisc_options ndopts;
3535 struct inet6_dev *in6_dev;
3536 struct neighbour *neigh;
3538 int optlen, on_link;
3541 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
3542 optlen -= sizeof(*msg);
3545 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
3549 msg = (struct rd_msg *)icmp6_hdr(skb);
3551 if (ipv6_addr_is_multicast(&msg->dest)) {
3552 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
3557 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
3559 } else if (ipv6_addr_type(&msg->target) !=
3560 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
3561 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
3565 in6_dev = __in6_dev_get(skb->dev);
3568 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
	/* RFC2461 8.1:
	 *	The IP source address of the Redirect MUST be the same as the current
	 *	first-hop router for the specified ICMP Destination Address.
	 */
3576 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
3577 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
3582 if (ndopts.nd_opts_tgt_lladdr) {
3583 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
3586 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
3591 rt = (struct rt6_info *) dst;
3592 if (rt->rt6i_flags & RTF_REJECT) {
3593 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
3603 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
3608 * We have finally decided to accept it.
3611 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
3612 NEIGH_UPDATE_F_WEAK_OVERRIDE|
3613 NEIGH_UPDATE_F_OVERRIDE|
3614 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
3615 NEIGH_UPDATE_F_ISROUTER)),
3616 NDISC_REDIRECT, &ndopts);
3619 res.f6i = rcu_dereference(rt->from);
3623 res.nh = res.f6i->fib6_nh;
3624 res.fib6_flags = res.f6i->fib6_flags;
3625 res.fib6_type = res.f6i->fib6_type;
3626 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
3630 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
3632 nrt->rt6i_flags &= ~RTF_GATEWAY;
3634 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
3636 /* rt6_insert_exception() will take care of duplicated exceptions */
3637 if (rt6_insert_exception(nrt, &res)) {
3638 dst_release_immediate(&nrt->dst);
3642 netevent.old = &rt->dst;
3643 netevent.new = &nrt->dst;
3644 netevent.daddr = &msg->dest;
3645 netevent.neigh = neigh;
3646 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
3650 neigh_release(neigh);
3653 #ifdef CONFIG_IPV6_ROUTE_INFO
3654 static struct fib6_info *rt6_get_route_info(struct net *net,
3655 const struct in6_addr *prefix, int prefixlen,
3656 const struct in6_addr *gwaddr,
3657 struct net_device *dev)
3659 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3660 int ifindex = dev->ifindex;
3661 struct fib6_node *fn;
3662 struct fib6_info *rt = NULL;
3663 struct fib6_table *table;
3665 table = fib6_get_table(net, tb_id);
3670 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
3674 for_each_fib6_node_rt_rcu(fn) {
3675 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
3677 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
3678 !rt->fib6_nh->fib_nh_gw_family)
3680 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
3682 if (!fib6_info_hold_safe(rt))
3691 static struct fib6_info *rt6_add_route_info(struct net *net,
3692 const struct in6_addr *prefix, int prefixlen,
3693 const struct in6_addr *gwaddr,
3694 struct net_device *dev,
3697 struct fib6_config cfg = {
3698 .fc_metric = IP6_RT_PRIO_USER,
3699 .fc_ifindex = dev->ifindex,
3700 .fc_dst_len = prefixlen,
3701 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
3702 RTF_UP | RTF_PREF(pref),
3703 .fc_protocol = RTPROT_RA,
3704 .fc_type = RTN_UNICAST,
3705 .fc_nlinfo.portid = 0,
3706 .fc_nlinfo.nlh = NULL,
3707 .fc_nlinfo.nl_net = net,
	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3711 cfg.fc_dst = *prefix;
3712 cfg.fc_gateway = *gwaddr;
3714 /* We should treat it as a default route if prefix length is 0. */
3716 cfg.fc_flags |= RTF_DEFAULT;
3718 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
3720 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
3724 struct fib6_info *rt6_get_dflt_router(struct net *net,
3725 const struct in6_addr *addr,
3726 struct net_device *dev)
3728 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
3729 struct fib6_info *rt;
3730 struct fib6_table *table;
3732 table = fib6_get_table(net, tb_id);
3737 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3738 struct fib6_nh *nh = rt->fib6_nh;
3740 if (dev == nh->fib_nh_dev &&
3741 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
3742 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
3745 if (rt && !fib6_info_hold_safe(rt))
3751 struct fib6_info *rt6_add_dflt_router(struct net *net,
3752 const struct in6_addr *gwaddr,
3753 struct net_device *dev,
3756 struct fib6_config cfg = {
3757 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
3758 .fc_metric = IP6_RT_PRIO_USER,
3759 .fc_ifindex = dev->ifindex,
3760 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
3761 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
3762 .fc_protocol = RTPROT_RA,
3763 .fc_type = RTN_UNICAST,
3764 .fc_nlinfo.portid = 0,
3765 .fc_nlinfo.nlh = NULL,
3766 .fc_nlinfo.nl_net = net,
3769 cfg.fc_gateway = *gwaddr;
3771 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
3772 struct fib6_table *table;
3774 table = fib6_get_table(dev_net(dev), cfg.fc_table);
3776 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
3779 return rt6_get_dflt_router(net, gwaddr, dev);
3782 static void __rt6_purge_dflt_routers(struct net *net,
3783 struct fib6_table *table)
3785 struct fib6_info *rt;
3789 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3790 struct net_device *dev = fib6_info_nh_dev(rt);
3791 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
3793 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
3794 (!idev || idev->cnf.accept_ra != 2) &&
3795 fib6_info_hold_safe(rt)) {
3797 ip6_del_rt(net, rt);
3803 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
3806 void rt6_purge_dflt_routers(struct net *net)
3808 struct fib6_table *table;
3809 struct hlist_head *head;
3814 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
3815 head = &net->ipv6.fib_table_hash[h];
3816 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
3817 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
3818 __rt6_purge_dflt_routers(net, table);
3825 static void rtmsg_to_fib6_config(struct net *net,
3826 struct in6_rtmsg *rtmsg,
3827 struct fib6_config *cfg)
3829 *cfg = (struct fib6_config){
3830 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
3832 .fc_ifindex = rtmsg->rtmsg_ifindex,
3833 .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
3834 .fc_expires = rtmsg->rtmsg_info,
3835 .fc_dst_len = rtmsg->rtmsg_dst_len,
3836 .fc_src_len = rtmsg->rtmsg_src_len,
3837 .fc_flags = rtmsg->rtmsg_flags,
3838 .fc_type = rtmsg->rtmsg_type,
3840 .fc_nlinfo.nl_net = net,
3842 .fc_dst = rtmsg->rtmsg_dst,
3843 .fc_src = rtmsg->rtmsg_src,
3844 .fc_gateway = rtmsg->rtmsg_gateway,
3848 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3850 struct fib6_config cfg;
3851 struct in6_rtmsg rtmsg;
3855 case SIOCADDRT: /* Add a route */
3856 case SIOCDELRT: /* Delete a route */
3857 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3859 err = copy_from_user(&rtmsg, arg,
3860 sizeof(struct in6_rtmsg));
3864 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
3869 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
3872 err = ip6_route_del(&cfg, NULL);
/*
 *	Drop the packet on the floor
 */
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net *net = dev_net(dst->dev);
	struct inet6_dev *idev;
	int type;

	if (netif_is_l3_master(skb->dev) &&
	    dst->dev == net->loopback_dev)
		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
	else
		idev = ip6_dst_idev(dst);

	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
			break;
		}
		/* FALLTHROUGH */
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
		break;
	}

	/* Start over by dropping the dst for l3mdev case */
	if (netif_is_l3_master(skb->dev))
		skb_dst_drop(skb);

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
	kfree_skb(skb);
	return 0;
}
static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}

static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}

static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}

static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
/*
 *	Allocate a dst for local (unicast / anycast) address.
 */
3950 struct fib6_info *addrconf_f6i_alloc(struct net *net,
3951 struct inet6_dev *idev,
3952 const struct in6_addr *addr,
3953 bool anycast, gfp_t gfp_flags)
3955 struct fib6_config cfg = {
3956 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
3957 .fc_ifindex = idev->dev->ifindex,
3958 .fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP,
3961 .fc_protocol = RTPROT_KERNEL,
3962 .fc_nlinfo.nl_net = net,
3963 .fc_ignore_dev_down = true,
3967 cfg.fc_type = RTN_ANYCAST;
3968 cfg.fc_flags |= RTF_ANYCAST;
3970 cfg.fc_type = RTN_LOCAL;
3971 cfg.fc_flags |= RTF_LOCAL;
3974 return ip6_route_info_create(&cfg, gfp_flags, NULL);
3977 /* remove deleted ip from prefsrc entries */
3978 struct arg_dev_net_ip {
3979 struct net_device *dev;
3981 struct in6_addr *addr;
3984 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
3986 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
3987 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
3988 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
3990 if (((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
3991 rt != net->ipv6.fib6_null_entry &&
3992 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
3993 spin_lock_bh(&rt6_exception_lock);
3994 /* remove prefsrc entry */
3995 rt->fib6_prefsrc.plen = 0;
3996 spin_unlock_bh(&rt6_exception_lock);
4001 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4003 struct net *net = dev_net(ifp->idev->dev);
4004 struct arg_dev_net_ip adni = {
4005 .dev = ifp->idev->dev,
4009 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4012 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
/* Remove routers and update dst entries when a gateway turns into a host. */
static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
{
	struct in6_addr *gateway = (struct in6_addr *)arg;
	struct fib6_nh *nh = rt->fib6_nh;

	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
	    nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
		return -1;

	/* Further clean up cached routes in exception table.
	 * This is needed because a cached route may have a different
	 * gateway than its 'parent' in the case of an ip redirect.
	 */
	fib6_nh_exceptions_clean_tohost(nh, gateway);

	return 0;
}
4033 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4035 fib6_clean_all(net, fib6_clean_tohost, gateway);
4038 struct arg_netdev_event {
4039 const struct net_device *dev;
4041 unsigned char nh_flags;
4042 unsigned long event;
4046 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4048 struct fib6_info *iter;
4049 struct fib6_node *fn;
4051 fn = rcu_dereference_protected(rt->fib6_node,
4052 lockdep_is_held(&rt->fib6_table->tb6_lock));
4053 iter = rcu_dereference_protected(fn->leaf,
4054 lockdep_is_held(&rt->fib6_table->tb6_lock));
4056 if (iter->fib6_metric == rt->fib6_metric &&
4057 rt6_qualify_for_ecmp(iter))
4059 iter = rcu_dereference_protected(iter->fib6_next,
4060 lockdep_is_held(&rt->fib6_table->tb6_lock));
4066 static bool rt6_is_dead(const struct fib6_info *rt)
4068 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4069 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4070 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
static int rt6_multipath_total_weight(const struct fib6_info *rt)
{
	struct fib6_info *iter;
	int total = 0;

	if (!rt6_is_dead(rt))
		total += rt->fib6_nh->fib_nh_weight;

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
		if (!rt6_is_dead(iter))
			total += iter->fib6_nh->fib_nh_weight;
	}

	return total;
}
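
/* Worked example for the upper bound math below: with two live siblings
 * of weight 1 each, total = 2 and the first nexthop gets
 * upper_bound = DIV_ROUND_CLOSEST_ULL(1ULL << 31, 2) = 1 << 30; a flow
 * hash below that bound selects the first nexthop, anything above falls
 * through to the second.
 */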
4092 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4094 int upper_bound = -1;
4096 if (!rt6_is_dead(rt)) {
4097 *weight += rt->fib6_nh->fib_nh_weight;
4098 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4101 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4104 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4106 struct fib6_info *iter;
4109 rt6_upper_bound_set(rt, &weight, total);
4111 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4112 rt6_upper_bound_set(iter, &weight, total);
4115 void rt6_multipath_rebalance(struct fib6_info *rt)
4117 struct fib6_info *first;
	/* In case the entire multipath route was marked for flushing,
	 * then there is no need to rebalance upon the removal of every
	 * sibling route.
	 */
	if (!rt->fib6_nsiblings || rt->should_flush)
		return;

	/* During lookup routes are evaluated in order, so we need to
	 * make sure upper bounds are assigned from the first sibling
	 * onwards.
	 */
	first = rt6_multipath_first_sibling(rt);
4132 if (WARN_ON_ONCE(!first))
4135 total = rt6_multipath_total_weight(first);
4136 rt6_multipath_upper_bound_set(first, total);
4139 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4141 const struct arg_netdev_event *arg = p_arg;
4142 struct net *net = dev_net(arg->dev);
4144 if (rt != net->ipv6.fib6_null_entry &&
4145 rt->fib6_nh->fib_nh_dev == arg->dev) {
4146 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4147 fib6_update_sernum_upto_root(net, rt);
4148 rt6_multipath_rebalance(rt);
4154 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4156 struct arg_netdev_event arg = {
4159 .nh_flags = nh_flags,
4163 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4164 arg.nh_flags |= RTNH_F_LINKDOWN;
4166 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4169 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4170 const struct net_device *dev)
4172 struct fib6_info *iter;
4174 if (rt->fib6_nh->fib_nh_dev == dev)
4176 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4177 if (iter->fib6_nh->fib_nh_dev == dev)
4183 static void rt6_multipath_flush(struct fib6_info *rt)
4185 struct fib6_info *iter;
4187 rt->should_flush = 1;
4188 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4189 iter->should_flush = 1;
4192 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4193 const struct net_device *down_dev)
4195 struct fib6_info *iter;
4196 unsigned int dead = 0;
4198 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4199 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4201 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4202 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4203 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4209 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4210 const struct net_device *dev,
4211 unsigned char nh_flags)
4213 struct fib6_info *iter;
4215 if (rt->fib6_nh->fib_nh_dev == dev)
4216 rt->fib6_nh->fib_nh_flags |= nh_flags;
4217 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4218 if (iter->fib6_nh->fib_nh_dev == dev)
4219 iter->fib6_nh->fib_nh_flags |= nh_flags;
4222 /* called with write lock held for table with rt */
4223 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4225 const struct arg_netdev_event *arg = p_arg;
4226 const struct net_device *dev = arg->dev;
4227 struct net *net = dev_net(dev);
4229 if (rt == net->ipv6.fib6_null_entry)
4232 switch (arg->event) {
4233 case NETDEV_UNREGISTER:
4234 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4236 if (rt->should_flush)
4238 if (!rt->fib6_nsiblings)
4239 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4240 if (rt6_multipath_uses_dev(rt, dev)) {
4243 count = rt6_multipath_dead_count(rt, dev);
4244 if (rt->fib6_nsiblings + 1 == count) {
4245 rt6_multipath_flush(rt);
4248 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4250 fib6_update_sernum(net, rt);
4251 rt6_multipath_rebalance(rt);
4255 if (rt->fib6_nh->fib_nh_dev != dev ||
4256 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4258 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4259 rt6_multipath_rebalance(rt);
4266 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4268 struct arg_netdev_event arg = {
4274 struct net *net = dev_net(dev);
4276 if (net->ipv6.sysctl.skip_notify_on_dev_down)
4277 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4279 fib6_clean_all(net, fib6_ifdown, &arg);
4282 void rt6_disable_ip(struct net_device *dev, unsigned long event)
4284 rt6_sync_down_dev(dev, event);
4285 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4286 neigh_ifdown(&nd_tbl, dev);
4289 struct rt6_mtu_change_arg {
4290 struct net_device *dev;
4292 struct fib6_info *f6i;
4295 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4297 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4298 struct fib6_info *f6i = arg->f6i;
	/* For administrative MTU increase, there is no way to discover
	 * an IPv6 PMTU increase, so the PMTU must be raised here.
	 * Since RFC 1981 doesn't include administrative MTU increase,
	 * updating the PMTU on increase is a MUST. (i.e. jumbo frame)
	 */
4305 if (nh->fib_nh_dev == arg->dev) {
4306 struct inet6_dev *idev = __in6_dev_get(arg->dev);
4307 u32 mtu = f6i->fib6_pmtu;
4309 if (mtu >= arg->mtu ||
4310 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4311 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4313 spin_lock_bh(&rt6_exception_lock);
4314 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4315 spin_unlock_bh(&rt6_exception_lock);
4321 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4323 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4324 struct inet6_dev *idev;
	/* In IPv6 pmtu discovery is not optional,
	 * so that RTAX_MTU lock cannot disable it.
	 * We still use this lock to block changes
	 * caused by addrconf/ndisc.
	 */
4332 idev = __in6_dev_get(arg->dev);
4336 if (fib6_metric_locked(f6i, RTAX_MTU))
4340 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
4343 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4345 struct rt6_mtu_change_arg arg = {
4350 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4353 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4354 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
4355 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4356 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4357 [RTA_OIF] = { .type = NLA_U32 },
4358 [RTA_IIF] = { .type = NLA_U32 },
4359 [RTA_PRIORITY] = { .type = NLA_U32 },
4360 [RTA_METRICS] = { .type = NLA_NESTED },
4361 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4362 [RTA_PREF] = { .type = NLA_U8 },
4363 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4364 [RTA_ENCAP] = { .type = NLA_NESTED },
4365 [RTA_EXPIRES] = { .type = NLA_U32 },
4366 [RTA_UID] = { .type = NLA_U32 },
4367 [RTA_MARK] = { .type = NLA_U32 },
4368 [RTA_TABLE] = { .type = NLA_U32 },
4369 [RTA_IP_PROTO] = { .type = NLA_U8 },
4370 [RTA_SPORT] = { .type = NLA_U16 },
	[RTA_DPORT]		= { .type = NLA_U16 },
};
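
/* Note on the policy above: the strict_start_type entry makes
 * nlmsg_parse_deprecated() reject unknown attributes whose type is
 * greater than RTA_DPORT, so newer attribute types get strict
 * validation while pre-existing userspace sloppiness is still
 * tolerated for the older ones.
 */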
4374 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4375 struct fib6_config *cfg,
4376 struct netlink_ext_ack *extack)
4379 struct nlattr *tb[RTA_MAX+1];
4383 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
4384 rtm_ipv6_policy, extack);
4389 rtm = nlmsg_data(nlh);
4391 *cfg = (struct fib6_config){
4392 .fc_table = rtm->rtm_table,
4393 .fc_dst_len = rtm->rtm_dst_len,
4394 .fc_src_len = rtm->rtm_src_len,
4396 .fc_protocol = rtm->rtm_protocol,
4397 .fc_type = rtm->rtm_type,
4399 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
4400 .fc_nlinfo.nlh = nlh,
4401 .fc_nlinfo.nl_net = sock_net(skb->sk),
4404 if (rtm->rtm_type == RTN_UNREACHABLE ||
4405 rtm->rtm_type == RTN_BLACKHOLE ||
4406 rtm->rtm_type == RTN_PROHIBIT ||
4407 rtm->rtm_type == RTN_THROW)
4408 cfg->fc_flags |= RTF_REJECT;
4410 if (rtm->rtm_type == RTN_LOCAL)
4411 cfg->fc_flags |= RTF_LOCAL;
4413 if (rtm->rtm_flags & RTM_F_CLONED)
4414 cfg->fc_flags |= RTF_CACHE;
4416 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4418 if (tb[RTA_GATEWAY]) {
4419 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4420 cfg->fc_flags |= RTF_GATEWAY;
4423 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
4428 int plen = (rtm->rtm_dst_len + 7) >> 3;
4430 if (nla_len(tb[RTA_DST]) < plen)
4433 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
4437 int plen = (rtm->rtm_src_len + 7) >> 3;
4439 if (nla_len(tb[RTA_SRC]) < plen)
4442 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
4445 if (tb[RTA_PREFSRC])
4446 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
4449 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4451 if (tb[RTA_PRIORITY])
4452 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
4454 if (tb[RTA_METRICS]) {
4455 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
4456 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
4460 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
4462 if (tb[RTA_MULTIPATH]) {
4463 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
4464 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
4466 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
4467 cfg->fc_mp_len, extack);
4473 pref = nla_get_u8(tb[RTA_PREF]);
4474 if (pref != ICMPV6_ROUTER_PREF_LOW &&
4475 pref != ICMPV6_ROUTER_PREF_HIGH)
4476 pref = ICMPV6_ROUTER_PREF_MEDIUM;
4477 cfg->fc_flags |= RTF_PREF(pref);
4481 cfg->fc_encap = tb[RTA_ENCAP];
4483 if (tb[RTA_ENCAP_TYPE]) {
4484 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
4486 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
4491 if (tb[RTA_EXPIRES]) {
4492 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
4494 if (addrconf_finite_timeout(timeout)) {
4495 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
4496 cfg->fc_flags |= RTF_EXPIRES;
4506 struct fib6_info *fib6_info;
4507 struct fib6_config r_cfg;
4508 struct list_head next;
4511 static int ip6_route_info_append(struct net *net,
4512 struct list_head *rt6_nh_list,
4513 struct fib6_info *rt,
4514 struct fib6_config *r_cfg)
4519 list_for_each_entry(nh, rt6_nh_list, next) {
4520 /* check if fib6_info already exists */
4521 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
4525 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
4529 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
4530 list_add_tail(&nh->next, rt6_nh_list);
static void ip6_route_mpath_notify(struct fib6_info *rt,
				   struct fib6_info *rt_last,
				   struct nl_info *info,
				   __u16 nlflags)
{
	/* if this is an APPEND route, then rt points to the first route
	 * inserted and rt_last points to the last route inserted. Userspace
	 * wants a consistent dump of the route which starts at the first
	 * nexthop. Since sibling routes are always added at the end of
	 * the list, find the first sibling of the last route appended.
	 */
	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
		rt = list_first_entry(&rt_last->fib6_siblings,
				      struct fib6_info,
				      fib6_siblings);
	}

	if (rt)
		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}
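
/* ip6_route_multipath_add() below handles RTM_NEWROUTE carrying an
 * RTA_MULTIPATH attribute, e.g. (illustrative command)
 *
 *	ip -6 route add 2001:db8::/64 nexthop via fe80::1 dev eth0 \
 *					nexthop via fe80::2 dev eth1
 *
 * Each rtnexthop is turned into its own fib6_info; they are inserted
 * one by one and linked as siblings, with a single notification sent
 * at the end covering the whole route.
 */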
4556 static int ip6_route_multipath_add(struct fib6_config *cfg,
4557 struct netlink_ext_ack *extack)
4559 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
4560 struct nl_info *info = &cfg->fc_nlinfo;
4561 struct fib6_config r_cfg;
4562 struct rtnexthop *rtnh;
4563 struct fib6_info *rt;
4564 struct rt6_nh *err_nh;
4565 struct rt6_nh *nh, *nh_safe;
4571 int replace = (cfg->fc_nlinfo.nlh &&
4572 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
4573 LIST_HEAD(rt6_nh_list);
4575 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
4576 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
4577 nlflags |= NLM_F_APPEND;
4579 remaining = cfg->fc_mp_len;
4580 rtnh = (struct rtnexthop *)cfg->fc_mp;
4582 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
4583 * fib6_info structs per nexthop
4585 while (rtnh_ok(rtnh, remaining)) {
4586 memcpy(&r_cfg, cfg, sizeof(*cfg));
4587 if (rtnh->rtnh_ifindex)
4588 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4590 attrlen = rtnh_attrlen(rtnh);
4592 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4594 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4596 r_cfg.fc_gateway = nla_get_in6_addr(nla);
4597 r_cfg.fc_flags |= RTF_GATEWAY;
4599 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
4600 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
4602 r_cfg.fc_encap_type = nla_get_u16(nla);
4605 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
4606 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
4612 if (!rt6_qualify_for_ecmp(rt)) {
4614 NL_SET_ERR_MSG(extack,
4615 "Device only routes can not be added for IPv6 using the multipath API.");
4616 fib6_info_release(rt);
4620 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
4622 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
4625 fib6_info_release(rt);
4629 rtnh = rtnh_next(rtnh, &remaining);
	/* for add and replace, send one notification with all nexthops.
	 * Skip the notification in fib6_add_rt2node and send one with
	 * the full route when done.
	 */
	info->skip_notify = 1;

	err_nh = NULL;
	list_for_each_entry(nh, &rt6_nh_list, next) {
		err = __ip6_ins_rt(nh->fib6_info, info, extack);
		fib6_info_release(nh->fib6_info);

		if (!err) {
			/* save reference to last route successfully inserted */
			rt_last = nh->fib6_info;

			/* save reference to first route for notification */
			if (!rt_notif)
				rt_notif = nh->fib6_info;
		}

		/* nh->fib6_info is used or freed at this point, reset to NULL */
		nh->fib6_info = NULL;
		if (err) {
			if (replace && nhn)
				NL_SET_ERR_MSG_MOD(extack,
						   "multipath route replace failed (check consistency of installed routes)");
			err_nh = nh;
			goto add_errout;
		}
		/* Because each route is added like a single route we remove
		 * these flags after the first nexthop: if there is a collision,
		 * we have already failed to add the first nexthop:
		 * fib6_add_rt2node() has rejected it; when replacing, old
		 * nexthops have been replaced by the first new, the rest should
		 * be added to it.
		 */
		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
						     NLM_F_REPLACE);
		nhn++;
	}
	/* success ... tell user about new route */
	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
	goto cleanup;

add_errout:
	/* send notification for routes that were added so that
	 * the delete notifications sent by ip6_route_del are
	 * coherent
	 */
	if (rt_notif)
		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);

	/* Delete routes that were already added */
	list_for_each_entry(nh, &rt6_nh_list, next) {
		if (err_nh == nh)
			break;
		ip6_route_del(&nh->r_cfg, extack);
	}

cleanup:
	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
		if (nh->fib6_info)
			fib6_info_release(nh->fib6_info);
		list_del(&nh->next);
		kfree(nh);
	}

	return err;
}
4704 static int ip6_route_multipath_del(struct fib6_config *cfg,
4705 struct netlink_ext_ack *extack)
4707 struct fib6_config r_cfg;
4708 struct rtnexthop *rtnh;
4711 int err = 1, last_err = 0;
4713 remaining = cfg->fc_mp_len;
4714 rtnh = (struct rtnexthop *)cfg->fc_mp;
4716 /* Parse a Multipath Entry */
4717 while (rtnh_ok(rtnh, remaining)) {
4718 memcpy(&r_cfg, cfg, sizeof(*cfg));
4719 if (rtnh->rtnh_ifindex)
4720 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4722 attrlen = rtnh_attrlen(rtnh);
4724 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4726 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4728 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
4729 r_cfg.fc_flags |= RTF_GATEWAY;
4732 err = ip6_route_del(&r_cfg, extack);
4736 rtnh = rtnh_next(rtnh, &remaining);
4742 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4743 struct netlink_ext_ack *extack)
4745 struct fib6_config cfg;
4748 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4753 return ip6_route_multipath_del(&cfg, extack);
4755 cfg.fc_delete_all_nh = 1;
4756 return ip6_route_del(&cfg, extack);
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct fib6_config cfg;
	int err;

	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
	if (err < 0)
		return err;

	if (cfg.fc_metric == 0)
		cfg.fc_metric = IP6_RT_PRIO_USER;

	if (cfg.fc_mp)
		return ip6_route_multipath_add(&cfg, extack);
	else
		return ip6_route_add(&cfg, GFP_KERNEL, extack);
}
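
/* rt6_nlmsg_size() below computes a worst-case skb size for one route
 * notification; it intentionally over-estimates (every optional
 * attribute is accounted for) so that filling an skb of this size in
 * rt6_fill_node() should never hit -EMSGSIZE.
 */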
4779 static size_t rt6_nlmsg_size(struct fib6_info *rt)
4781 int nexthop_len = 0;
4783 if (rt->fib6_nsiblings) {
4784 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
4785 + NLA_ALIGN(sizeof(struct rtnexthop))
4786 + nla_total_size(16) /* RTA_GATEWAY */
4787 + lwtunnel_get_encap_size(rt->fib6_nh->fib_nh_lws);
4789 nexthop_len *= rt->fib6_nsiblings;
4792 return NLMSG_ALIGN(sizeof(struct rtmsg))
4793 + nla_total_size(16) /* RTA_SRC */
4794 + nla_total_size(16) /* RTA_DST */
4795 + nla_total_size(16) /* RTA_GATEWAY */
4796 + nla_total_size(16) /* RTA_PREFSRC */
4797 + nla_total_size(4) /* RTA_TABLE */
4798 + nla_total_size(4) /* RTA_IIF */
4799 + nla_total_size(4) /* RTA_OIF */
4800 + nla_total_size(4) /* RTA_PRIORITY */
4801 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
4802 + nla_total_size(sizeof(struct rta_cacheinfo))
4803 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
4804 + nla_total_size(1) /* RTA_PREF */
	       + lwtunnel_get_encap_size(rt->fib6_nh->fib_nh_lws)
	       + nexthop_len;
}
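
/* rt6_fill_node() below serves two callers: FIB dumps pass a bare
 * fib6_info (dst == NULL), while RTM_GETROUTE passes the dst actually
 * used for the lookup. The rt6/rt branches at the top pick destination,
 * source and flags from whichever object is authoritative.
 */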
4809 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4810 struct fib6_info *rt, struct dst_entry *dst,
4811 struct in6_addr *dest, struct in6_addr *src,
4812 int iif, int type, u32 portid, u32 seq,
4815 struct rt6_info *rt6 = (struct rt6_info *)dst;
4816 struct rt6key *rt6_dst, *rt6_src;
4817 u32 *pmetrics, table, rt6_flags;
4818 struct nlmsghdr *nlh;
4822 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
4827 rt6_dst = &rt6->rt6i_dst;
4828 rt6_src = &rt6->rt6i_src;
4829 rt6_flags = rt6->rt6i_flags;
4831 rt6_dst = &rt->fib6_dst;
4832 rt6_src = &rt->fib6_src;
4833 rt6_flags = rt->fib6_flags;
4836 rtm = nlmsg_data(nlh);
4837 rtm->rtm_family = AF_INET6;
4838 rtm->rtm_dst_len = rt6_dst->plen;
4839 rtm->rtm_src_len = rt6_src->plen;
4842 table = rt->fib6_table->tb6_id;
4844 table = RT6_TABLE_UNSPEC;
4845 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
4846 if (nla_put_u32(skb, RTA_TABLE, table))
4847 goto nla_put_failure;
4849 rtm->rtm_type = rt->fib6_type;
4851 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
4852 rtm->rtm_protocol = rt->fib6_protocol;
4854 if (rt6_flags & RTF_CACHE)
4855 rtm->rtm_flags |= RTM_F_CLONED;
4858 if (nla_put_in6_addr(skb, RTA_DST, dest))
4859 goto nla_put_failure;
4860 rtm->rtm_dst_len = 128;
4861 } else if (rtm->rtm_dst_len)
4862 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
4863 goto nla_put_failure;
4864 #ifdef CONFIG_IPV6_SUBTREES
4866 if (nla_put_in6_addr(skb, RTA_SRC, src))
4867 goto nla_put_failure;
4868 rtm->rtm_src_len = 128;
4869 } else if (rtm->rtm_src_len &&
4870 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
4871 goto nla_put_failure;
4874 #ifdef CONFIG_IPV6_MROUTE
4875 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
4876 int err = ip6mr_get_route(net, skb, rtm, portid);
4881 goto nla_put_failure;
4884 if (nla_put_u32(skb, RTA_IIF, iif))
4885 goto nla_put_failure;
4887 struct in6_addr saddr_buf;
4888 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
4889 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4890 goto nla_put_failure;
4893 if (rt->fib6_prefsrc.plen) {
4894 struct in6_addr saddr_buf;
4895 saddr_buf = rt->fib6_prefsrc.addr;
4896 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4897 goto nla_put_failure;
4900 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
4901 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
4902 goto nla_put_failure;
4904 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
4905 goto nla_put_failure;
	/* For multipath routes, walk the siblings list and add
	 * each as a nexthop within RTA_MULTIPATH.
	 */
	if (rt6) {
		if (rt6_flags & RTF_GATEWAY &&
		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
			goto nla_put_failure;

		if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
			goto nla_put_failure;
4917 } else if (rt->fib6_nsiblings) {
4918 struct fib6_info *sibling, *next_sibling;
4921 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
4923 goto nla_put_failure;
4925 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
4926 rt->fib6_nh->fib_nh_weight) < 0)
4927 goto nla_put_failure;
4929 list_for_each_entry_safe(sibling, next_sibling,
4930 &rt->fib6_siblings, fib6_siblings) {
4931 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
4932 sibling->fib6_nh->fib_nh_weight) < 0)
4933 goto nla_put_failure;
4936 nla_nest_end(skb, mp);
4938 unsigned char nh_flags = 0;
4940 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common,
4941 &nh_flags, false) < 0)
4942 goto nla_put_failure;
4944 rtm->rtm_flags |= nh_flags;
	if (rt6_flags & RTF_EXPIRES) {
		expires = dst ? dst->expires : rt->expires;
		expires -= jiffies;
	}

	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
		goto nla_put_failure;
4955 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
4956 goto nla_put_failure;
4959 nlmsg_end(skb, nlh);
4963 nlmsg_cancel(skb, nlh);
4967 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
4968 const struct net_device *dev)
4970 if (f6i->fib6_nh->fib_nh_dev == dev)
4973 if (f6i->fib6_nsiblings) {
4974 struct fib6_info *sibling, *next_sibling;
4976 list_for_each_entry_safe(sibling, next_sibling,
4977 &f6i->fib6_siblings, fib6_siblings) {
4978 if (sibling->fib6_nh->fib_nh_dev == dev)
4986 int rt6_dump_route(struct fib6_info *rt, void *p_arg)
4988 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
4989 struct fib_dump_filter *filter = &arg->filter;
4990 unsigned int flags = NLM_F_MULTI;
4991 struct net *net = arg->net;
4993 if (rt == net->ipv6.fib6_null_entry)
4996 if ((filter->flags & RTM_F_PREFIX) &&
4997 !(rt->fib6_flags & RTF_PREFIX_RT)) {
4998 /* success since this is not a prefix route */
5001 if (filter->filter_set) {
5002 if ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
5003 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
5004 (filter->protocol && rt->fib6_protocol != filter->protocol)) {
5007 flags |= NLM_F_DUMP_FILTERED;
5010 return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
5011 RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
5012 arg->cb->nlh->nlmsg_seq, flags);
static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
					const struct nlmsghdr *nlh,
					struct nlattr **tb,
					struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid header for get route request");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
					      rtm_ipv6_policy, extack);

	rtm = nlmsg_data(nlh);
	if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
	    rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
	    rtm->rtm_type) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
		return -EINVAL;
	}
	if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid flags for get route request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
					    rtm_ipv6_policy, extack);
	if (err)
		return err;

	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
		return -EINVAL;
	}

	for (i = 0; i <= RTA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case RTA_SRC:
		case RTA_DST:
		case RTA_IIF:
		case RTA_OIF:
		case RTA_MARK:
		case RTA_UID:
		case RTA_SPORT:
		case RTA_DPORT:
		case RTA_IP_PROTO:
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
			return -EINVAL;
		}
	}

	return 0;
}
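/* Illustrative sketch, not part of the kernel build: a request header
 * that satisfies the strict checks above.  memset() zeroes every field
 * the validator insists on being zero (rtm_table, rtm_protocol,
 * rtm_scope, rtm_type); the helper name is hypothetical.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <linux/rtnetlink.h>

static void fill_strict_getroute_hdr(struct rtmsg *rtm)
{
	memset(rtm, 0, sizeof(*rtm));
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = 128;		/* required once RTA_DST is attached */
	rtm->rtm_flags = RTM_F_FIB_MATCH;	/* the only flag strict mode allows */
}
#endif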
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	int err, iif = 0, oif = 0;
	struct fib6_info *from;
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6 = {};
	bool fibmatch;

	err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();

	if (tb[RTA_SPORT])
		fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &fl6.flowi6_proto, AF_INET6,
						  extack);
		if (err)
			goto errout;
	}

	if (iif) {
		struct net_device *dev;
		int flags = 0;

		rcu_read_lock();

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			rcu_read_unlock();
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);

		rcu_read_unlock();
	} else {
		fl6.flowi6_oif = oif;

		dst = ip6_route_output(net, NULL, &fl6);
	}

	rt = container_of(dst, struct rt6_info, dst);
	if (rt->dst.error) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	skb_dst_set(skb, &rt->dst);

	rcu_read_lock();
	from = rcu_dereference(rt->from);
	if (from) {
		if (fibmatch)
			err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
					    iif, RTM_NEWROUTE,
					    NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, 0);
		else
			err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
					    &fl6.saddr, iif, RTM_NEWROUTE,
					    NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, 0);
	} else {
		err = -ENETUNREACH;
	}
	rcu_read_unlock();

	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}
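/* Illustrative sketch, not part of the kernel build: the single-route
 * query that inet6_rtm_getroute() above answers.  One RTA_DST
 * attribute rides behind the rtmsg header; the kernel replies with a
 * single RTM_NEWROUTE message rendered by rt6_fill_node().  The helper
 * name is hypothetical and error handling is minimal.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int query_ipv6_route(int fd, const struct in6_addr *dst)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		char buf[64];		/* room for one RTA_DST attribute */
	} req;
	struct rtattr *rta;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nlh.nlmsg_type = RTM_GETROUTE;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.rtm.rtm_family = AF_INET6;
	req.rtm.rtm_dst_len = 128;	/* strict validation demands 128 */

	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = RTA_DST;
	rta->rta_len = RTA_LENGTH(sizeof(*dst));
	memcpy(RTA_DATA(rta), dst, sizeof(*dst));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + rta->rta_len;

	return send(fd, &req, req.nlh.nlmsg_len, 0);
}
#endif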
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
		     unsigned int nlm_flags)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err;

	err = -ENOBUFS;
	seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
			    event, info->portid, seq, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
void fib6_rt_update(struct net *net, struct fib6_info *rt,
		    struct nl_info *info)
{
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	/* call_fib6_entry_notifiers will be removed when in-kernel notifier
	 * is implemented and supported for nexthop objects
	 */
	call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
			    RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
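/* Illustrative sketch, not part of the kernel build: subscribing to
 * the RTNLGRP_IPV6_ROUTE notifications that inet6_rt_notify() and
 * fib6_rt_update() broadcast.  Binding with nl_groups set to the
 * group's bit is the classic way to join the multicast group; the
 * helper name is hypothetical.
 */
#if 0
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

static int open_route_monitor(void)
{
	struct sockaddr_nl snl;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&snl, 0, sizeof(snl));
	snl.nl_family = AF_NETLINK;
	snl.nl_groups = RTMGRP_IPV6_ROUTE;	/* 1 << (RTNLGRP_IPV6_ROUTE - 1) */

	if (bind(fd, (struct sockaddr *)&snl, sizeof(snl)) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* recv() now yields RTM_NEWROUTE/RTM_DELROUTE */
}
#endif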
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (!(dev->flags & IFF_LOOPBACK))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	} else if (event == NETDEV_UNREGISTER &&
		   dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER could be fired for multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
	}

	return NOTIFY_OK;
}
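/* Illustrative sketch, not part of the kernel build: the generic
 * netdevice-notifier pattern the callback above follows.  Any kernel
 * code can hook the same chain; the example_ names are hypothetical.
 */
#if 0
static int example_dev_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		pr_info("%s registered\n", dev->name);
		break;
	case NETDEV_UNREGISTER:
		pr_info("%s going away\n", dev->name);
		break;
	}
	return NOTIFY_OK;	/* never veto events we don't care about */
}

static struct notifier_block example_dev_nb = {
	.notifier_call = example_dev_event,
};

/* from module init: register_netdevice_notifier(&example_dev_nb); */
#endif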
#ifdef CONFIG_PROC_FS
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}
#endif	/* CONFIG_PROC_FS */
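/* Illustrative sketch, not part of the kernel build: consuming the
 * seven hex fields rt6_stats_seq_show() prints to /proc/net/rt6_stats.
 * The field order matches the seq_printf() call above; the helper name
 * is hypothetical.
 */
#if 0
#include <stdio.h>

static int read_rt6_stats(void)
{
	unsigned int nodes, route_nodes, rt_alloc, rt_entries;
	unsigned int rt_cache, dst_ents, discarded;
	FILE *f = fopen("/proc/net/rt6_stats", "r");

	if (!f)
		return -1;
	if (fscanf(f, "%x %x %x %x %x %x %x", &nodes, &route_nodes,
		   &rt_alloc, &rt_entries, &rt_cache, &dst_ents,
		   &discarded) != 7) {
		fclose(f);
		return -1;
	}
	fclose(f);
	printf("fib nodes %u, route entries %u, dst entries %u\n",
	       nodes, rt_entries, dst_ents);
	return 0;
}
#endif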
#ifdef CONFIG_SYSCTL

static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
				     void __user *buffer, size_t *lenp,
				     loff_t *ppos)
{
	struct net *net;
	int delay;
	int ret;

	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}
static struct ctl_table ipv6_route_table_template[] = {
	{
		.procname	=	"flush",
		.data		=	&init_net.ipv6.sysctl.flush_delay,
		.maxlen		=	sizeof(int),
		.mode		=	0200,
		.proc_handler	=	ipv6_sysctl_rtcache_flush
	},
	{
		.procname	=	"gc_thresh",
		.data		=	&ip6_dst_ops_template.gc_thresh,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"max_size",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_timeout",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_elasticity",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"mtu_expires",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"min_adv_mss",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval_ms",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_ms_jiffies,
	},
	{
		.procname	=	"skip_notify_on_dev_down",
		.data		=	&init_net.ipv6.sysctl.skip_notify_on_dev_down,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{ }
};
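/* Illustrative sketch, not part of the kernel build: poking the
 * write-only "flush" entry above from userspace.  The written integer
 * is handed to fib6_run_gc() as the expiry argument; the helper name
 * is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int flush_ipv6_routes(void)
{
	int fd = open("/proc/sys/net/ipv6/route/flush", O_WRONLY);

	if (fd < 0)
		return -1;	/* absent in non-init user namespaces */
	if (write(fd, "1", 1) != 1) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif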
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		/* Indices must track the template order above. */
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
#endif
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;
	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
	       sizeof(*net->ipv6.fib6_null_entry));

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_fib6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
	net->ipv6.sysctl.skip_notify_on_dev_down = 0;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.fib6_null_entry);
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
			sizeof(struct ipv6_route_iter));
	proc_create_net_single("rt6_stats", 0444, net->proc_net,
			       rt6_stats_seq_show, NULL);
#endif
	return 0;
}

static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}

static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static struct pernet_operations ipv6_inetpeer_ops = {
	.init = ipv6_inetpeer_init,
	.exit = ipv6_inetpeer_exit,
};

static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
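/* Illustrative sketch, not part of the kernel build: the pernet
 * pattern used by the three operations structures above.  init runs
 * for every network namespace as it is created (and for existing ones
 * at registration time); exit runs at namespace teardown, in reverse
 * registration order.  The example_ names are hypothetical.
 */
#if 0
static int __net_init example_net_init(struct net *net)
{
	/* allocate per-namespace state and hang it off 'net' */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* release whatever example_net_init() allocated */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

/* from subsystem init: register_pernet_subsys(&example_net_ops); */
#endif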
void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code;
	 * the loopback reference in rt6_info will not be taken, so do it
	 * manually for init_net.
	 */
	init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
}
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
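/* Illustrative sketch, not part of the kernel build: the unwind idiom
 * ip6_route_init() relies on.  Every failure jumps to the label that
 * undoes exactly what succeeded so far, the labels run in reverse
 * order of setup, and a final 'goto out' funnels all paths through one
 * return.  The step_ and undo_ names are hypothetical.
 */
#if 0
static int example_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto out;
	ret = step_b();
	if (ret)
		goto undo_a;
	ret = step_c();
	if (ret)
		goto undo_b;
out:
	return ret;

undo_b:
	undo_step_b();
undo_a:
	undo_step_a();
	goto out;
}
#endif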
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}