1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Linux INET6 implementation
7 * Pedro Roque <roque@di.fc.ul.pt>
12 * YOSHIFUJI Hideaki @USAGI
13 * reworked default router selection.
14 * - respect outgoing interface
15 * - select from (probably) reachable routers (i.e.
16 * routers in REACHABLE, STALE, DELAY or PROBE states).
17 * - always select the same router if it is (probably)
18 * reachable. otherwise, round-robin the list.
20 * Fixed routing subtrees.
23 #define pr_fmt(fmt) "IPv6: " fmt
25 #include <linux/capability.h>
26 #include <linux/errno.h>
27 #include <linux/export.h>
28 #include <linux/types.h>
29 #include <linux/times.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/route.h>
34 #include <linux/netdevice.h>
35 #include <linux/in6.h>
36 #include <linux/mroute6.h>
37 #include <linux/init.h>
38 #include <linux/if_arp.h>
39 #include <linux/proc_fs.h>
40 #include <linux/seq_file.h>
41 #include <linux/nsproxy.h>
42 #include <linux/slab.h>
43 #include <linux/jhash.h>
44 #include <net/net_namespace.h>
47 #include <net/ip6_fib.h>
48 #include <net/ip6_route.h>
49 #include <net/ndisc.h>
50 #include <net/addrconf.h>
52 #include <linux/rtnetlink.h>
54 #include <net/dst_metadata.h>
56 #include <net/netevent.h>
57 #include <net/netlink.h>
59 #include <net/lwtunnel.h>
60 #include <net/ip_tunnels.h>
61 #include <net/l3mdev.h>
63 #include <linux/uaccess.h>
66 #include <linux/sysctl.h>
69 static int ip6_rt_type_to_error(u8 fib6_type);
71 #define CREATE_TRACE_POINTS
72 #include <trace/events/fib6.h>
73 EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
74 #undef CREATE_TRACE_POINTS
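/* Neighbour-reachability verdicts returned by rt6_check_neigh() and
 * propagated through rt6_score_route(): RT6_NUD_FAIL_HARD means the
 * nexthop must be skipped, RT6_NUD_FAIL_PROBE that the gateway's
 * reachability is unconfirmed and a probe may be scheduled, and
 * RT6_NUD_FAIL_DO_RR that round-robin selection should be used instead
 * (see find_match()).
 */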
77 RT6_NUD_FAIL_HARD = -3,
78 RT6_NUD_FAIL_PROBE = -2,
79 RT6_NUD_FAIL_DO_RR = -1,
83 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
84 static unsigned int ip6_default_advmss(const struct dst_entry *dst);
85 static unsigned int ip6_mtu(const struct dst_entry *dst);
86 static struct dst_entry *ip6_negative_advice(struct dst_entry *);
87 static void ip6_dst_destroy(struct dst_entry *);
88 static void ip6_dst_ifdown(struct dst_entry *,
89 struct net_device *dev, int how);
90 static int ip6_dst_gc(struct dst_ops *ops);
92 static int ip6_pkt_discard(struct sk_buff *skb);
93 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
94 static int ip6_pkt_prohibit(struct sk_buff *skb);
95 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
96 static void ip6_link_failure(struct sk_buff *skb);
97 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
98 struct sk_buff *skb, u32 mtu);
99 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
100 struct sk_buff *skb);
101 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
103 static size_t rt6_nlmsg_size(struct fib6_info *rt);
104 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
105 struct fib6_info *rt, struct dst_entry *dst,
106 struct in6_addr *dest, struct in6_addr *src,
107 int iif, int type, u32 portid, u32 seq,
109 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
110 const struct in6_addr *daddr,
111 const struct in6_addr *saddr);
113 #ifdef CONFIG_IPV6_ROUTE_INFO
114 static struct fib6_info *rt6_add_route_info(struct net *net,
115 const struct in6_addr *prefix, int prefixlen,
116 const struct in6_addr *gwaddr,
117 struct net_device *dev,
119 static struct fib6_info *rt6_get_route_info(struct net *net,
120 const struct in6_addr *prefix, int prefixlen,
121 const struct in6_addr *gwaddr,
122 struct net_device *dev);
125 struct uncached_list {
127 struct list_head head;
130 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
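/* Per-cpu list of rt6_info entries that live outside the FIB tree (the
 * "uncached" clones created in ip6_pol_route()).  rt6_uncached_list_add()
 * and rt6_uncached_list_del() maintain it, and rt6_uncached_list_flush_dev()
 * re-points the entries at the loopback device when their device goes away.
 */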
132 void rt6_uncached_list_add(struct rt6_info *rt)
134 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
136 rt->rt6i_uncached_list = ul;
138 spin_lock_bh(&ul->lock);
139 list_add_tail(&rt->rt6i_uncached, &ul->head);
140 spin_unlock_bh(&ul->lock);
143 void rt6_uncached_list_del(struct rt6_info *rt)
145 if (!list_empty(&rt->rt6i_uncached)) {
146 struct uncached_list *ul = rt->rt6i_uncached_list;
147 struct net *net = dev_net(rt->dst.dev);
149 spin_lock_bh(&ul->lock);
150 list_del(&rt->rt6i_uncached);
151 atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
152 spin_unlock_bh(&ul->lock);
156 static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
158 struct net_device *loopback_dev = net->loopback_dev;
161 if (dev == loopback_dev)
164 for_each_possible_cpu(cpu) {
165 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
168 spin_lock_bh(&ul->lock);
169 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
170 struct inet6_dev *rt_idev = rt->rt6i_idev;
171 struct net_device *rt_dev = rt->dst.dev;
173 if (rt_idev->dev == dev) {
174 rt->rt6i_idev = in6_dev_get(loopback_dev);
175 in6_dev_put(rt_idev);
179 rt->dst.dev = loopback_dev;
180 dev_hold(rt->dst.dev);
184 spin_unlock_bh(&ul->lock);
188 static inline const void *choose_neigh_daddr(const struct in6_addr *p,
192 if (!ipv6_addr_any(p))
193 return (const void *) p;
195 return &ipv6_hdr(skb)->daddr;
199 struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
200 struct net_device *dev,
206 daddr = choose_neigh_daddr(gw, skb, daddr);
207 n = __ipv6_neigh_lookup(dev, daddr);
211 n = neigh_create(&nd_tbl, daddr, dev);
212 return IS_ERR(n) ? NULL : n;
215 static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
219 const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);
221 return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr);
224 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
226 struct net_device *dev = dst->dev;
227 struct rt6_info *rt = (struct rt6_info *)dst;
229 daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr);
232 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
234 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
236 __ipv6_confirm_neigh(dev, daddr);
239 static struct dst_ops ip6_dst_ops_template = {
243 .check = ip6_dst_check,
244 .default_advmss = ip6_default_advmss,
246 .cow_metrics = dst_cow_metrics_generic,
247 .destroy = ip6_dst_destroy,
248 .ifdown = ip6_dst_ifdown,
249 .negative_advice = ip6_negative_advice,
250 .link_failure = ip6_link_failure,
251 .update_pmtu = ip6_rt_update_pmtu,
252 .redirect = rt6_do_redirect,
253 .local_out = __ip6_local_out,
254 .neigh_lookup = ip6_dst_neigh_lookup,
255 .confirm_neigh = ip6_confirm_neigh,
258 static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
260 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
262 return mtu ? : dst->dev->mtu;
265 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
266 struct sk_buff *skb, u32 mtu)
270 static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
275 static struct dst_ops ip6_dst_blackhole_ops = {
277 .destroy = ip6_dst_destroy,
278 .check = ip6_dst_check,
279 .mtu = ip6_blackhole_mtu,
280 .default_advmss = ip6_default_advmss,
281 .update_pmtu = ip6_rt_blackhole_update_pmtu,
282 .redirect = ip6_rt_blackhole_redirect,
283 .cow_metrics = dst_cow_metrics_generic,
284 .neigh_lookup = ip6_dst_neigh_lookup,
287 static const u32 ip6_template_metrics[RTAX_MAX] = {
288 [RTAX_HOPLIMIT - 1] = 0,
291 static const struct fib6_info fib6_null_entry_template = {
292 .fib6_flags = (RTF_REJECT | RTF_NONEXTHOP),
293 .fib6_protocol = RTPROT_KERNEL,
294 .fib6_metric = ~(u32)0,
295 .fib6_ref = REFCOUNT_INIT(1),
296 .fib6_type = RTN_UNREACHABLE,
297 .fib6_metrics = (struct dst_metrics *)&dst_default_metrics,
300 static const struct rt6_info ip6_null_entry_template = {
302 .__refcnt = ATOMIC_INIT(1),
304 .obsolete = DST_OBSOLETE_FORCE_CHK,
305 .error = -ENETUNREACH,
306 .input = ip6_pkt_discard,
307 .output = ip6_pkt_discard_out,
309 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
312 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
314 static const struct rt6_info ip6_prohibit_entry_template = {
316 .__refcnt = ATOMIC_INIT(1),
318 .obsolete = DST_OBSOLETE_FORCE_CHK,
320 .input = ip6_pkt_prohibit,
321 .output = ip6_pkt_prohibit_out,
323 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
326 static const struct rt6_info ip6_blk_hole_entry_template = {
328 .__refcnt = ATOMIC_INIT(1),
330 .obsolete = DST_OBSOLETE_FORCE_CHK,
332 .input = dst_discard,
333 .output = dst_discard_out,
335 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
340 static void rt6_info_init(struct rt6_info *rt)
342 struct dst_entry *dst = &rt->dst;
344 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
345 INIT_LIST_HEAD(&rt->rt6i_uncached);
348 /* allocate dst with ip6_dst_ops */
349 struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
352 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
353 1, DST_OBSOLETE_FORCE_CHK, flags);
357 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
362 EXPORT_SYMBOL(ip6_dst_alloc);
364 static void ip6_dst_destroy(struct dst_entry *dst)
366 struct rt6_info *rt = (struct rt6_info *)dst;
367 struct fib6_info *from;
368 struct inet6_dev *idev;
370 ip_dst_metrics_put(dst);
371 rt6_uncached_list_del(rt);
373 idev = rt->rt6i_idev;
375 rt->rt6i_idev = NULL;
379 from = xchg((__force struct fib6_info **)&rt->from, NULL);
380 fib6_info_release(from);
383 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
386 struct rt6_info *rt = (struct rt6_info *)dst;
387 struct inet6_dev *idev = rt->rt6i_idev;
388 struct net_device *loopback_dev =
389 dev_net(dev)->loopback_dev;
391 if (idev && idev->dev != loopback_dev) {
392 struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
394 rt->rt6i_idev = loopback_idev;
400 static bool __rt6_check_expired(const struct rt6_info *rt)
402 if (rt->rt6i_flags & RTF_EXPIRES)
403 return time_after(jiffies, rt->dst.expires);
408 static bool rt6_check_expired(const struct rt6_info *rt)
410 struct fib6_info *from;
412 from = rcu_dereference(rt->from);
414 if (rt->rt6i_flags & RTF_EXPIRES) {
415 if (time_after(jiffies, rt->dst.expires))
418 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
419 fib6_check_expired(from);
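/* Multipath selection below is hash-threshold based: fl6->mp_hash (from
 * rt6_multipath_hash()) is compared against each nexthop's
 * fib_nh_upper_bound, and the first sibling whose bound is not exceeded
 * and whose rt6_score_route() is non-negative becomes res->nh.
 */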
424 void fib6_select_path(const struct net *net, struct fib6_result *res,
425 struct flowi6 *fl6, int oif, bool have_oif_match,
426 const struct sk_buff *skb, int strict)
428 struct fib6_info *sibling, *next_sibling;
429 struct fib6_info *match = res->f6i;
431 if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
434 /* We might have already computed the hash for ICMPv6 errors. In such
435 * a case it will always be non-zero. Otherwise now is the time to do it.
438 (!match->nh || nexthop_is_multipath(match->nh)))
439 fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
441 if (unlikely(match->nh)) {
442 nexthop_path_fib6_result(res, fl6->mp_hash);
446 if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
449 list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
451 const struct fib6_nh *nh = sibling->fib6_nh;
454 nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
455 if (fl6->mp_hash > nh_upper_bound)
457 if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
465 res->nh = match->fib6_nh;
469 * Route lookup. rcu_read_lock() should be held.
472 static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
473 const struct in6_addr *saddr, int oif, int flags)
475 const struct net_device *dev;
477 if (nh->fib_nh_flags & RTNH_F_DEAD)
480 dev = nh->fib_nh_dev;
482 if (dev->ifindex == oif)
485 if (ipv6_chk_addr(net, saddr, dev,
486 flags & RT6_LOOKUP_F_IFACE))
493 struct fib6_nh_dm_arg {
495 const struct in6_addr *saddr;
501 static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
503 struct fib6_nh_dm_arg *arg = _arg;
506 return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
510 /* returns fib6_nh from nexthop or NULL */
511 static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
512 struct fib6_result *res,
513 const struct in6_addr *saddr,
516 struct fib6_nh_dm_arg arg = {
523 if (nexthop_is_blackhole(nh))
526 if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
532 static void rt6_device_match(struct net *net, struct fib6_result *res,
533 const struct in6_addr *saddr, int oif, int flags)
535 struct fib6_info *f6i = res->f6i;
536 struct fib6_info *spf6i;
539 if (!oif && ipv6_addr_any(saddr)) {
540 if (unlikely(f6i->nh)) {
541 nh = nexthop_fib6_nh(f6i->nh);
542 if (nexthop_is_blackhole(f6i->nh))
547 if (!(nh->fib_nh_flags & RTNH_F_DEAD))
551 for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
552 bool matched = false;
554 if (unlikely(spf6i->nh)) {
555 nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
561 if (__rt6_device_match(net, nh, saddr, oif, flags))
570 if (oif && flags & RT6_LOOKUP_F_IFACE) {
571 res->f6i = net->ipv6.fib6_null_entry;
572 nh = res->f6i->fib6_nh;
576 if (unlikely(f6i->nh)) {
577 nh = nexthop_fib6_nh(f6i->nh);
578 if (nexthop_is_blackhole(f6i->nh))
584 if (nh->fib_nh_flags & RTNH_F_DEAD) {
585 res->f6i = net->ipv6.fib6_null_entry;
586 nh = res->f6i->fib6_nh;
590 res->fib6_type = res->f6i->fib6_type;
591 res->fib6_flags = res->f6i->fib6_flags;
595 res->fib6_flags |= RTF_REJECT;
596 res->fib6_type = RTN_BLACKHOLE;
600 #ifdef CONFIG_IPV6_ROUTER_PREF
601 struct __rt6_probe_work {
602 struct work_struct work;
603 struct in6_addr target;
604 struct net_device *dev;
607 static void rt6_probe_deferred(struct work_struct *w)
609 struct in6_addr mcaddr;
610 struct __rt6_probe_work *work =
611 container_of(w, struct __rt6_probe_work, work);
613 addrconf_addr_solict_mult(&work->target, &mcaddr);
614 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
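/* rt6_probe() rate-limits neighbour solicitations towards a nexthop's
 * gateway: when the neighbour entry is not NUD_VALID and the per-device
 * rtr_probe_interval has elapsed, an NS is sent from workqueue context by
 * rt6_probe_deferred(), addressed to the gateway's solicited-node
 * multicast group.
 */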
619 static void rt6_probe(struct fib6_nh *fib6_nh)
621 struct __rt6_probe_work *work = NULL;
622 const struct in6_addr *nh_gw;
623 struct neighbour *neigh;
624 struct net_device *dev;
625 struct inet6_dev *idev;
628 * This does not seem entirely appropriate for now, but we need
629 * to check whether the router really is reachable; this is
630 * Router Reachability Probing.
632 * A Router Reachability Probe MUST be rate-limited
633 * to no more than one per minute.
635 if (fib6_nh->fib_nh_gw_family)
638 nh_gw = &fib6_nh->fib_nh_gw6;
639 dev = fib6_nh->fib_nh_dev;
641 idev = __in6_dev_get(dev);
642 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
644 if (neigh->nud_state & NUD_VALID)
647 write_lock(&neigh->lock);
648 if (!(neigh->nud_state & NUD_VALID) &&
650 neigh->updated + idev->cnf.rtr_probe_interval)) {
651 work = kmalloc(sizeof(*work), GFP_ATOMIC);
653 __neigh_set_probe_once(neigh);
655 write_unlock(&neigh->lock);
656 } else if (time_after(jiffies, fib6_nh->last_probe +
657 idev->cnf.rtr_probe_interval)) {
658 work = kmalloc(sizeof(*work), GFP_ATOMIC);
662 fib6_nh->last_probe = jiffies;
663 INIT_WORK(&work->work, rt6_probe_deferred);
664 work->target = *nh_gw;
667 schedule_work(&work->work);
671 rcu_read_unlock_bh();
674 static inline void rt6_probe(struct fib6_nh *fib6_nh)
680 * Default Router Selection (RFC 2461 6.3.6)
682 static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
684 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
685 struct neighbour *neigh;
688 neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
689 &fib6_nh->fib_nh_gw6);
691 read_lock(&neigh->lock);
692 if (neigh->nud_state & NUD_VALID)
693 ret = RT6_NUD_SUCCEED;
694 #ifdef CONFIG_IPV6_ROUTER_PREF
695 else if (!(neigh->nud_state & NUD_FAILED))
696 ret = RT6_NUD_SUCCEED;
698 ret = RT6_NUD_FAIL_PROBE;
700 read_unlock(&neigh->lock);
702 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
703 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
705 rcu_read_unlock_bh();
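/* rt6_score_route() turns a nexthop into a single integer score built from
 * the outgoing-interface match, the RFC 4191 route preference (with
 * CONFIG_IPV6_ROUTER_PREF) and, for RT6_LOOKUP_F_REACHABLE lookups, the
 * rt6_check_neigh() verdict; negative RT6_NUD_* values carry the failure
 * mode back to find_match().
 */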
710 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
715 if (!oif || nh->fib_nh_dev->ifindex == oif)
718 if (!m && (strict & RT6_LOOKUP_F_IFACE))
719 return RT6_NUD_FAIL_HARD;
720 #ifdef CONFIG_IPV6_ROUTER_PREF
721 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
723 if ((strict & RT6_LOOKUP_F_REACHABLE) &&
724 !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
725 int n = rt6_check_neigh(nh);
732 static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
733 int oif, int strict, int *mpri, bool *do_rr)
735 bool match_do_rr = false;
739 if (nh->fib_nh_flags & RTNH_F_DEAD)
742 if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
743 nh->fib_nh_flags & RTNH_F_LINKDOWN &&
744 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
747 m = rt6_score_route(nh, fib6_flags, oif, strict);
748 if (m == RT6_NUD_FAIL_DO_RR) {
750 m = 0; /* lowest valid score */
751 } else if (m == RT6_NUD_FAIL_HARD) {
755 if (strict & RT6_LOOKUP_F_REACHABLE)
758 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
760 *do_rr = match_do_rr;
768 struct fib6_nh_frl_arg {
777 static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
779 struct fib6_nh_frl_arg *arg = _arg;
782 return find_match(nh, arg->flags, arg->oif, arg->strict,
783 arg->mpri, arg->do_rr);
786 static void __find_rr_leaf(struct fib6_info *f6i_start,
787 struct fib6_info *nomatch, u32 metric,
788 struct fib6_result *res, struct fib6_info **cont,
789 int oif, int strict, bool *do_rr, int *mpri)
791 struct fib6_info *f6i;
793 for (f6i = f6i_start;
794 f6i && f6i != nomatch;
795 f6i = rcu_dereference(f6i->fib6_next)) {
796 bool matched = false;
799 if (cont && f6i->fib6_metric != metric) {
804 if (fib6_check_expired(f6i))
807 if (unlikely(f6i->nh)) {
808 struct fib6_nh_frl_arg arg = {
809 .flags = f6i->fib6_flags,
816 if (nexthop_is_blackhole(f6i->nh)) {
817 res->fib6_flags = RTF_REJECT;
818 res->fib6_type = RTN_BLACKHOLE;
820 res->nh = nexthop_fib6_nh(f6i->nh);
823 if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
830 if (find_match(nh, f6i->fib6_flags, oif, strict,
837 res->fib6_flags = f6i->fib6_flags;
838 res->fib6_type = f6i->fib6_type;
843 static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
844 struct fib6_info *rr_head, int oif, int strict,
845 bool *do_rr, struct fib6_result *res)
847 u32 metric = rr_head->fib6_metric;
848 struct fib6_info *cont = NULL;
851 __find_rr_leaf(rr_head, NULL, metric, res, &cont,
852 oif, strict, do_rr, &mpri);
854 __find_rr_leaf(leaf, rr_head, metric, res, &cont,
855 oif, strict, do_rr, &mpri);
857 if (res->f6i || !cont)
860 __find_rr_leaf(cont, NULL, metric, res, NULL,
861 oif, strict, do_rr, &mpri);
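/* rt6_select() implements the round-robin part of default router selection:
 * find_rr_leaf() scans the routes sharing fn->rr_ptr's metric, and when the
 * chosen route asked for round-robin (do_rr) the rr_ptr is advanced under
 * tb6_lock to the next sibling of equal metric, so later lookups rotate
 * through equal-cost routers.
 */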
864 static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
865 struct fib6_result *res, int strict)
867 struct fib6_info *leaf = rcu_dereference(fn->leaf);
868 struct fib6_info *rt0;
872 /* make sure this function or its helpers sets f6i */
875 if (!leaf || leaf == net->ipv6.fib6_null_entry)
878 rt0 = rcu_dereference(fn->rr_ptr);
882 /* Double check to make sure fn is not an intermediate node
883 * and fn->leaf does not point to its child's leaf
884 * (This might happen if all routes under fn are deleted from
885 * the tree and fib6_repair_tree() is called on the node.)
887 key_plen = rt0->fib6_dst.plen;
888 #ifdef CONFIG_IPV6_SUBTREES
889 if (rt0->fib6_src.plen)
890 key_plen = rt0->fib6_src.plen;
892 if (fn->fn_bit != key_plen)
895 find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
897 struct fib6_info *next = rcu_dereference(rt0->fib6_next);
899 /* no entries matched; do round-robin */
900 if (!next || next->fib6_metric != rt0->fib6_metric)
904 spin_lock_bh(&leaf->fib6_table->tb6_lock);
905 /* make sure next is not being deleted from the tree */
907 rcu_assign_pointer(fn->rr_ptr, next);
908 spin_unlock_bh(&leaf->fib6_table->tb6_lock);
914 res->f6i = net->ipv6.fib6_null_entry;
915 res->nh = res->f6i->fib6_nh;
916 res->fib6_flags = res->f6i->fib6_flags;
917 res->fib6_type = res->f6i->fib6_type;
921 static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
923 return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
924 res->nh->fib_nh_gw_family;
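/* rt6_route_rcv() handles an RFC 4191 Route Information option received in
 * a Router Advertisement: it validates the option length against
 * prefix_len, then adds, refreshes or (on a zero lifetime) removes the
 * matching RTF_ROUTEINFO route with the advertised preference and lifetime.
 */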
927 #ifdef CONFIG_IPV6_ROUTE_INFO
928 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
929 const struct in6_addr *gwaddr)
931 struct net *net = dev_net(dev);
932 struct route_info *rinfo = (struct route_info *) opt;
933 struct in6_addr prefix_buf, *prefix;
935 unsigned long lifetime;
936 struct fib6_info *rt;
938 if (len < sizeof(struct route_info)) {
942 /* Sanity check for prefix_len and length */
943 if (rinfo->length > 3) {
945 } else if (rinfo->prefix_len > 128) {
947 } else if (rinfo->prefix_len > 64) {
948 if (rinfo->length < 2) {
951 } else if (rinfo->prefix_len > 0) {
952 if (rinfo->length < 1) {
957 pref = rinfo->route_pref;
958 if (pref == ICMPV6_ROUTER_PREF_INVALID)
961 lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
963 if (rinfo->length == 3)
964 prefix = (struct in6_addr *)rinfo->prefix;
966 /* this function is safe */
967 ipv6_addr_prefix(&prefix_buf,
968 (struct in6_addr *)rinfo->prefix,
970 prefix = &prefix_buf;
973 if (rinfo->prefix_len == 0)
974 rt = rt6_get_dflt_router(net, gwaddr, dev);
976 rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
979 if (rt && !lifetime) {
985 rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
988 rt->fib6_flags = RTF_ROUTEINFO |
989 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
992 if (!addrconf_finite_timeout(lifetime))
993 fib6_clean_expires(rt);
995 fib6_set_expires(rt, jiffies + HZ * lifetime);
997 fib6_info_release(rt);
1004 * Misc support functions
1007 /* called with rcu_lock held */
1008 static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
1010 struct net_device *dev = res->nh->fib_nh_dev;
1012 if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
1013 /* for copies of local routes, dst->dev needs to be the
1014 * device itself when it is an L3 master device, the master device
1015 * when the device is enslaved, and the loopback device by default
1017 if (netif_is_l3_slave(dev) &&
1018 !rt6_need_strict(&res->f6i->fib6_dst.addr))
1019 dev = l3mdev_master_dev_rcu(dev);
1020 else if (!netif_is_l3_master(dev))
1021 dev = dev_net(dev)->loopback_dev;
1022 /* in the remaining case, netif_is_l3_master(dev) is true, in which
1023 * case we want dev itself to be returned
1030 static const int fib6_prop[RTN_MAX + 1] = {
1034 [RTN_BROADCAST] = 0,
1036 [RTN_MULTICAST] = 0,
1037 [RTN_BLACKHOLE] = -EINVAL,
1038 [RTN_UNREACHABLE] = -EHOSTUNREACH,
1039 [RTN_PROHIBIT] = -EACCES,
1040 [RTN_THROW] = -EAGAIN,
1041 [RTN_NAT] = -EINVAL,
1042 [RTN_XRESOLVE] = -EINVAL,
1045 static int ip6_rt_type_to_error(u8 fib6_type)
1047 return fib6_prop[fib6_type];
1050 static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
1052 unsigned short flags = 0;
1054 if (rt->dst_nocount)
1055 flags |= DST_NOCOUNT;
1056 if (rt->dst_nopolicy)
1057 flags |= DST_NOPOLICY;
1064 static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
1066 rt->dst.error = ip6_rt_type_to_error(fib6_type);
1068 switch (fib6_type) {
1070 rt->dst.output = dst_discard_out;
1071 rt->dst.input = dst_discard;
1074 rt->dst.output = ip6_pkt_prohibit_out;
1075 rt->dst.input = ip6_pkt_prohibit;
1078 case RTN_UNREACHABLE:
1080 rt->dst.output = ip6_pkt_discard_out;
1081 rt->dst.input = ip6_pkt_discard;
1086 static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
1088 struct fib6_info *f6i = res->f6i;
1090 if (res->fib6_flags & RTF_REJECT) {
1091 ip6_rt_init_dst_reject(rt, res->fib6_type);
1096 rt->dst.output = ip6_output;
1098 if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
1099 rt->dst.input = ip6_input;
1100 } else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
1101 rt->dst.input = ip6_mc_input;
1103 rt->dst.input = ip6_forward;
1106 if (res->nh->fib_nh_lws) {
1107 rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
1108 lwtunnel_set_redirect(&rt->dst);
1111 rt->dst.lastuse = jiffies;
1114 /* Caller must already hold reference to @from */
1115 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
1117 rt->rt6i_flags &= ~RTF_EXPIRES;
1118 rcu_assign_pointer(rt->from, from);
1119 ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
1122 /* Caller must already hold reference to f6i in result */
1123 static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
1125 const struct fib6_nh *nh = res->nh;
1126 const struct net_device *dev = nh->fib_nh_dev;
1127 struct fib6_info *f6i = res->f6i;
1129 ip6_rt_init_dst(rt, res);
1131 rt->rt6i_dst = f6i->fib6_dst;
1132 rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
1133 rt->rt6i_flags = res->fib6_flags;
1134 if (nh->fib_nh_gw_family) {
1135 rt->rt6i_gateway = nh->fib_nh_gw6;
1136 rt->rt6i_flags |= RTF_GATEWAY;
1138 rt6_set_from(rt, f6i);
1139 #ifdef CONFIG_IPV6_SUBTREES
1140 rt->rt6i_src = f6i->fib6_src;
1144 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
1145 struct in6_addr *saddr)
1147 struct fib6_node *pn, *sn;
1149 if (fn->fn_flags & RTN_TL_ROOT)
1151 pn = rcu_dereference(fn->parent);
1152 sn = FIB6_SUBTREE(pn);
1154 fn = fib6_node_lookup(sn, NULL, saddr);
1157 if (fn->fn_flags & RTN_RTINFO)
1162 static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
1164 struct rt6_info *rt = *prt;
1166 if (dst_hold_safe(&rt->dst))
1169 rt = net->ipv6.ip6_null_entry;
1178 /* called with rcu_lock held */
1179 static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
1181 struct net_device *dev = res->nh->fib_nh_dev;
1182 struct fib6_info *f6i = res->f6i;
1183 unsigned short flags;
1184 struct rt6_info *nrt;
1186 if (!fib6_info_hold_safe(f6i))
1189 flags = fib6_info_dst_flags(f6i);
1190 nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1192 fib6_info_release(f6i);
1196 ip6_rt_copy_init(nrt, res);
1200 nrt = dev_net(dev)->ipv6.ip6_null_entry;
1201 dst_hold(&nrt->dst);
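/* ip6_pol_route_lookup() is the lockless (RCU) lookup used by rt6_lookup()
 * and ip6_route_lookup(): it finds the longest-prefix node, filters
 * nexthops with rt6_device_match(), backtracks past null entries, picks a
 * multipath nexthop with fib6_select_path(), and returns a cached exception
 * route when one exists, otherwise a clone built by ip6_create_rt_rcu().
 */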
1205 static struct rt6_info *ip6_pol_route_lookup(struct net *net,
1206 struct fib6_table *table,
1208 const struct sk_buff *skb,
1211 struct fib6_result res = {};
1212 struct fib6_node *fn;
1213 struct rt6_info *rt;
1215 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1216 flags &= ~RT6_LOOKUP_F_IFACE;
1219 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1221 res.f6i = rcu_dereference(fn->leaf);
1223 res.f6i = net->ipv6.fib6_null_entry;
1225 rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
1228 if (res.f6i == net->ipv6.fib6_null_entry) {
1229 fn = fib6_backtrack(fn, &fl6->saddr);
1233 rt = net->ipv6.ip6_null_entry;
1236 } else if (res.fib6_flags & RTF_REJECT) {
1240 fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
1241 fl6->flowi6_oif != 0, skb, flags);
1243 /* Search through exception table */
1244 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1246 if (ip6_hold_safe(net, &rt))
1247 dst_use_noref(&rt->dst, jiffies);
1250 rt = ip6_create_rt_rcu(&res);
1254 trace_fib6_table_lookup(net, &res, table, fl6);
1261 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
1262 const struct sk_buff *skb, int flags)
1264 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
1266 EXPORT_SYMBOL_GPL(ip6_route_lookup);
1268 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
1269 const struct in6_addr *saddr, int oif,
1270 const struct sk_buff *skb, int strict)
1272 struct flowi6 fl6 = {
1276 struct dst_entry *dst;
1277 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
1280 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
1281 flags |= RT6_LOOKUP_F_HAS_SADDR;
1284 dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
1285 if (dst->error == 0)
1286 return (struct rt6_info *) dst;
1292 EXPORT_SYMBOL(rt6_lookup);
1294 /* ip6_ins_rt is called with FREE table->tb6_lock.
1295 * It takes the new route entry; if the addition fails for any reason,
1296 * the route is released.
1297 * Caller must hold dst before calling it.
1300 static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
1301 struct netlink_ext_ack *extack)
1304 struct fib6_table *table;
1306 table = rt->fib6_table;
1307 spin_lock_bh(&table->tb6_lock);
1308 err = fib6_add(&table->tb6_root, rt, info, extack);
1309 spin_unlock_bh(&table->tb6_lock);
1314 int ip6_ins_rt(struct net *net, struct fib6_info *rt)
1316 struct nl_info info = { .nl_net = net, };
1318 return __ip6_ins_rt(rt, &info, NULL);
1321 static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
1322 const struct in6_addr *daddr,
1323 const struct in6_addr *saddr)
1325 struct fib6_info *f6i = res->f6i;
1326 struct net_device *dev;
1327 struct rt6_info *rt;
1333 if (!fib6_info_hold_safe(f6i))
1336 dev = ip6_rt_get_dev_rcu(res);
1337 rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1339 fib6_info_release(f6i);
1343 ip6_rt_copy_init(rt, res);
1344 rt->rt6i_flags |= RTF_CACHE;
1345 rt->dst.flags |= DST_HOST;
1346 rt->rt6i_dst.addr = *daddr;
1347 rt->rt6i_dst.plen = 128;
1349 if (!rt6_is_gw_or_nonexthop(res)) {
1350 if (f6i->fib6_dst.plen != 128 &&
1351 ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
1352 rt->rt6i_flags |= RTF_ANYCAST;
1353 #ifdef CONFIG_IPV6_SUBTREES
1354 if (rt->rt6i_src.plen && saddr) {
1355 rt->rt6i_src.addr = *saddr;
1356 rt->rt6i_src.plen = 128;
1364 static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
1366 struct fib6_info *f6i = res->f6i;
1367 unsigned short flags = fib6_info_dst_flags(f6i);
1368 struct net_device *dev;
1369 struct rt6_info *pcpu_rt;
1371 if (!fib6_info_hold_safe(f6i))
1375 dev = ip6_rt_get_dev_rcu(res);
1376 pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
1379 fib6_info_release(f6i);
1382 ip6_rt_copy_init(pcpu_rt, res);
1383 pcpu_rt->rt6i_flags |= RTF_PCPU;
1387 /* It should be called with rcu_read_lock() acquired */
1388 static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
1390 struct rt6_info *pcpu_rt;
1392 pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
1395 ip6_hold_safe(NULL, &pcpu_rt);
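/* rt6_make_pcpu_route() fills this CPU's rt6i_pcpu slot: it allocates a
 * clone with ip6_rt_pcpu_alloc() and installs it with cmpxchg() so that a
 * concurrent writer cannot leak an entry; if the owning fib6_info is
 * already being destroyed, the clone's 'from' reference is dropped right
 * away.  rt6_get_pcpu_route() above simply reads the slot under RCU.
 */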
1400 static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1401 const struct fib6_result *res)
1403 struct rt6_info *pcpu_rt, *prev, **p;
1405 pcpu_rt = ip6_rt_pcpu_alloc(res);
1407 dst_hold(&net->ipv6.ip6_null_entry->dst);
1408 return net->ipv6.ip6_null_entry;
1411 dst_hold(&pcpu_rt->dst);
1412 p = this_cpu_ptr(res->nh->rt6i_pcpu);
1413 prev = cmpxchg(p, NULL, pcpu_rt);
1416 if (res->f6i->fib6_destroying) {
1417 struct fib6_info *from;
1419 from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
1420 fib6_info_release(from);
1426 /* exception hash table implementation
1428 static DEFINE_SPINLOCK(rt6_exception_lock);
1430 /* Remove rt6_ex from hash table and free the memory
1431 * Caller must hold rt6_exception_lock
1433 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1434 struct rt6_exception *rt6_ex)
1436 struct fib6_info *from;
1439 if (!bucket || !rt6_ex)
1442 net = dev_net(rt6_ex->rt6i->dst.dev);
1443 net->ipv6.rt6_stats->fib_rt_cache--;
1445 /* completely purge the exception to allow releasing the held resources:
1446 * some [sk] cache may keep the dst around for an unlimited time
1448 from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
1449 fib6_info_release(from);
1450 dst_dev_put(&rt6_ex->rt6i->dst);
1452 hlist_del_rcu(&rt6_ex->hlist);
1453 dst_release(&rt6_ex->rt6i->dst);
1454 kfree_rcu(rt6_ex, rcu);
1455 WARN_ON_ONCE(!bucket->depth);
1459 /* Remove oldest rt6_ex in bucket and free the memory
1460 * Caller must hold rt6_exception_lock
1462 static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1464 struct rt6_exception *rt6_ex, *oldest = NULL;
1469 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1470 if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1473 rt6_remove_exception(bucket, oldest);
1476 static u32 rt6_exception_hash(const struct in6_addr *dst,
1477 const struct in6_addr *src)
1479 static u32 seed __read_mostly;
1482 net_get_random_once(&seed, sizeof(seed));
1483 val = jhash(dst, sizeof(*dst), seed);
1485 #ifdef CONFIG_IPV6_SUBTREES
1487 val = jhash(src, sizeof(*src), val);
1489 return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
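/* Exception (RTF_CACHE) routes hang off a small per-nexthop hash table:
 * rt6_exception_hash() runs the destination (and, with
 * CONFIG_IPV6_SUBTREES, the source) address through jhash with a boot-time
 * random seed and folds the result to FIB6_EXCEPTION_BUCKET_SIZE_SHIFT bits
 * with hash_32() to pick a bucket.
 */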
1492 /* Helper function to find the cached rt in the hash table
1493 * and update bucket pointer to point to the bucket for this
1494 * (daddr, saddr) pair
1495 * Caller must hold rt6_exception_lock
1497 static struct rt6_exception *
1498 __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1499 const struct in6_addr *daddr,
1500 const struct in6_addr *saddr)
1502 struct rt6_exception *rt6_ex;
1505 if (!(*bucket) || !daddr)
1508 hval = rt6_exception_hash(daddr, saddr);
1511 hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1512 struct rt6_info *rt6 = rt6_ex->rt6i;
1513 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1515 #ifdef CONFIG_IPV6_SUBTREES
1516 if (matched && saddr)
1517 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1525 /* Helper function to find the cached rt in the hash table
1526 * and update bucket pointer to point to the bucket for this
1527 * (daddr, saddr) pair
1528 * Caller must hold rcu_read_lock()
1530 static struct rt6_exception *
1531 __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1532 const struct in6_addr *daddr,
1533 const struct in6_addr *saddr)
1535 struct rt6_exception *rt6_ex;
1538 WARN_ON_ONCE(!rcu_read_lock_held());
1540 if (!(*bucket) || !daddr)
1543 hval = rt6_exception_hash(daddr, saddr);
1546 hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1547 struct rt6_info *rt6 = rt6_ex->rt6i;
1548 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1550 #ifdef CONFIG_IPV6_SUBTREES
1551 if (matched && saddr)
1552 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1560 static unsigned int fib6_mtu(const struct fib6_result *res)
1562 const struct fib6_nh *nh = res->nh;
1565 if (res->f6i->fib6_pmtu) {
1566 mtu = res->f6i->fib6_pmtu;
1568 struct net_device *dev = nh->fib_nh_dev;
1569 struct inet6_dev *idev;
1572 idev = __in6_dev_get(dev);
1573 mtu = idev->cnf.mtu6;
1577 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1579 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1582 #define FIB6_EXCEPTION_BUCKET_FLUSHED 0x1UL
1584 /* used when the flushed bit is not relevant, only access to the bucket
1585 * (i.e., all bucket users except rt6_insert_exception);
1587 * called under rcu lock; sometimes called with rt6_exception_lock held
1590 struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
1593 struct rt6_exception_bucket *bucket;
1596 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1597 lockdep_is_held(lock));
1599 bucket = rcu_dereference(nh->rt6i_exception_bucket);
1601 /* remove bucket flushed bit if set */
1603 unsigned long p = (unsigned long)bucket;
1605 p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
1606 bucket = (struct rt6_exception_bucket *)p;
1612 static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
1614 unsigned long p = (unsigned long)bucket;
1616 return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
1619 /* called with rt6_exception_lock held */
1620 static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
1623 struct rt6_exception_bucket *bucket;
1626 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1627 lockdep_is_held(lock));
1629 p = (unsigned long)bucket;
1630 p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
1631 bucket = (struct rt6_exception_bucket *)p;
1632 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1635 static int rt6_insert_exception(struct rt6_info *nrt,
1636 const struct fib6_result *res)
1638 struct net *net = dev_net(nrt->dst.dev);
1639 struct rt6_exception_bucket *bucket;
1640 struct fib6_info *f6i = res->f6i;
1641 struct in6_addr *src_key = NULL;
1642 struct rt6_exception *rt6_ex;
1643 struct fib6_nh *nh = res->nh;
1646 spin_lock_bh(&rt6_exception_lock);
1648 bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1649 lockdep_is_held(&rt6_exception_lock));
1651 bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
1657 rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1658 } else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1663 #ifdef CONFIG_IPV6_SUBTREES
1664 /* fib6_src.plen != 0 indicates f6i is in subtree
1665 * and exception table is indexed by a hash of
1666 * both fib6_dst and fib6_src.
1667 * Otherwise, the exception table is indexed by
1668 * a hash of only fib6_dst.
1670 if (f6i->fib6_src.plen)
1671 src_key = &nrt->rt6i_src.addr;
1673 /* rt6_mtu_change() might lower mtu on f6i.
1674 * Only insert this exception route if its mtu
1675 * is less than f6i's mtu value.
1677 if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1682 rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1685 rt6_remove_exception(bucket, rt6_ex);
1687 rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
1693 rt6_ex->stamp = jiffies;
1694 hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1696 net->ipv6.rt6_stats->fib_rt_cache++;
1698 if (bucket->depth > FIB6_MAX_DEPTH)
1699 rt6_exception_remove_oldest(bucket);
1702 spin_unlock_bh(&rt6_exception_lock);
1704 /* Update fn->fn_sernum to invalidate all cached dst */
1706 spin_lock_bh(&f6i->fib6_table->tb6_lock);
1707 fib6_update_sernum(net, f6i);
1708 spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1709 fib6_force_start_gc(net);
1715 static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1717 struct rt6_exception_bucket *bucket;
1718 struct rt6_exception *rt6_ex;
1719 struct hlist_node *tmp;
1722 spin_lock_bh(&rt6_exception_lock);
1724 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1728 /* Prevent rt6_insert_exception() from recreating the bucket list */
1730 fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1732 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1733 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1735 rcu_access_pointer(rt6_ex->rt6i->from) == from)
1736 rt6_remove_exception(bucket, rt6_ex);
1738 WARN_ON_ONCE(!from && bucket->depth);
1742 spin_unlock_bh(&rt6_exception_lock);
1745 void rt6_flush_exceptions(struct fib6_info *f6i)
1747 fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1750 /* Find cached rt in the hash table inside passed in rt
1751 * Caller has to hold rcu_read_lock()
1753 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1754 const struct in6_addr *daddr,
1755 const struct in6_addr *saddr)
1757 const struct in6_addr *src_key = NULL;
1758 struct rt6_exception_bucket *bucket;
1759 struct rt6_exception *rt6_ex;
1760 struct rt6_info *ret = NULL;
1762 #ifdef CONFIG_IPV6_SUBTREES
1763 /* fib6_src.plen != 0 indicates f6i is in subtree
1764 * and exception table is indexed by a hash of
1765 * both fib6_dst and fib6_src.
1766 * However, the src addr used to create the hash
1767 * might not be exactly the passed in saddr which
1768 * is a /128 addr from the flow.
1769 * So we need to use f6i->fib6_src to redo lookup
1770 * if the passed in saddr does not find anything.
1771 * (See the logic in ip6_rt_cache_alloc() on how
1772 * rt->rt6i_src is updated.)
1774 if (res->f6i->fib6_src.plen)
1778 bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1779 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1781 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1784 #ifdef CONFIG_IPV6_SUBTREES
1785 /* Use fib6_src as src_key and redo lookup */
1786 if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1787 src_key = &res->f6i->fib6_src.addr;
1795 /* Remove the passed in cached rt from the hash table that contains it */
1796 static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1797 const struct rt6_info *rt)
1799 const struct in6_addr *src_key = NULL;
1800 struct rt6_exception_bucket *bucket;
1801 struct rt6_exception *rt6_ex;
1804 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1807 spin_lock_bh(&rt6_exception_lock);
1808 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1810 #ifdef CONFIG_IPV6_SUBTREES
1811 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1812 * and exception table is indexed by a hash of
1813 * both rt6i_dst and rt6i_src.
1814 * Otherwise, the exception table is indexed by
1815 * a hash of only rt6i_dst.
1818 src_key = &rt->rt6i_src.addr;
1820 rt6_ex = __rt6_find_exception_spinlock(&bucket,
1824 rt6_remove_exception(bucket, rt6_ex);
1830 spin_unlock_bh(&rt6_exception_lock);
1834 static int rt6_remove_exception_rt(struct rt6_info *rt)
1836 struct fib6_info *from;
1838 from = rcu_dereference(rt->from);
1839 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1842 return fib6_nh_remove_exception(from->fib6_nh,
1843 from->fib6_src.plen, rt);
1846 /* Find the rt6_ex which contains the passed in cached rt and refresh its stamp
1849 static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
1850 const struct rt6_info *rt)
1852 const struct in6_addr *src_key = NULL;
1853 struct rt6_exception_bucket *bucket;
1854 struct rt6_exception *rt6_ex;
1856 bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1857 #ifdef CONFIG_IPV6_SUBTREES
1858 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1859 * and exception table is indexed by a hash of
1860 * both rt6i_dst and rt6i_src.
1861 * Otherwise, the exception table is indexed by
1862 * a hash of only rt6i_dst.
1865 src_key = &rt->rt6i_src.addr;
1867 rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1869 rt6_ex->stamp = jiffies;
1872 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1874 struct fib6_info *from;
1878 from = rcu_dereference(rt->from);
1879 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1882 fib6_nh_update_exception(from->fib6_nh, from->fib6_src.plen, rt);
1887 static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1888 struct rt6_info *rt, int mtu)
1890 /* If the new MTU is lower than the route PMTU, this new MTU will be the
1891 * lowest MTU in the path: always allow updating the route PMTU to
1892 * reflect PMTU decreases.
1894 * If the new MTU is higher, and the route PMTU is equal to the local
1895 * MTU, this means the old MTU is the lowest in the path, so allow
1896 * updating it: if other nodes now have lower MTUs, PMTU discovery will
1900 if (dst_mtu(&rt->dst) >= mtu)
1903 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
1909 static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
1910 const struct fib6_nh *nh, int mtu)
1912 struct rt6_exception_bucket *bucket;
1913 struct rt6_exception *rt6_ex;
1916 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1920 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1921 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1922 struct rt6_info *entry = rt6_ex->rt6i;
1924 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
1925 * route), the metrics of its rt->from have already
1928 if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
1929 rt6_mtu_change_route_allowed(idev, entry, mtu))
1930 dst_metric_set(&entry->dst, RTAX_MTU, mtu);
1936 #define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
1938 static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
1939 const struct in6_addr *gateway)
1941 struct rt6_exception_bucket *bucket;
1942 struct rt6_exception *rt6_ex;
1943 struct hlist_node *tmp;
1946 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1949 spin_lock_bh(&rt6_exception_lock);
1950 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1952 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1953 hlist_for_each_entry_safe(rt6_ex, tmp,
1954 &bucket->chain, hlist) {
1955 struct rt6_info *entry = rt6_ex->rt6i;
1957 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
1958 RTF_CACHE_GATEWAY &&
1959 ipv6_addr_equal(gateway,
1960 &entry->rt6i_gateway)) {
1961 rt6_remove_exception(bucket, rt6_ex);
1968 spin_unlock_bh(&rt6_exception_lock);
1971 static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
1972 struct rt6_exception *rt6_ex,
1973 struct fib6_gc_args *gc_args,
1976 struct rt6_info *rt = rt6_ex->rt6i;
1978 /* we are pruning and obsoleting aged-out and non-gateway exceptions
1979 * even if others still hold references to them, so that on the next
1980 * dst_check() such references can be dropped.
1981 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
1982 * expired, independently from their aging, as per RFC 8201 section 4
1984 if (!(rt->rt6i_flags & RTF_EXPIRES)) {
1985 if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
1986 RT6_TRACE("aging clone %p\n", rt);
1987 rt6_remove_exception(bucket, rt6_ex);
1990 } else if (time_after(jiffies, rt->dst.expires)) {
1991 RT6_TRACE("purging expired route %p\n", rt);
1992 rt6_remove_exception(bucket, rt6_ex);
1996 if (rt->rt6i_flags & RTF_GATEWAY) {
1997 struct neighbour *neigh;
1998 __u8 neigh_flags = 0;
2000 neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2002 neigh_flags = neigh->flags;
2004 if (!(neigh_flags & NTF_ROUTER)) {
2005 RT6_TRACE("purging route %p via non-router but gateway\n",
2007 rt6_remove_exception(bucket, rt6_ex);
2015 static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
2016 struct fib6_gc_args *gc_args,
2019 struct rt6_exception_bucket *bucket;
2020 struct rt6_exception *rt6_ex;
2021 struct hlist_node *tmp;
2024 if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2028 spin_lock(&rt6_exception_lock);
2029 bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2031 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2032 hlist_for_each_entry_safe(rt6_ex, tmp,
2033 &bucket->chain, hlist) {
2034 rt6_age_examine_exception(bucket, rt6_ex,
2040 spin_unlock(&rt6_exception_lock);
2041 rcu_read_unlock_bh();
2044 void rt6_age_exceptions(struct fib6_info *f6i,
2045 struct fib6_gc_args *gc_args,
2048 fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
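/* fib6_table_lookup() is the core of ip6_pol_route(): look up the
 * longest-prefix node, run rt6_select(), and while only the null entry is
 * found backtrack with fib6_backtrack() and reselect; if nothing reachable
 * matched, the RT6_LOOKUP_F_REACHABLE restriction is dropped and the
 * selection is redone once.
 */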
2051 /* must be called with rcu lock held */
2052 int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
2053 struct flowi6 *fl6, struct fib6_result *res, int strict)
2055 struct fib6_node *fn, *saved_fn;
2057 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2060 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2064 rt6_select(net, fn, oif, res, strict);
2065 if (res->f6i == net->ipv6.fib6_null_entry) {
2066 fn = fib6_backtrack(fn, &fl6->saddr);
2068 goto redo_rt6_select;
2069 else if (strict & RT6_LOOKUP_F_REACHABLE) {
2070 /* also consider unreachable route */
2071 strict &= ~RT6_LOOKUP_F_REACHABLE;
2073 goto redo_rt6_select;
2077 trace_fib6_table_lookup(net, res, table, fl6);
2082 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
2083 int oif, struct flowi6 *fl6,
2084 const struct sk_buff *skb, int flags)
2086 struct fib6_result res = {};
2087 struct rt6_info *rt;
2090 strict |= flags & RT6_LOOKUP_F_IFACE;
2091 strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
2092 if (net->ipv6.devconf_all->forwarding == 0)
2093 strict |= RT6_LOOKUP_F_REACHABLE;
2097 fib6_table_lookup(net, table, oif, fl6, &res, strict);
2098 if (res.f6i == net->ipv6.fib6_null_entry) {
2099 rt = net->ipv6.ip6_null_entry;
2105 fib6_select_path(net, &res, fl6, oif, false, skb, strict);
2107 /* Search through exception table */
2108 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
2110 if (ip6_hold_safe(net, &rt))
2111 dst_use_noref(&rt->dst, jiffies);
2115 } else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
2116 !res.nh->fib_nh_gw_family)) {
2117 /* Create a RTF_CACHE clone which will not be
2118 * owned by the fib6 tree. It is for the special case where
2119 * the daddr in the skb during the neighbor look-up is different
2120 * from the fl6->daddr used to look-up route here.
2122 struct rt6_info *uncached_rt;
2124 uncached_rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
2129 /* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
2130 * No need for another dst_hold()
2132 rt6_uncached_list_add(uncached_rt);
2133 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
2135 uncached_rt = net->ipv6.ip6_null_entry;
2136 dst_hold(&uncached_rt->dst);
2141 /* Get a percpu copy */
2143 struct rt6_info *pcpu_rt;
2146 pcpu_rt = rt6_get_pcpu_route(&res);
2149 pcpu_rt = rt6_make_pcpu_route(net, &res);
2157 EXPORT_SYMBOL_GPL(ip6_pol_route);
2159 static struct rt6_info *ip6_pol_route_input(struct net *net,
2160 struct fib6_table *table,
2162 const struct sk_buff *skb,
2165 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
2168 struct dst_entry *ip6_route_input_lookup(struct net *net,
2169 struct net_device *dev,
2171 const struct sk_buff *skb,
2174 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
2175 flags |= RT6_LOOKUP_F_IFACE;
2177 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
2179 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
2181 static void ip6_multipath_l3_keys(const struct sk_buff *skb,
2182 struct flow_keys *keys,
2183 struct flow_keys *flkeys)
2185 const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
2186 const struct ipv6hdr *key_iph = outer_iph;
2187 struct flow_keys *_flkeys = flkeys;
2188 const struct ipv6hdr *inner_iph;
2189 const struct icmp6hdr *icmph;
2190 struct ipv6hdr _inner_iph;
2191 struct icmp6hdr _icmph;
2193 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
2196 icmph = skb_header_pointer(skb, skb_transport_offset(skb),
2197 sizeof(_icmph), &_icmph);
2201 if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
2202 icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
2203 icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
2204 icmph->icmp6_type != ICMPV6_PARAMPROB)
2207 inner_iph = skb_header_pointer(skb,
2208 skb_transport_offset(skb) + sizeof(*icmph),
2209 sizeof(_inner_iph), &_inner_iph);
2213 key_iph = inner_iph;
2217 keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
2218 keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
2219 keys->tags.flow_label = _flkeys->tags.flow_label;
2220 keys->basic.ip_proto = _flkeys->basic.ip_proto;
2222 keys->addrs.v6addrs.src = key_iph->saddr;
2223 keys->addrs.v6addrs.dst = key_iph->daddr;
2224 keys->tags.flow_label = ip6_flowlabel(key_iph);
2225 keys->basic.ip_proto = key_iph->nexthdr;
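/* rt6_multipath_hash() implements the sysctl-selectable multipath hash
 * policies: policy 0 hashes the L3 addresses and flow label (using the
 * inner header of ICMPv6 errors via ip6_multipath_l3_keys()), while policy
 * 1 hashes the five-tuple taken from pre-dissected flkeys, the skb, or fl6.
 * The result feeds the upper-bound comparison in fib6_select_path().
 */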
2229 /* if skb is set it will be used and fl6 can be NULL */
2230 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2231 const struct sk_buff *skb, struct flow_keys *flkeys)
2233 struct flow_keys hash_keys;
2236 switch (ip6_multipath_hash_policy(net)) {
2238 memset(&hash_keys, 0, sizeof(hash_keys));
2239 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2241 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2243 hash_keys.addrs.v6addrs.src = fl6->saddr;
2244 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2245 hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2246 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2251 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2252 struct flow_keys keys;
2254 /* short-circuit if we already have L4 hash present */
2256 return skb_get_hash_raw(skb) >> 1;
2258 memset(&hash_keys, 0, sizeof(hash_keys));
2261 skb_flow_dissect_flow_keys(skb, &keys, flag);
2264 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2265 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2266 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2267 hash_keys.ports.src = flkeys->ports.src;
2268 hash_keys.ports.dst = flkeys->ports.dst;
2269 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2271 memset(&hash_keys, 0, sizeof(hash_keys));
2272 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2273 hash_keys.addrs.v6addrs.src = fl6->saddr;
2274 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2275 hash_keys.ports.src = fl6->fl6_sport;
2276 hash_keys.ports.dst = fl6->fl6_dport;
2277 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2281 mhash = flow_hash_from_keys(&hash_keys);
2286 void ip6_route_input(struct sk_buff *skb)
2288 const struct ipv6hdr *iph = ipv6_hdr(skb);
2289 struct net *net = dev_net(skb->dev);
2290 int flags = RT6_LOOKUP_F_HAS_SADDR;
2291 struct ip_tunnel_info *tun_info;
2292 struct flowi6 fl6 = {
2293 .flowi6_iif = skb->dev->ifindex,
2294 .daddr = iph->daddr,
2295 .saddr = iph->saddr,
2296 .flowlabel = ip6_flowinfo(iph),
2297 .flowi6_mark = skb->mark,
2298 .flowi6_proto = iph->nexthdr,
2300 struct flow_keys *flkeys = NULL, _flkeys;
2302 tun_info = skb_tunnel_info(skb);
2303 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2304 fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2306 if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2309 if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2310 fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2313 ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags));
2316 static struct rt6_info *ip6_pol_route_output(struct net *net,
2317 struct fib6_table *table,
2319 const struct sk_buff *skb,
2322 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2325 struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
2326 struct flowi6 *fl6, int flags)
2330 if (ipv6_addr_type(&fl6->daddr) &
2331 (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2332 struct dst_entry *dst;
2334 dst = l3mdev_link_scope_lookup(net, fl6);
2339 fl6->flowi6_iif = LOOPBACK_IFINDEX;
2341 any_src = ipv6_addr_any(&fl6->saddr);
2342 if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2343 (fl6->flowi6_oif && any_src))
2344 flags |= RT6_LOOKUP_F_IFACE;
2347 flags |= RT6_LOOKUP_F_HAS_SADDR;
2349 flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);
2351 return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2353 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2355 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2357 struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
2358 struct net_device *loopback_dev = net->loopback_dev;
2359 struct dst_entry *new = NULL;
2361 rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
2362 DST_OBSOLETE_DEAD, 0);
2365 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2369 new->input = dst_discard;
2370 new->output = dst_discard_out;
2372 dst_copy_metrics(new, &ort->dst);
2374 rt->rt6i_idev = in6_dev_get(loopback_dev);
2375 rt->rt6i_gateway = ort->rt6i_gateway;
2376 rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2378 memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2379 #ifdef CONFIG_IPV6_SUBTREES
2380 memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2384 dst_release(dst_orig);
2385 return new ? new : ERR_PTR(-ENOMEM);
2389 * Destination cache support functions
2392 static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2396 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2399 if (fib6_check_expired(f6i))
2405 static struct dst_entry *rt6_check(struct rt6_info *rt,
2406 struct fib6_info *from,
2411 if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
2412 rt_cookie != cookie)
2415 if (rt6_check_expired(rt))
2421 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2422 struct fib6_info *from,
2425 if (!__rt6_check_expired(rt) &&
2426 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
2427 fib6_check(from, cookie))
2433 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
2435 struct dst_entry *dst_ret;
2436 struct fib6_info *from;
2437 struct rt6_info *rt;
2439 rt = container_of(dst, struct rt6_info, dst);
2443 /* All IPV6 dsts are created with ->obsolete set to the value
2444 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
2445 * into this function always.
2448 from = rcu_dereference(rt->from);
2450 if (from && (rt->rt6i_flags & RTF_PCPU ||
2451 unlikely(!list_empty(&rt->rt6i_uncached))))
2452 dst_ret = rt6_dst_from_check(rt, from, cookie);
2454 dst_ret = rt6_check(rt, from, cookie);
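/* ip6_dst_check() revalidates a cached dst against the FIB: the caller's
 * cookie must match the one derived from the origin fib6_info
 * (fib6_get_cookie_safe()) and the entry must not have expired; percpu and
 * uncached clones go through rt6_dst_from_check(), exception routes through
 * rt6_check(), and a NULL return forces the caller to redo the route lookup.
 */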
2461 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
2463 struct rt6_info *rt = (struct rt6_info *) dst;
2466 if (rt->rt6i_flags & RTF_CACHE) {
2468 if (rt6_check_expired(rt)) {
2469 rt6_remove_exception_rt(rt);
2481 static void ip6_link_failure(struct sk_buff *skb)
2483 struct rt6_info *rt;
2485 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2487 rt = (struct rt6_info *) skb_dst(skb);
2490 if (rt->rt6i_flags & RTF_CACHE) {
2491 rt6_remove_exception_rt(rt);
2493 struct fib6_info *from;
2494 struct fib6_node *fn;
2496 from = rcu_dereference(rt->from);
2498 fn = rcu_dereference(from->fib6_node);
2499 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2507 static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2509 if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2510 struct fib6_info *from;
2513 from = rcu_dereference(rt0->from);
2515 rt0->dst.expires = from->expires;
2519 dst_set_expires(&rt0->dst, timeout);
2520 rt0->rt6i_flags |= RTF_EXPIRES;
2523 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2525 struct net *net = dev_net(rt->dst.dev);
2527 dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2528 rt->rt6i_flags |= RTF_MODIFIED;
2529 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2532 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2534 return !(rt->rt6i_flags & RTF_CACHE) &&
2535 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
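/* __ip6_rt_update_pmtu() applies a PMTU report: the new MTU is clamped to
 * at least IPV6_MIN_MTU, and dsts that are already RTF_CACHE clones (or
 * cannot take a new cache entry) are updated in place by
 * rt6_do_update_pmtu() with an ip6_rt_mtu_expires lifetime; otherwise a new
 * RTF_CACHE clone is allocated and inserted into the exception table so the
 * lowered MTU only affects this destination.
 */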
2538 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2539 const struct ipv6hdr *iph, u32 mtu)
2541 const struct in6_addr *daddr, *saddr;
2542 struct rt6_info *rt6 = (struct rt6_info *)dst;
2544 if (dst_metric_locked(dst, RTAX_MTU))
2548 daddr = &iph->daddr;
2549 saddr = &iph->saddr;
2551 daddr = &sk->sk_v6_daddr;
2552 saddr = &inet6_sk(sk)->saddr;
2557 dst_confirm_neigh(dst, daddr);
2558 mtu = max_t(u32, mtu, IPV6_MIN_MTU);
2559 if (mtu >= dst_mtu(dst))
2562 if (!rt6_cache_allowed_for_pmtu(rt6)) {
2563 rt6_do_update_pmtu(rt6, mtu);
2564 /* update rt6_ex->stamp for cache */
2565 if (rt6->rt6i_flags & RTF_CACHE)
2566 rt6_update_exception_stamp_rt(rt6);
2568 struct fib6_result res = {};
2569 struct rt6_info *nrt6;
2572 res.f6i = rcu_dereference(rt6->from);
2577 res.nh = res.f6i->fib6_nh;
2578 res.fib6_flags = res.f6i->fib6_flags;
2579 res.fib6_type = res.f6i->fib6_type;
2581 nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2583 rt6_do_update_pmtu(nrt6, mtu);
2584 if (rt6_insert_exception(nrt6, &res))
2585 dst_release_immediate(&nrt6->dst);
2591 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2592 struct sk_buff *skb, u32 mtu)
2594 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
2597 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2598 int oif, u32 mark, kuid_t uid)
2600 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2601 struct dst_entry *dst;
2602 struct flowi6 fl6 = {
2604 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
2605 .daddr = iph->daddr,
2606 .saddr = iph->saddr,
2607 .flowlabel = ip6_flowinfo(iph),
2611 dst = ip6_route_output(net, NULL, &fl6);
2613 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
2616 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
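/*
 * Usage sketch (illustrative only; the caller below is assumed, not code
 * from this file): code that handles an ICMPv6 "Packet Too Big" error and
 * still holds the offending skb can refresh the path MTU like this:
 *
 *	ip6_update_pmtu(skb, dev_net(skb->dev), htonl(new_mtu), 0,
 *			skb->mark, sock_net_uid(dev_net(skb->dev), NULL));
 *
 * The mtu argument is in network byte order (__be32); it is converted with
 * ntohl() before being handed to __ip6_rt_update_pmtu() above.
 */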
2618 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
2620 int oif = sk->sk_bound_dev_if;
2621 struct dst_entry *dst;
2623 if (!oif && skb->dev)
2624 oif = l3mdev_master_ifindex(skb->dev);
2626 ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);
2628 dst = __sk_dst_get(sk);
2629 if (!dst || !dst->obsolete ||
2630 dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
2634 if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
2635 ip6_datagram_dst_update(sk, false);
2638 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2640 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
2641 const struct flowi6 *fl6)
2643 #ifdef CONFIG_IPV6_SUBTREES
2644 struct ipv6_pinfo *np = inet6_sk(sk);
2647 ip6_dst_store(sk, dst,
2648 ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
2649 &sk->sk_v6_daddr : NULL,
2650 #ifdef CONFIG_IPV6_SUBTREES
2651 ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
2657 static bool ip6_redirect_nh_match(const struct fib6_result *res,
2659 const struct in6_addr *gw,
2660 struct rt6_info **ret)
2662 const struct fib6_nh *nh = res->nh;
2664 if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
2665 fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
2668 /* rt_cache's gateway might be different from its 'parent'
2669 * in the case of an ip redirect.
2670 * So we keep searching in the exception table if the gateway
2673 if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
2674 struct rt6_info *rt_cache;
2676 rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
2678 ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
2687 /* Handle redirects */
2688 struct ip6rd_flowi {
2690 struct in6_addr gateway;
2693 static struct rt6_info *__ip6_route_redirect(struct net *net,
2694 struct fib6_table *table,
2696 const struct sk_buff *skb,
2699 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
2700 struct rt6_info *ret = NULL;
2701 struct fib6_result res = {};
2702 struct fib6_info *rt;
2703 struct fib6_node *fn;
2705 /* l3mdev_update_flow overrides oif if the device is enslaved; in
2706 * this case we must match on the real ingress device, so reset it
2708 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2709 fl6->flowi6_oif = skb->dev->ifindex;
2711 /* Get the "current" route for this destination and
2712 * check if the redirect has come from an appropriate router.
2714 * RFC 4861 specifies that redirects should only be
2715 * accepted if they come from the nexthop to the target.
2716 * Due to the way the routes are chosen, this notion
2717 * is a bit fuzzy and one might need to check all possible
2722 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2724 for_each_fib6_node_rt_rcu(fn) {
2726 res.nh = rt->fib6_nh;
2728 if (fib6_check_expired(rt))
2730 if (rt->fib6_flags & RTF_REJECT)
2732 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway, &ret))
2737 rt = net->ipv6.fib6_null_entry;
2738 else if (rt->fib6_flags & RTF_REJECT) {
2739 ret = net->ipv6.ip6_null_entry;
2743 if (rt == net->ipv6.fib6_null_entry) {
2744 fn = fib6_backtrack(fn, &fl6->saddr);
2750 res.nh = rt->fib6_nh;
2753 ip6_hold_safe(net, &ret);
2755 res.fib6_flags = res.f6i->fib6_flags;
2756 res.fib6_type = res.f6i->fib6_type;
2757 ret = ip6_create_rt_rcu(&res);
2762 trace_fib6_table_lookup(net, &res, table, fl6);
2766 static struct dst_entry *ip6_route_redirect(struct net *net,
2767 const struct flowi6 *fl6,
2768 const struct sk_buff *skb,
2769 const struct in6_addr *gateway)
2771 int flags = RT6_LOOKUP_F_HAS_SADDR;
2772 struct ip6rd_flowi rdfl;
2775 rdfl.gateway = *gateway;
2777 return fib6_rule_lookup(net, &rdfl.fl6, skb,
2778 flags, __ip6_route_redirect);
2781 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
2784 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2785 struct dst_entry *dst;
2786 struct flowi6 fl6 = {
2787 .flowi6_iif = LOOPBACK_IFINDEX,
2789 .flowi6_mark = mark,
2790 .daddr = iph->daddr,
2791 .saddr = iph->saddr,
2792 .flowlabel = ip6_flowinfo(iph),
2796 dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
2797 rt6_do_redirect(dst, NULL, skb);
2800 EXPORT_SYMBOL_GPL(ip6_redirect);
2802 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
2804 const struct ipv6hdr *iph = ipv6_hdr(skb);
2805 const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
2806 struct dst_entry *dst;
2807 struct flowi6 fl6 = {
2808 .flowi6_iif = LOOPBACK_IFINDEX,
2811 .saddr = iph->daddr,
2812 .flowi6_uid = sock_net_uid(net, NULL),
2815 dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
2816 rt6_do_redirect(dst, NULL, skb);
2820 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
2822 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
2825 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
2827 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
2829 struct net_device *dev = dst->dev;
2830 unsigned int mtu = dst_mtu(dst);
2831 struct net *net = dev_net(dev);
2833 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
2835 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
2836 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
2839 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
2840 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
2841 * IPV6_MAXPLEN is also valid and means: "any MSS,
2842 * rely only on pmtu discovery"
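 *
 * Worked example (illustrative, not part of the original comment): on a
 * standard 1500-byte Ethernet link, advmss = 1500 - 40 (IPv6 header)
 * - 20 (TCP header) = 1440 bytes, well below the IPV6_MAXPLEN cap
 * applied below.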
2844 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
2849 static unsigned int ip6_mtu(const struct dst_entry *dst)
2851 struct inet6_dev *idev;
2854 mtu = dst_metric_raw(dst, RTAX_MTU);
2861 idev = __in6_dev_get(dst->dev);
2863 mtu = idev->cnf.mtu6;
2867 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
2869 return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
2873 * 1. mtu on route is locked - use it
2874 * 2. mtu from nexthop exception
2875 * 3. mtu from egress device
2877 * based on ip6_dst_mtu_forward and exception logic of
2878 * rt6_find_cached_rt; called with rcu_read_lock
2880 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
2881 const struct in6_addr *daddr,
2882 const struct in6_addr *saddr)
2884 const struct fib6_nh *nh = res->nh;
2885 struct fib6_info *f6i = res->f6i;
2886 struct inet6_dev *idev;
2887 struct rt6_info *rt;
2890 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
2891 mtu = f6i->fib6_pmtu;
2896 rt = rt6_find_cached_rt(res, daddr, saddr);
2898 mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
2900 struct net_device *dev = nh->fib_nh_dev;
2903 idev = __in6_dev_get(dev);
2904 if (idev && idev->cnf.mtu6 > mtu)
2905 mtu = idev->cnf.mtu6;
2908 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
2910 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
2913 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
2916 struct dst_entry *dst;
2917 struct rt6_info *rt;
2918 struct inet6_dev *idev = in6_dev_get(dev);
2919 struct net *net = dev_net(dev);
2921 if (unlikely(!idev))
2922 return ERR_PTR(-ENODEV);
2924 rt = ip6_dst_alloc(net, dev, 0);
2925 if (unlikely(!rt)) {
2927 dst = ERR_PTR(-ENOMEM);
2931 rt->dst.flags |= DST_HOST;
2932 rt->dst.input = ip6_input;
2933 rt->dst.output = ip6_output;
2934 rt->rt6i_gateway = fl6->daddr;
2935 rt->rt6i_dst.addr = fl6->daddr;
2936 rt->rt6i_dst.plen = 128;
2937 rt->rt6i_idev = idev;
2938 dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
2940 /* Add this dst into uncached_list so that rt6_disable_ip() can
2941 * properly release the net_device
2943 rt6_uncached_list_add(rt);
2944 atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
2946 dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
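/*
 * dst cache garbage collection, summarizing the logic below: if the last GC
 * run was less than ip6_rt_gc_min_interval ago and the number of entries is
 * still within ip6_rt_max_size, the pass is skipped.  Otherwise
 * fib6_run_gc() is invoked with a per-netns expiry value (ip6_rt_gc_expire)
 * that grows by one on every pass, is reset to half of ip6_rt_gc_timeout
 * once the entry count falls below gc_thresh, and decays according to
 * ip6_rt_gc_elasticity after each run.
 */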
2952 static int ip6_dst_gc(struct dst_ops *ops)
2954 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
2955 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
2956 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
2957 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
2958 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
2959 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
2962 entries = dst_entries_get_fast(ops);
2963 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
2964 entries <= rt_max_size)
2967 net->ipv6.ip6_rt_gc_expire++;
2968 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
2969 entries = dst_entries_get_slow(ops);
2970 if (entries < ops->gc_thresh)
2971 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
2973 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
2974 return entries > rt_max_size;
2977 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
2978 struct fib6_config *cfg,
2979 const struct in6_addr *gw_addr,
2980 u32 tbid, int flags)
2982 struct flowi6 fl6 = {
2983 .flowi6_oif = cfg->fc_ifindex,
2985 .saddr = cfg->fc_prefsrc,
2987 struct fib6_table *table;
2988 struct rt6_info *rt;
2990 table = fib6_get_table(net, tbid);
2994 if (!ipv6_addr_any(&cfg->fc_prefsrc))
2995 flags |= RT6_LOOKUP_F_HAS_SADDR;
2997 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
2998 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);
3000 /* if table lookup failed, fall back to full lookup */
3001 if (rt == net->ipv6.ip6_null_entry) {
3009 static int ip6_route_check_nh_onlink(struct net *net,
3010 struct fib6_config *cfg,
3011 const struct net_device *dev,
3012 struct netlink_ext_ack *extack)
3014 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
3015 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3016 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
3017 struct fib6_info *from;
3018 struct rt6_info *grt;
3022 grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
3025 from = rcu_dereference(grt->from);
3026 if (!grt->dst.error &&
3027 /* ignore match if it is the default route */
3028 from && !ipv6_addr_any(&from->fib6_dst.addr) &&
3029 (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
3030 NL_SET_ERR_MSG(extack,
3031 "Nexthop has invalid gateway or device mismatch");
3042 static int ip6_route_check_nh(struct net *net,
3043 struct fib6_config *cfg,
3044 struct net_device **_dev,
3045 struct inet6_dev **idev)
3047 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3048 struct net_device *dev = _dev ? *_dev : NULL;
3049 struct rt6_info *grt = NULL;
3050 int err = -EHOSTUNREACH;
3052 if (cfg->fc_table) {
3053 int flags = RT6_LOOKUP_F_IFACE;
3055 grt = ip6_nh_lookup_table(net, cfg, gw_addr,
3056 cfg->fc_table, flags);
3058 if (grt->rt6i_flags & RTF_GATEWAY ||
3059 (dev && dev != grt->dst.dev)) {
3067 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);
3073 if (dev != grt->dst.dev) {
3078 *_dev = dev = grt->dst.dev;
3079 *idev = grt->rt6i_idev;
3081 in6_dev_hold(grt->rt6i_idev);
3084 if (!(grt->rt6i_flags & RTF_GATEWAY))
3093 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3094 struct net_device **_dev, struct inet6_dev **idev,
3095 struct netlink_ext_ack *extack)
3097 const struct in6_addr *gw_addr = &cfg->fc_gateway;
3098 int gwa_type = ipv6_addr_type(gw_addr);
3099 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3100 const struct net_device *dev = *_dev;
3101 bool need_addr_check = !dev;
3104 /* if gw_addr is local we will fail to detect this in case the
3105 * address is still TENTATIVE (DAD in progress). rt6_lookup()
3106 * will return the already-added prefix route via the interface the
3107 * prefix route was assigned to, which might be non-loopback.
3110 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3111 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3115 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3116 /* IPv6 strictly prohibits using non-link-local
3117 * addresses as the nexthop address.
3118 * Otherwise, a router will not be able to send redirects.
3119 * It is very good, but in some (rare!) circumstances
3120 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3121 * some exceptions. --ANK
3122 * We allow IPv4-mapped nexthops to support RFC4798-type
3125 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3126 NL_SET_ERR_MSG(extack, "Invalid gateway address");
3130 if (cfg->fc_flags & RTNH_F_ONLINK)
3131 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3133 err = ip6_route_check_nh(net, cfg, _dev, idev);
3139 /* reload in case device was changed */
3144 NL_SET_ERR_MSG(extack, "Egress device not specified");
3146 } else if (dev->flags & IFF_LOOPBACK) {
3147 NL_SET_ERR_MSG(extack,
3148 "Egress device can not be loopback device for this route");
3152 /* if we did not check gw_addr above, do so now that the
3153 * egress device has been resolved.
3155 if (need_addr_check &&
3156 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3157 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3166 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3168 if ((flags & RTF_REJECT) ||
3169 (dev && (dev->flags & IFF_LOOPBACK) &&
3170 !(addr_type & IPV6_ADDR_LOOPBACK) &&
3171 !(flags & RTF_LOCAL)))
3177 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3178 struct fib6_config *cfg, gfp_t gfp_flags,
3179 struct netlink_ext_ack *extack)
3181 struct net_device *dev = NULL;
3182 struct inet6_dev *idev = NULL;
3186 fib6_nh->fib_nh_family = AF_INET6;
3189 if (cfg->fc_ifindex) {
3190 dev = dev_get_by_index(net, cfg->fc_ifindex);
3193 idev = in6_dev_get(dev);
3198 if (cfg->fc_flags & RTNH_F_ONLINK) {
3200 NL_SET_ERR_MSG(extack,
3201 "Nexthop device required for onlink");
3205 if (!(dev->flags & IFF_UP)) {
3206 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3211 fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3214 fib6_nh->fib_nh_weight = 1;
3216 /* We cannot add true routes via loopback here,
3217 * they would result in kernel looping; promote them to reject routes
3219 addr_type = ipv6_addr_type(&cfg->fc_dst);
3220 if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
3221 /* hold loopback dev/idev if we haven't done so. */
3222 if (dev != net->loopback_dev) {
3227 dev = net->loopback_dev;
3229 idev = in6_dev_get(dev);
3238 if (cfg->fc_flags & RTF_GATEWAY) {
3239 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
3243 fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3244 fib6_nh->fib_nh_gw_family = AF_INET6;
3251 if (idev->cnf.disable_ipv6) {
3252 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3257 if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3258 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3263 if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3264 !netif_carrier_ok(dev))
3265 fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3267 err = fib_nh_common_init(&fib6_nh->nh_common, cfg->fc_encap,
3268 cfg->fc_encap_type, cfg, gfp_flags, extack);
3273 fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3274 if (!fib6_nh->rt6i_pcpu) {
3279 fib6_nh->fib_nh_dev = dev;
3280 fib6_nh->fib_nh_oif = dev->ifindex;
3287 lwtstate_put(fib6_nh->fib_nh_lws);
3288 fib6_nh->fib_nh_lws = NULL;
3296 void fib6_nh_release(struct fib6_nh *fib6_nh)
3298 struct rt6_exception_bucket *bucket;
3302 fib6_nh_flush_exceptions(fib6_nh, NULL);
3303 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3305 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3311 if (fib6_nh->rt6i_pcpu) {
3314 for_each_possible_cpu(cpu) {
3315 struct rt6_info **ppcpu_rt;
3316 struct rt6_info *pcpu_rt;
3318 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3319 pcpu_rt = *ppcpu_rt;
3321 dst_dev_put(&pcpu_rt->dst);
3322 dst_release(&pcpu_rt->dst);
3327 free_percpu(fib6_nh->rt6i_pcpu);
3330 fib_nh_common_release(&fib6_nh->nh_common);
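/*
 * ip6_route_info_create() validates a fib6_config and builds a fib6_info
 * (including its fib6_nh) without inserting it into the FIB; callers such
 * as ip6_route_add() below are then responsible for the insertion
 * (__ip6_ins_rt()) and for releasing the reference on failure.
 */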
3333 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3335 struct netlink_ext_ack *extack)
3337 struct net *net = cfg->fc_nlinfo.nl_net;
3338 struct fib6_info *rt = NULL;
3339 struct nexthop *nh = NULL;
3340 struct fib6_table *table;
3341 struct fib6_nh *fib6_nh;
3345 /* RTF_PCPU is an internal flag; can not be set by userspace */
3346 if (cfg->fc_flags & RTF_PCPU) {
3347 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3351 /* RTF_CACHE is an internal flag; can not be set by userspace */
3352 if (cfg->fc_flags & RTF_CACHE) {
3353 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3357 if (cfg->fc_type > RTN_MAX) {
3358 NL_SET_ERR_MSG(extack, "Invalid route type");
3362 if (cfg->fc_dst_len > 128) {
3363 NL_SET_ERR_MSG(extack, "Invalid prefix length");
3366 if (cfg->fc_src_len > 128) {
3367 NL_SET_ERR_MSG(extack, "Invalid source address length");
3370 #ifndef CONFIG_IPV6_SUBTREES
3371 if (cfg->fc_src_len) {
3372 NL_SET_ERR_MSG(extack,
3373 "Specifying source address requires IPV6_SUBTREES to be enabled");
3379 if (cfg->fc_nlinfo.nlh &&
3380 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3381 table = fib6_get_table(net, cfg->fc_table);
3383 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3384 table = fib6_new_table(net, cfg->fc_table);
3387 table = fib6_new_table(net, cfg->fc_table);
3394 rt = fib6_info_alloc(gfp_flags, !nh);
3398 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3400 if (IS_ERR(rt->fib6_metrics)) {
3401 err = PTR_ERR(rt->fib6_metrics);
3402 /* Do not leave garbage there. */
3403 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
3407 if (cfg->fc_flags & RTF_ADDRCONF)
3408 rt->dst_nocount = true;
3410 if (cfg->fc_flags & RTF_EXPIRES)
3411 fib6_set_expires(rt, jiffies +
3412 clock_t_to_jiffies(cfg->fc_expires));
3414 fib6_clean_expires(rt);
3416 if (cfg->fc_protocol == RTPROT_UNSPEC)
3417 cfg->fc_protocol = RTPROT_BOOT;
3418 rt->fib6_protocol = cfg->fc_protocol;
3420 rt->fib6_table = table;
3421 rt->fib6_metric = cfg->fc_metric;
3422 rt->fib6_type = cfg->fc_type;
3423 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3425 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3426 rt->fib6_dst.plen = cfg->fc_dst_len;
3427 if (rt->fib6_dst.plen == 128)
3428 rt->dst_host = true;
3430 #ifdef CONFIG_IPV6_SUBTREES
3431 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3432 rt->fib6_src.plen = cfg->fc_src_len;
3435 if (!nexthop_get(nh)) {
3436 NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3439 if (rt->fib6_src.plen) {
3440 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3444 fib6_nh = nexthop_fib6_nh(rt->nh);
3446 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3450 fib6_nh = rt->fib6_nh;
3452 /* We cannot add true routes via loopback here, they would
3453 * result in kernel looping; promote them to reject routes
3455 addr_type = ipv6_addr_type(&cfg->fc_dst);
3456 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3458 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3461 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3462 struct net_device *dev = fib6_nh->fib_nh_dev;
3464 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3465 NL_SET_ERR_MSG(extack, "Invalid source address");
3469 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3470 rt->fib6_prefsrc.plen = 128;
3472 rt->fib6_prefsrc.plen = 0;
3476 fib6_info_release(rt);
3477 return ERR_PTR(err);
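/*
 * Usage sketch (illustrative; modelled on rt6_add_dflt_router() later in
 * this file, not new functionality): an in-kernel caller installs a route
 * by filling a struct fib6_config and passing it to ip6_route_add(), where
 * dev, net and prefix are the caller's device, namespace and destination:
 *
 *	struct fib6_config cfg = {
 *		.fc_table	= RT6_TABLE_MAIN,
 *		.fc_metric	= IP6_RT_PRIO_USER,
 *		.fc_ifindex	= dev->ifindex,
 *		.fc_dst_len	= 64,
 *		.fc_flags	= RTF_UP,
 *		.fc_type	= RTN_UNICAST,
 *		.fc_nlinfo.nl_net = net,
 *	};
 *
 *	cfg.fc_dst = *prefix;
 *	err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
 */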
3480 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3481 struct netlink_ext_ack *extack)
3483 struct fib6_info *rt;
3486 rt = ip6_route_info_create(cfg, gfp_flags, extack);
3490 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3491 fib6_info_release(rt);
3496 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3498 struct net *net = info->nl_net;
3499 struct fib6_table *table;
3502 if (rt == net->ipv6.fib6_null_entry) {
3507 table = rt->fib6_table;
3508 spin_lock_bh(&table->tb6_lock);
3509 err = fib6_del(rt, info);
3510 spin_unlock_bh(&table->tb6_lock);
3513 fib6_info_release(rt);
3517 int ip6_del_rt(struct net *net, struct fib6_info *rt)
3519 struct nl_info info = { .nl_net = net };
3521 return __ip6_del_rt(rt, &info);
3524 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3526 struct nl_info *info = &cfg->fc_nlinfo;
3527 struct net *net = info->nl_net;
3528 struct sk_buff *skb = NULL;
3529 struct fib6_table *table;
3532 if (rt == net->ipv6.fib6_null_entry)
3534 table = rt->fib6_table;
3535 spin_lock_bh(&table->tb6_lock);
3537 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
3538 struct fib6_info *sibling, *next_sibling;
3540 /* prefer to send a single notification with all hops */
3541 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3543 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3545 if (rt6_fill_node(net, skb, rt, NULL,
3546 NULL, NULL, 0, RTM_DELROUTE,
3547 info->portid, seq, 0) < 0) {
3551 info->skip_notify = 1;
3554 list_for_each_entry_safe(sibling, next_sibling,
3557 err = fib6_del(sibling, info);
3563 err = fib6_del(rt, info);
3565 spin_unlock_bh(&table->tb6_lock);
3567 fib6_info_release(rt);
3570 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3571 info->nlh, gfp_any());
3576 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3580 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3583 if (cfg->fc_flags & RTF_GATEWAY &&
3584 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3587 rc = rt6_remove_exception_rt(rt);
3592 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3595 struct fib6_result res = {
3599 struct rt6_info *rt_cache;
3601 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
3603 return __ip6_del_cached_rt(rt_cache, cfg);
3608 static int ip6_route_del(struct fib6_config *cfg,
3609 struct netlink_ext_ack *extack)
3611 struct fib6_table *table;
3612 struct fib6_info *rt;
3613 struct fib6_node *fn;
3616 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
3618 NL_SET_ERR_MSG(extack, "FIB table does not exist");
3624 fn = fib6_locate(&table->tb6_root,
3625 &cfg->fc_dst, cfg->fc_dst_len,
3626 &cfg->fc_src, cfg->fc_src_len,
3627 !(cfg->fc_flags & RTF_CACHE));
3630 for_each_fib6_node_rt_rcu(fn) {
3634 if (cfg->fc_flags & RTF_CACHE) {
3637 rc = ip6_del_cached_rt(cfg, rt, nh);
3645 if (cfg->fc_ifindex &&
3647 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
3649 if (cfg->fc_flags & RTF_GATEWAY &&
3650 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
3652 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3654 if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
3656 if (!fib6_info_hold_safe(rt))
3660 /* if a gateway was specified, only delete the one hop */
3661 if (cfg->fc_flags & RTF_GATEWAY)
3662 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3664 return __ip6_del_rt_siblings(rt, cfg);
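/*
 * Handle a received ICMPv6 Redirect (RFC 4861, section 8): validate the
 * message and its ndisc options, update the neighbour entry for the new
 * first hop, and install an RTF_CACHE exception route via
 * rt6_insert_exception() so that later lookups use the redirected gateway.
 */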
3672 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
3674 struct netevent_redirect netevent;
3675 struct rt6_info *rt, *nrt = NULL;
3676 struct fib6_result res = {};
3677 struct ndisc_options ndopts;
3678 struct inet6_dev *in6_dev;
3679 struct neighbour *neigh;
3681 int optlen, on_link;
3684 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
3685 optlen -= sizeof(*msg);
3688 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
3692 msg = (struct rd_msg *)icmp6_hdr(skb);
3694 if (ipv6_addr_is_multicast(&msg->dest)) {
3695 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
3700 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
3702 } else if (ipv6_addr_type(&msg->target) !=
3703 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
3704 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
3708 in6_dev = __in6_dev_get(skb->dev);
3711 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
3715 * The IP source address of the Redirect MUST be the same as the current
3716 * first-hop router for the specified ICMP Destination Address.
3719 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
3720 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
3725 if (ndopts.nd_opts_tgt_lladdr) {
3726 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
3729 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
3734 rt = (struct rt6_info *) dst;
3735 if (rt->rt6i_flags & RTF_REJECT) {
3736 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
3740 /* Redirect received -> path was valid.
3741 * Redirects are sent only in response to data packets,
3742 * so this nexthop is apparently reachable. --ANK
3744 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
3746 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
3751 * We have finally decided to accept it.
3754 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
3755 NEIGH_UPDATE_F_WEAK_OVERRIDE|
3756 NEIGH_UPDATE_F_OVERRIDE|
3757 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
3758 NEIGH_UPDATE_F_ISROUTER)),
3759 NDISC_REDIRECT, &ndopts);
3762 res.f6i = rcu_dereference(rt->from);
3766 res.nh = res.f6i->fib6_nh;
3767 res.fib6_flags = res.f6i->fib6_flags;
3768 res.fib6_type = res.f6i->fib6_type;
3769 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
3773 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
3775 nrt->rt6i_flags &= ~RTF_GATEWAY;
3777 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
3779 /* rt6_insert_exception() will take care of duplicated exceptions */
3780 if (rt6_insert_exception(nrt, &res)) {
3781 dst_release_immediate(&nrt->dst);
3785 netevent.old = &rt->dst;
3786 netevent.new = &nrt->dst;
3787 netevent.daddr = &msg->dest;
3788 netevent.neigh = neigh;
3789 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
3793 neigh_release(neigh);
3796 #ifdef CONFIG_IPV6_ROUTE_INFO
3797 static struct fib6_info *rt6_get_route_info(struct net *net,
3798 const struct in6_addr *prefix, int prefixlen,
3799 const struct in6_addr *gwaddr,
3800 struct net_device *dev)
3802 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3803 int ifindex = dev->ifindex;
3804 struct fib6_node *fn;
3805 struct fib6_info *rt = NULL;
3806 struct fib6_table *table;
3808 table = fib6_get_table(net, tb_id);
3813 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
3817 for_each_fib6_node_rt_rcu(fn) {
3818 /* these routes do not use nexthops */
3821 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
3823 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
3824 !rt->fib6_nh->fib_nh_gw_family)
3826 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
3828 if (!fib6_info_hold_safe(rt))
3837 static struct fib6_info *rt6_add_route_info(struct net *net,
3838 const struct in6_addr *prefix, int prefixlen,
3839 const struct in6_addr *gwaddr,
3840 struct net_device *dev,
3843 struct fib6_config cfg = {
3844 .fc_metric = IP6_RT_PRIO_USER,
3845 .fc_ifindex = dev->ifindex,
3846 .fc_dst_len = prefixlen,
3847 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
3848 RTF_UP | RTF_PREF(pref),
3849 .fc_protocol = RTPROT_RA,
3850 .fc_type = RTN_UNICAST,
3851 .fc_nlinfo.portid = 0,
3852 .fc_nlinfo.nlh = NULL,
3853 .fc_nlinfo.nl_net = net,
3856 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3857 cfg.fc_dst = *prefix;
3858 cfg.fc_gateway = *gwaddr;
3860 /* We should treat it as a default route if prefix length is 0. */
3862 cfg.fc_flags |= RTF_DEFAULT;
3864 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
3866 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
3870 struct fib6_info *rt6_get_dflt_router(struct net *net,
3871 const struct in6_addr *addr,
3872 struct net_device *dev)
3874 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
3875 struct fib6_info *rt;
3876 struct fib6_table *table;
3878 table = fib6_get_table(net, tb_id);
3883 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3886 /* RA routes do not use nexthops */
3891 if (dev == nh->fib_nh_dev &&
3892 ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
3893 ipv6_addr_equal(&nh->fib_nh_gw6, addr))
3896 if (rt && !fib6_info_hold_safe(rt))
3902 struct fib6_info *rt6_add_dflt_router(struct net *net,
3903 const struct in6_addr *gwaddr,
3904 struct net_device *dev,
3907 struct fib6_config cfg = {
3908 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
3909 .fc_metric = IP6_RT_PRIO_USER,
3910 .fc_ifindex = dev->ifindex,
3911 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
3912 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
3913 .fc_protocol = RTPROT_RA,
3914 .fc_type = RTN_UNICAST,
3915 .fc_nlinfo.portid = 0,
3916 .fc_nlinfo.nlh = NULL,
3917 .fc_nlinfo.nl_net = net,
3920 cfg.fc_gateway = *gwaddr;
3922 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
3923 struct fib6_table *table;
3925 table = fib6_get_table(dev_net(dev), cfg.fc_table);
3927 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
3930 return rt6_get_dflt_router(net, gwaddr, dev);
3933 static void __rt6_purge_dflt_routers(struct net *net,
3934 struct fib6_table *table)
3936 struct fib6_info *rt;
3940 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3941 struct net_device *dev = fib6_info_nh_dev(rt);
3942 struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
3944 if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
3945 (!idev || idev->cnf.accept_ra != 2) &&
3946 fib6_info_hold_safe(rt)) {
3948 ip6_del_rt(net, rt);
3954 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
3957 void rt6_purge_dflt_routers(struct net *net)
3959 struct fib6_table *table;
3960 struct hlist_head *head;
3965 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
3966 head = &net->ipv6.fib_table_hash[h];
3967 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
3968 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
3969 __rt6_purge_dflt_routers(net, table);
3976 static void rtmsg_to_fib6_config(struct net *net,
3977 struct in6_rtmsg *rtmsg,
3978 struct fib6_config *cfg)
3980 *cfg = (struct fib6_config){
3981 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
3983 .fc_ifindex = rtmsg->rtmsg_ifindex,
3984 .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
3985 .fc_expires = rtmsg->rtmsg_info,
3986 .fc_dst_len = rtmsg->rtmsg_dst_len,
3987 .fc_src_len = rtmsg->rtmsg_src_len,
3988 .fc_flags = rtmsg->rtmsg_flags,
3989 .fc_type = rtmsg->rtmsg_type,
3991 .fc_nlinfo.nl_net = net,
3993 .fc_dst = rtmsg->rtmsg_dst,
3994 .fc_src = rtmsg->rtmsg_src,
3995 .fc_gateway = rtmsg->rtmsg_gateway,
3999 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
4001 struct fib6_config cfg;
4002 struct in6_rtmsg rtmsg;
4006 case SIOCADDRT: /* Add a route */
4007 case SIOCDELRT: /* Delete a route */
4008 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4010 err = copy_from_user(&rtmsg, arg,
4011 sizeof(struct in6_rtmsg));
4015 rtmsg_to_fib6_config(net, &rtmsg, &cfg);
4020 err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4023 err = ip6_route_del(&cfg, NULL);
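/*
 * SIOCADDRT/SIOCDELRT above form the legacy route ioctl interface used by
 * e.g. net-tools' route(8).  Minimal userspace sketch (illustrative only,
 * not kernel code; error handling omitted):
 *
 *	struct in6_rtmsg rt = { 0 };
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	inet_pton(AF_INET6, "2001:db8::", &rt.rtmsg_dst);
 *	rt.rtmsg_dst_len = 64;
 *	rt.rtmsg_ifindex = if_nametoindex("eth0");
 *	rt.rtmsg_metric  = 1;
 *	rt.rtmsg_flags   = RTF_UP;
 *	ioctl(fd, SIOCADDRT, &rt);
 */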
4037 * Drop the packet on the floor
4040 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4042 struct dst_entry *dst = skb_dst(skb);
4043 struct net *net = dev_net(dst->dev);
4044 struct inet6_dev *idev;
4047 if (netif_is_l3_master(skb->dev) &&
4048 dst->dev == net->loopback_dev)
4049 idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4051 idev = ip6_dst_idev(dst);
4053 switch (ipstats_mib_noroutes) {
4054 case IPSTATS_MIB_INNOROUTES:
4055 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4056 if (type == IPV6_ADDR_ANY) {
4057 IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4061 case IPSTATS_MIB_OUTNOROUTES:
4062 IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4066 /* Start over by dropping the dst for l3mdev case */
4067 if (netif_is_l3_master(skb->dev))
4070 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4075 static int ip6_pkt_discard(struct sk_buff *skb)
4077 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4080 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4082 skb->dev = skb_dst(skb)->dev;
4083 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4086 static int ip6_pkt_prohibit(struct sk_buff *skb)
4088 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4091 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4093 skb->dev = skb_dst(skb)->dev;
4094 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4098 * Allocate a dst for local (unicast / anycast) address.
4101 struct fib6_info *addrconf_f6i_alloc(struct net *net,
4102 struct inet6_dev *idev,
4103 const struct in6_addr *addr,
4104 bool anycast, gfp_t gfp_flags)
4106 struct fib6_config cfg = {
4107 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4108 .fc_ifindex = idev->dev->ifindex,
4109 .fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP,
4112 .fc_protocol = RTPROT_KERNEL,
4113 .fc_nlinfo.nl_net = net,
4114 .fc_ignore_dev_down = true,
4118 cfg.fc_type = RTN_ANYCAST;
4119 cfg.fc_flags |= RTF_ANYCAST;
4121 cfg.fc_type = RTN_LOCAL;
4122 cfg.fc_flags |= RTF_LOCAL;
4125 return ip6_route_info_create(&cfg, gfp_flags, NULL);
4128 /* remove deleted ip from prefsrc entries */
4129 struct arg_dev_net_ip {
4130 struct net_device *dev;
4132 struct in6_addr *addr;
4135 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4137 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
4138 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4139 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4142 ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
4143 rt != net->ipv6.fib6_null_entry &&
4144 ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
4145 spin_lock_bh(&rt6_exception_lock);
4146 /* remove prefsrc entry */
4147 rt->fib6_prefsrc.plen = 0;
4148 spin_unlock_bh(&rt6_exception_lock);
4153 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4155 struct net *net = dev_net(ifp->idev->dev);
4156 struct arg_dev_net_ip adni = {
4157 .dev = ifp->idev->dev,
4161 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4164 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
4166 /* Remove routers and update dst entries when a gateway turns into a host. */
4167 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4169 struct in6_addr *gateway = (struct in6_addr *)arg;
4172 /* RA routes do not use nexthops */
4177 if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4178 nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4181 /* Further clean up cached routes in the exception table.
4182 * This is needed because a cached route may have a different
4183 * gateway than its 'parent' in the case of an ip redirect.
4185 fib6_nh_exceptions_clean_tohost(nh, gateway);
4190 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4192 fib6_clean_all(net, fib6_clean_tohost, gateway);
4195 struct arg_netdev_event {
4196 const struct net_device *dev;
4198 unsigned char nh_flags;
4199 unsigned long event;
4203 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4205 struct fib6_info *iter;
4206 struct fib6_node *fn;
4208 fn = rcu_dereference_protected(rt->fib6_node,
4209 lockdep_is_held(&rt->fib6_table->tb6_lock));
4210 iter = rcu_dereference_protected(fn->leaf,
4211 lockdep_is_held(&rt->fib6_table->tb6_lock));
4213 if (iter->fib6_metric == rt->fib6_metric &&
4214 rt6_qualify_for_ecmp(iter))
4216 iter = rcu_dereference_protected(iter->fib6_next,
4217 lockdep_is_held(&rt->fib6_table->tb6_lock));
4223 /* only called for fib entries with builtin fib6_nh */
4224 static bool rt6_is_dead(const struct fib6_info *rt)
4226 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4227 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4228 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4234 static int rt6_multipath_total_weight(const struct fib6_info *rt)
4236 struct fib6_info *iter;
4239 if (!rt6_is_dead(rt))
4240 total += rt->fib6_nh->fib_nh_weight;
4242 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4243 if (!rt6_is_dead(iter))
4244 total += iter->fib6_nh->fib_nh_weight;
4250 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4252 int upper_bound = -1;
4254 if (!rt6_is_dead(rt)) {
4255 *weight += rt->fib6_nh->fib_nh_weight;
4256 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4259 atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4262 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4264 struct fib6_info *iter;
4267 rt6_upper_bound_set(rt, &weight, total);
4269 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4270 rt6_upper_bound_set(iter, &weight, total);
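/*
 * Worked example (illustrative): two sibling nexthops with weights 1 and 3
 * give total = 4 and running weights 1 and 4, so the first nexthop gets an
 * upper bound of roughly 2^31 * 1/4 and the second of roughly 2^31.  A flow
 * whose 31-bit multipath hash falls below the first bound (about 25% of
 * flows) selects the first nexthop; the rest select the second.
 */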
4273 void rt6_multipath_rebalance(struct fib6_info *rt)
4275 struct fib6_info *first;
4278 /* If the entire multipath route was marked for flushing,
4279 * there is no need to rebalance upon the removal of every
4282 if (!rt->fib6_nsiblings || rt->should_flush)
4285 /* During lookup routes are evaluated in order, so we need to
4286 * make sure upper bounds are assigned from the first sibling
4289 first = rt6_multipath_first_sibling(rt);
4290 if (WARN_ON_ONCE(!first))
4293 total = rt6_multipath_total_weight(first);
4294 rt6_multipath_upper_bound_set(first, total);
4297 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4299 const struct arg_netdev_event *arg = p_arg;
4300 struct net *net = dev_net(arg->dev);
4302 if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4303 rt->fib6_nh->fib_nh_dev == arg->dev) {
4304 rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4305 fib6_update_sernum_upto_root(net, rt);
4306 rt6_multipath_rebalance(rt);
4312 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4314 struct arg_netdev_event arg = {
4317 .nh_flags = nh_flags,
4321 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4322 arg.nh_flags |= RTNH_F_LINKDOWN;
4324 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4327 /* only called for fib entries with inline fib6_nh */
4328 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4329 const struct net_device *dev)
4331 struct fib6_info *iter;
4333 if (rt->fib6_nh->fib_nh_dev == dev)
4335 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4336 if (iter->fib6_nh->fib_nh_dev == dev)
4342 static void rt6_multipath_flush(struct fib6_info *rt)
4344 struct fib6_info *iter;
4346 rt->should_flush = 1;
4347 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4348 iter->should_flush = 1;
4351 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4352 const struct net_device *down_dev)
4354 struct fib6_info *iter;
4355 unsigned int dead = 0;
4357 if (rt->fib6_nh->fib_nh_dev == down_dev ||
4358 rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4360 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4361 if (iter->fib6_nh->fib_nh_dev == down_dev ||
4362 iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4368 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4369 const struct net_device *dev,
4370 unsigned char nh_flags)
4372 struct fib6_info *iter;
4374 if (rt->fib6_nh->fib_nh_dev == dev)
4375 rt->fib6_nh->fib_nh_flags |= nh_flags;
4376 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4377 if (iter->fib6_nh->fib_nh_dev == dev)
4378 iter->fib6_nh->fib_nh_flags |= nh_flags;
4381 /* called with write lock held for table with rt */
4382 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4384 const struct arg_netdev_event *arg = p_arg;
4385 const struct net_device *dev = arg->dev;
4386 struct net *net = dev_net(dev);
4388 if (rt == net->ipv6.fib6_null_entry || rt->nh)
4391 switch (arg->event) {
4392 case NETDEV_UNREGISTER:
4393 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4395 if (rt->should_flush)
4397 if (!rt->fib6_nsiblings)
4398 return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4399 if (rt6_multipath_uses_dev(rt, dev)) {
4402 count = rt6_multipath_dead_count(rt, dev);
4403 if (rt->fib6_nsiblings + 1 == count) {
4404 rt6_multipath_flush(rt);
4407 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4409 fib6_update_sernum(net, rt);
4410 rt6_multipath_rebalance(rt);
4414 if (rt->fib6_nh->fib_nh_dev != dev ||
4415 rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4417 rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4418 rt6_multipath_rebalance(rt);
4425 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
4427 struct arg_netdev_event arg = {
4433 struct net *net = dev_net(dev);
4435 if (net->ipv6.sysctl.skip_notify_on_dev_down)
4436 fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
4438 fib6_clean_all(net, fib6_ifdown, &arg);
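/*
 * Called when IPv6 is shut down on a device: mark or flush the affected FIB
 * entries via rt6_sync_down_dev(), release uncached dsts that still pin the
 * device (see icmp6_dst_alloc()), and finally drop its ndisc neighbour
 * entries.
 */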
4441 void rt6_disable_ip(struct net_device *dev, unsigned long event)
4443 rt6_sync_down_dev(dev, event);
4444 rt6_uncached_list_flush_dev(dev_net(dev), dev);
4445 neigh_ifdown(&nd_tbl, dev);
4448 struct rt6_mtu_change_arg {
4449 struct net_device *dev;
4451 struct fib6_info *f6i;
4454 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
4456 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
4457 struct fib6_info *f6i = arg->f6i;
4459 /* For an administrative MTU increase there is no way to discover
4460 * an IPv6 PMTU increase, so the PMTU has to be updated here.
4461 * Since RFC 1981 doesn't cover administrative MTU increases,
4462 * updating the PMTU on increase is a MUST (e.g. for jumbo frames).
4464 if (nh->fib_nh_dev == arg->dev) {
4465 struct inet6_dev *idev = __in6_dev_get(arg->dev);
4466 u32 mtu = f6i->fib6_pmtu;
4468 if (mtu >= arg->mtu ||
4469 (mtu < arg->mtu && mtu == idev->cnf.mtu6))
4470 fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
4472 spin_lock_bh(&rt6_exception_lock);
4473 rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
4474 spin_unlock_bh(&rt6_exception_lock);
4480 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
4482 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
4483 struct inet6_dev *idev;
4485 /* In IPv6, PMTU discovery is not optional,
4486 so the RTAX_MTU lock cannot disable it.
4487 We still use this lock to block changes
4488 caused by addrconf/ndisc.
4491 idev = __in6_dev_get(arg->dev);
4495 if (fib6_metric_locked(f6i, RTAX_MTU))
4499 return fib6_nh_mtu_change(f6i->fib6_nh, arg);
4502 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
4504 struct rt6_mtu_change_arg arg = {
4509 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
4512 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
4513 [RTA_UNSPEC] = { .strict_start_type = RTA_DPORT + 1 },
4514 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
4515 [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
4516 [RTA_OIF] = { .type = NLA_U32 },
4517 [RTA_IIF] = { .type = NLA_U32 },
4518 [RTA_PRIORITY] = { .type = NLA_U32 },
4519 [RTA_METRICS] = { .type = NLA_NESTED },
4520 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
4521 [RTA_PREF] = { .type = NLA_U8 },
4522 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
4523 [RTA_ENCAP] = { .type = NLA_NESTED },
4524 [RTA_EXPIRES] = { .type = NLA_U32 },
4525 [RTA_UID] = { .type = NLA_U32 },
4526 [RTA_MARK] = { .type = NLA_U32 },
4527 [RTA_TABLE] = { .type = NLA_U32 },
4528 [RTA_IP_PROTO] = { .type = NLA_U8 },
4529 [RTA_SPORT] = { .type = NLA_U16 },
4530 [RTA_DPORT] = { .type = NLA_U16 },
4533 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
4534 struct fib6_config *cfg,
4535 struct netlink_ext_ack *extack)
4538 struct nlattr *tb[RTA_MAX+1];
4542 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
4543 rtm_ipv6_policy, extack);
4548 rtm = nlmsg_data(nlh);
4550 *cfg = (struct fib6_config){
4551 .fc_table = rtm->rtm_table,
4552 .fc_dst_len = rtm->rtm_dst_len,
4553 .fc_src_len = rtm->rtm_src_len,
4555 .fc_protocol = rtm->rtm_protocol,
4556 .fc_type = rtm->rtm_type,
4558 .fc_nlinfo.portid = NETLINK_CB(skb).portid,
4559 .fc_nlinfo.nlh = nlh,
4560 .fc_nlinfo.nl_net = sock_net(skb->sk),
4563 if (rtm->rtm_type == RTN_UNREACHABLE ||
4564 rtm->rtm_type == RTN_BLACKHOLE ||
4565 rtm->rtm_type == RTN_PROHIBIT ||
4566 rtm->rtm_type == RTN_THROW)
4567 cfg->fc_flags |= RTF_REJECT;
4569 if (rtm->rtm_type == RTN_LOCAL)
4570 cfg->fc_flags |= RTF_LOCAL;
4572 if (rtm->rtm_flags & RTM_F_CLONED)
4573 cfg->fc_flags |= RTF_CACHE;
4575 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4577 if (tb[RTA_GATEWAY]) {
4578 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4579 cfg->fc_flags |= RTF_GATEWAY;
4582 NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
4587 int plen = (rtm->rtm_dst_len + 7) >> 3;
4589 if (nla_len(tb[RTA_DST]) < plen)
4592 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
4596 int plen = (rtm->rtm_src_len + 7) >> 3;
4598 if (nla_len(tb[RTA_SRC]) < plen)
4601 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
4604 if (tb[RTA_PREFSRC])
4605 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
4608 cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4610 if (tb[RTA_PRIORITY])
4611 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
4613 if (tb[RTA_METRICS]) {
4614 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
4615 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
4619 cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
4621 if (tb[RTA_MULTIPATH]) {
4622 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
4623 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
4625 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
4626 cfg->fc_mp_len, extack);
4632 pref = nla_get_u8(tb[RTA_PREF]);
4633 if (pref != ICMPV6_ROUTER_PREF_LOW &&
4634 pref != ICMPV6_ROUTER_PREF_HIGH)
4635 pref = ICMPV6_ROUTER_PREF_MEDIUM;
4636 cfg->fc_flags |= RTF_PREF(pref);
4640 cfg->fc_encap = tb[RTA_ENCAP];
4642 if (tb[RTA_ENCAP_TYPE]) {
4643 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
4645 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
4650 if (tb[RTA_EXPIRES]) {
4651 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
4653 if (addrconf_finite_timeout(timeout)) {
4654 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
4655 cfg->fc_flags |= RTF_EXPIRES;
4665 struct fib6_info *fib6_info;
4666 struct fib6_config r_cfg;
4667 struct list_head next;
4670 static int ip6_route_info_append(struct net *net,
4671 struct list_head *rt6_nh_list,
4672 struct fib6_info *rt,
4673 struct fib6_config *r_cfg)
4678 list_for_each_entry(nh, rt6_nh_list, next) {
4679 /* check if fib6_info already exists */
4680 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
4684 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
4688 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
4689 list_add_tail(&nh->next, rt6_nh_list);
4694 static void ip6_route_mpath_notify(struct fib6_info *rt,
4695 struct fib6_info *rt_last,
4696 struct nl_info *info,
4699 /* if this is an APPEND route, then rt points to the first route
4700 * inserted and rt_last points to the last route inserted. Userspace
4701 * wants a consistent dump of the route which starts at the first
4702 * nexthop. Since sibling routes are always added at the end of
4703 * the list, find the first sibling of the last route appended
4705 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
4706 rt = list_first_entry(&rt_last->fib6_siblings,
4712 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
4715 static int ip6_route_multipath_add(struct fib6_config *cfg,
4716 struct netlink_ext_ack *extack)
4718 struct fib6_info *rt_notif = NULL, *rt_last = NULL;
4719 struct nl_info *info = &cfg->fc_nlinfo;
4720 struct fib6_config r_cfg;
4721 struct rtnexthop *rtnh;
4722 struct fib6_info *rt;
4723 struct rt6_nh *err_nh;
4724 struct rt6_nh *nh, *nh_safe;
4730 int replace = (cfg->fc_nlinfo.nlh &&
4731 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
4732 LIST_HEAD(rt6_nh_list);
4734 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
4735 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
4736 nlflags |= NLM_F_APPEND;
4738 remaining = cfg->fc_mp_len;
4739 rtnh = (struct rtnexthop *)cfg->fc_mp;
4741 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
4742 * fib6_info structs, one per nexthop
4744 while (rtnh_ok(rtnh, remaining)) {
4745 memcpy(&r_cfg, cfg, sizeof(*cfg));
4746 if (rtnh->rtnh_ifindex)
4747 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4749 attrlen = rtnh_attrlen(rtnh);
4751 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4753 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4755 r_cfg.fc_gateway = nla_get_in6_addr(nla);
4756 r_cfg.fc_flags |= RTF_GATEWAY;
4758 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
4759 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
4761 r_cfg.fc_encap_type = nla_get_u16(nla);
4764 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
4765 rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
4771 if (!rt6_qualify_for_ecmp(rt)) {
4773 NL_SET_ERR_MSG(extack,
4774 "Device only routes can not be added for IPv6 using the multipath API.");
4775 fib6_info_release(rt);
4779 rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
4781 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
4784 fib6_info_release(rt);
4788 rtnh = rtnh_next(rtnh, &remaining);
4791 /* for add and replace send one notification with all nexthops.
4792 * Skip the notification in fib6_add_rt2node and send one with
4793 * the full route when done
4795 info->skip_notify = 1;
4798 list_for_each_entry(nh, &rt6_nh_list, next) {
4799 err = __ip6_ins_rt(nh->fib6_info, info, extack);
4800 fib6_info_release(nh->fib6_info);
4803 /* save reference to last route successfully inserted */
4804 rt_last = nh->fib6_info;
4806 /* save reference to first route for notification */
4808 rt_notif = nh->fib6_info;
4811 /* nh->fib6_info is used or freed at this point, reset to NULL */
4812 nh->fib6_info = NULL;
4815 NL_SET_ERR_MSG_MOD(extack,
4816 "multipath route replace failed (check consistency of installed routes)");
4821 /* Because each route is added like a single route, we remove
4822 * these flags after the first nexthop: if there is a collision,
4823 * we have already failed to add the first nexthop:
4824 * fib6_add_rt2node() has rejected it; when replacing, old
4825 * nexthops have been replaced by the first new one, the rest should
4828 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
4833 /* success ... tell user about new route */
4834 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
4838 /* send notification for routes that were added so that
4839 * the delete notifications sent by ip6_route_del are
4843 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
4845 /* Delete routes that were already added */
4846 list_for_each_entry(nh, &rt6_nh_list, next) {
4849 ip6_route_del(&nh->r_cfg, extack);
4853 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
4855 fib6_info_release(nh->fib6_info);
4856 list_del(&nh->next);
4863 static int ip6_route_multipath_del(struct fib6_config *cfg,
4864 struct netlink_ext_ack *extack)
4866 struct fib6_config r_cfg;
4867 struct rtnexthop *rtnh;
4870 int err = 1, last_err = 0;
4872 remaining = cfg->fc_mp_len;
4873 rtnh = (struct rtnexthop *)cfg->fc_mp;
4875 /* Parse a Multipath Entry */
4876 while (rtnh_ok(rtnh, remaining)) {
4877 memcpy(&r_cfg, cfg, sizeof(*cfg));
4878 if (rtnh->rtnh_ifindex)
4879 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4881 attrlen = rtnh_attrlen(rtnh);
4883 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4885 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4887 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
4888 r_cfg.fc_flags |= RTF_GATEWAY;
4891 err = ip6_route_del(&r_cfg, extack);
4895 rtnh = rtnh_next(rtnh, &remaining);
4901 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4902 struct netlink_ext_ack *extack)
4904 struct fib6_config cfg;
4907 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4912 return ip6_route_multipath_del(&cfg, extack);
4914 cfg.fc_delete_all_nh = 1;
4915 return ip6_route_del(&cfg, extack);
4919 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4920 struct netlink_ext_ack *extack)
4922 struct fib6_config cfg;
4925 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
4929 if (cfg.fc_metric == 0)
4930 cfg.fc_metric = IP6_RT_PRIO_USER;
4933 return ip6_route_multipath_add(&cfg, extack);
4935 return ip6_route_add(&cfg, GFP_KERNEL, extack);
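/*
 * Upper bound on the netlink message size rt6_fill_node() may need for this
 * route: a fixed rtmsg plus per-attribute worst cases, with one extra
 * RTA_MULTIPATH nexthop block per sibling when the route is multipath.
 */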
4938 static size_t rt6_nlmsg_size(struct fib6_info *rt)
4940 int nexthop_len = 0;
4943 nexthop_len += nla_total_size(4); /* RTA_NH_ID */
4945 if (rt->fib6_nsiblings) {
4946 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
4947 + NLA_ALIGN(sizeof(struct rtnexthop))
4948 + nla_total_size(16) /* RTA_GATEWAY */
4949 + lwtunnel_get_encap_size(rt->fib6_nh->fib_nh_lws);
4951 nexthop_len *= rt->fib6_nsiblings;
4954 return NLMSG_ALIGN(sizeof(struct rtmsg))
4955 + nla_total_size(16) /* RTA_SRC */
4956 + nla_total_size(16) /* RTA_DST */
4957 + nla_total_size(16) /* RTA_GATEWAY */
4958 + nla_total_size(16) /* RTA_PREFSRC */
4959 + nla_total_size(4) /* RTA_TABLE */
4960 + nla_total_size(4) /* RTA_IIF */
4961 + nla_total_size(4) /* RTA_OIF */
4962 + nla_total_size(4) /* RTA_PRIORITY */
4963 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
4964 + nla_total_size(sizeof(struct rta_cacheinfo))
4965 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
4966 + nla_total_size(1) /* RTA_PREF */
4967 + lwtunnel_get_encap_size(rt->fib6_nh->fib_nh_lws)
4971 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
4972 unsigned char *flags)
4974 if (nexthop_is_multipath(nh)) {
4977 mp = nla_nest_start(skb, RTA_MULTIPATH);
4979 goto nla_put_failure;
4981 if (nexthop_mpath_fill_node(skb, nh))
4982 goto nla_put_failure;
4984 nla_nest_end(skb, mp);
4986 struct fib6_nh *fib6_nh;
4988 fib6_nh = nexthop_fib6_nh(nh);
4989 if (fib_nexthop_info(skb, &fib6_nh->nh_common,
4991 goto nla_put_failure;
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags)
{
	struct rt6_info *rt6 = (struct rt6_info *)dst;
	struct rt6key *rt6_dst, *rt6_src;
	u32 *pmetrics, table, rt6_flags;
	unsigned char nh_flags = 0;
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	long expires = 0;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	if (rt6) {
		rt6_dst = &rt6->rt6i_dst;
		rt6_src = &rt6->rt6i_src;
		rt6_flags = rt6->rt6i_flags;
	} else {
		rt6_dst = &rt->fib6_dst;
		rt6_src = &rt->fib6_src;
		rt6_flags = rt->fib6_flags;
	}

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt6_dst->plen;
	rtm->rtm_src_len = rt6_src->plen;
	rtm->rtm_tos = 0;
	if (rt->fib6_table)
		table = rt->fib6_table->tb6_id;
	else
		table = RT6_TABLE_UNSPEC;
	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table))
		goto nla_put_failure;

	rtm->rtm_type = rt->fib6_type;
	rtm->rtm_flags = 0;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->fib6_protocol;

	if (rt6_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

	if (dest) {
		if (nla_put_in6_addr(skb, RTA_DST, dest))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
			int err = ip6mr_get_route(net, skb, rtm, portid);

			if (err == 0)
				return 0;
			if (err < 0)
				goto nla_put_failure;
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dest) {
		struct in6_addr saddr_buf;

		if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	if (rt->fib6_prefsrc.plen) {
		struct in6_addr saddr_buf;

		saddr_buf = rt->fib6_prefsrc.addr;
		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
	if (rtnetlink_put_metrics(skb, pmetrics) < 0)
		goto nla_put_failure;

	if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
		goto nla_put_failure;

	/* For multipath routes, walk the siblings list and add
	 * each as a nexthop within RTA_MULTIPATH.
	 */
	if (rt6) {
		if (rt6_flags & RTF_GATEWAY &&
		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
			goto nla_put_failure;

		if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
			goto nla_put_failure;
	} else if (rt->fib6_nsiblings) {
		struct fib6_info *sibling, *next_sibling;
		struct nlattr *mp;

		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
				    rt->fib6_nh->fib_nh_weight) < 0)
			goto nla_put_failure;

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->fib6_siblings, fib6_siblings) {
			if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
					    sibling->fib6_nh->fib_nh_weight) < 0)
				goto nla_put_failure;
		}

		nla_nest_end(skb, mp);
	} else if (rt->nh) {
		if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
			goto nla_put_failure;

		if (nexthop_is_blackhole(rt->nh))
			rtm->rtm_type = RTN_BLACKHOLE;

		if (rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
			goto nla_put_failure;

		rtm->rtm_flags |= nh_flags;
	} else {
		if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common,
				     &nh_flags, false) < 0)
			goto nla_put_failure;

		rtm->rtm_flags |= nh_flags;
	}

	if (rt6_flags & RTF_EXPIRES) {
		expires = dst ? dst->expires : rt->expires;
		expires -= jiffies;
	}

	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
		goto nla_put_failure;

	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
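
/* Route dump filtering helper: true if the route (or any multipath
 * sibling) has the given device as its nexthop device.
 */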
static bool fib6_info_uses_dev(const struct fib6_info *f6i,
			       const struct net_device *dev)
{
	if (f6i->fib6_nh->fib_nh_dev == dev)
		return true;

	if (f6i->fib6_nsiblings) {
		struct fib6_info *sibling, *next_sibling;

		list_for_each_entry_safe(sibling, next_sibling,
					 &f6i->fib6_siblings, fib6_siblings) {
			if (sibling->fib6_nh->fib_nh_dev == dev)
				return true;
		}
	}

	return false;
}
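
/* fib6 dump callback: emit one route, honouring the RTM_F_PREFIX flag and
 * any device/protocol/type filter carried in the dump request.
 */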
int rt6_dump_route(struct fib6_info *rt, void *p_arg)
{
	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
	struct fib_dump_filter *filter = &arg->filter;
	unsigned int flags = NLM_F_MULTI;
	struct net *net = arg->net;

	if (rt == net->ipv6.fib6_null_entry)
		return 0;

	if ((filter->flags & RTM_F_PREFIX) &&
	    !(rt->fib6_flags & RTF_PREFIX_RT)) {
		/* success since this is not a prefix route */
		return 0;
	}
	if (filter->filter_set) {
		if ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
		    (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
		    (filter->protocol && rt->fib6_protocol != filter->protocol)) {
			return 0;
		}
		flags |= NLM_F_DUMP_FILTERED;
	}

	return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
			     RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
			     arg->cb->nlh->nlmsg_seq, flags);
}
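
/* Validate an RTM_GETROUTE request.  With strict checking enabled the
 * header fields and attributes are fully policed; otherwise only a
 * best-effort parse of the attributes is done.
 */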
static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
					const struct nlmsghdr *nlh,
					struct nlattr **tb,
					struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid header for get route request");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
					      rtm_ipv6_policy, extack);

	rtm = nlmsg_data(nlh);
	if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
	    rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
	    rtm->rtm_type) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
		return -EINVAL;
	}
	if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid flags for get route request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
					    rtm_ipv6_policy, extack);
	if (err)
		return err;

	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
		return -EINVAL;
	}

	for (i = 0; i <= RTA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case RTA_SRC:
		case RTA_DST:
		case RTA_IIF:
		case RTA_OIF:
		case RTA_MARK:
		case RTA_UID:
		case RTA_SPORT:
		case RTA_DPORT:
		case RTA_IP_PROTO:
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
			return -EINVAL;
		}
	}

	return 0;
}
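
/* RTM_GETROUTE: resolve the flow described by the request (input path if
 * RTA_IIF is present, output path otherwise) and unicast the result back;
 * RTM_F_FIB_MATCH reports the matching FIB entry rather than the clone.
 */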
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	int err, iif = 0, oif = 0;
	struct fib6_info *from;
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6 = {};
	bool fibmatch;

	err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();

	if (tb[RTA_SPORT])
		fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &fl6.flowi6_proto, AF_INET6,
						  extack);
		if (err)
			goto errout;
	}

	if (iif) {
		struct net_device *dev;
		int flags = 0;

		rcu_read_lock();

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			rcu_read_unlock();
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);

		rcu_read_unlock();
	} else {
		fl6.flowi6_oif = oif;

		dst = ip6_route_output(net, NULL, &fl6);
	}

	rt = container_of(dst, struct rt6_info, dst);
	if (rt->dst.error) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	skb_dst_set(skb, &rt->dst);

	rcu_read_lock();
	from = rcu_dereference(rt->from);
	if (from) {
		if (fibmatch)
			err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
					    iif, RTM_NEWROUTE,
					    NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, 0);
		else
			err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
					    &fl6.saddr, iif, RTM_NEWROUTE,
					    NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, 0);
	} else {
		err = -ENETUNREACH;
	}
	rcu_read_unlock();

	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}
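
/* Broadcast a single route add/replace/delete to RTNLGRP_IPV6_ROUTE
 * listeners.
 */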
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
		     unsigned int nlm_flags)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err;

	err = -ENOBUFS;
	seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
			    event, info->portid, seq, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
void fib6_rt_update(struct net *net, struct fib6_info *rt,
		    struct nl_info *info)
{
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	/* call_fib6_entry_notifiers will be removed when the in-kernel
	 * notifier is implemented and supported for nexthop objects.
	 */
	call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
			    RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
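
/* Loopback device notifier: point the per-netns null/prohibit/blackhole
 * routes at the loopback device on register and drop the references on
 * unregister.
 */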
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (!(dev->flags & IFF_LOOPBACK))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	} else if (event == NETDEV_UNREGISTER &&
		   dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER can be fired multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
	}

	return NOTIFY_OK;
}
#ifdef CONFIG_PROC_FS
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;

	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}
#endif	/* CONFIG_PROC_FS */
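
/* net.ipv6.route.flush: writing to this sysctl triggers a flush of cached
 * routes via fib6_run_gc(), using the written value as the delay.
 */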
#ifdef CONFIG_SYSCTL

static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
				     void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int delay;
	int ret;

	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}
static struct ctl_table ipv6_route_table_template[] = {
	{
		.procname	= "flush",
		.data		= &init_net.ipv6.sysctl.flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv6_sysctl_rtcache_flush
	},
	{
		.procname	= "gc_thresh",
		.data		= &ip6_dst_ops_template.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "skip_notify_on_dev_down",
		.data		= &init_net.ipv6.sysctl.skip_notify_on_dev_down,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};
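
/* Per-namespace sysctl setup: duplicate the template and point each entry
 * at this netns' copy of the variable.
 */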
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
#endif
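
/* Per-netns init: clone the dst_ops and the null (and, with multiple
 * tables, prohibit/blackhole) template routes and set the routing sysctl
 * defaults.
 */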
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;
	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
	       sizeof(*net->ipv6.fib6_null_entry));

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_fib6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
	net->ipv6.sysctl.skip_notify_on_dev_down = 0;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.fib6_null_entry);
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
			sizeof(struct ipv6_route_iter));
	proc_create_net_single("rt6_stats", 0444, net->proc_net,
			       rt6_stats_seq_show, NULL);
#endif
	return 0;
}

static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}

static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static struct pernet_operations ipv6_inetpeer_ops = {
	.init = ipv6_inetpeer_init,
	.exit = ipv6_inetpeer_exit,
};

static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
void __init ip6_route_init_special_entries(void)
{
	/* The loopback device is registered before this code runs, so the
	 * loopback reference in rt6_info is not taken automatically; take
	 * it manually for init_net.
	 */
	init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
}
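
/* Subsystem init: create the dst cache, register the per-netns ops, the
 * fib6/xfrm6/rules components, the RTM_*ROUTE handlers and the device
 * notifier, unwinding everything on failure.
 */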
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}