// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <linux/rhashtable.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
#include <net/ip_tunnels.h>

#include <linux/nospec.h>
struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);
/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   This way the data path is entirely free of exclusive locks.
 */
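/* Editor's sketch (not part of the original file): the locking pairing
 * the comment above describes. Readers on the packet path take mrt_lock
 * for read only; updaters run under rtnl_lock() and take it for write
 * with BHs disabled. The helper is hypothetical and purely illustrative.
 */
static void __maybe_unused ip6mr_locking_sketch(void)
{
	read_lock(&mrt_lock);		/* data path: look up vifs/cache */
	read_unlock(&mrt_lock);

	write_lock_bh(&mrt_lock);	/* control path, under rtnl_lock() */
	write_unlock_bh(&mrt_lock);
}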
static struct kmem_cache *mrt_cachep __read_mostly;

static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct net_device *dev, struct sk_buff *skb,
			   struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \
				lockdep_rtnl_is_held() || \
				list_empty(&net->ipv6.mr6_tables))
static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv6.mr6_tables)
		return NULL;
	return ret;
}
static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (id == mrt->id)
			return mrt;
	}
	return NULL;
}
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	/* update flow if oif or iif point to device enslaved to l3mdev */
	l3mdev_update_flow(net, flowi6_to_flowi(flp6));

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	arg->table = fib_rule_get_table(rule, arg);

	mrt = ip6mr_get_table(rule->fr_net, arg->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	return 0;
}
static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}
static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	rtnl_lock();
	ip6mr_free_table(mrt);
	rtnl_unlock();
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}
static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
			    struct netlink_ext_ack *extack)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR, extack);
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
}
bool ip6mr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
	       rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
}
EXPORT_SYMBOL(ip6mr_rule_default);
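/* Editor's sketch (hypothetical caller, not in the original file): a
 * consumer of rule notifications can use ip6mr_rule_default() to tell
 * whether a rule is the catch-all one installed by ip6mr_rules_init()
 * above, i.e. matchall + FR_ACT_TO_TBL to RT6_TABLE_DFLT and no l3mdev.
 */
static bool __maybe_unused ip6mr_rule_is_catchall_sketch(const struct fib_rule *rule)
{
	/* only the implicitly-created default rule needs no extra state */
	return ip6mr_rule_default(rule);
}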
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)

static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	if (!mrt)
		return net->ipv6.mrt6;
	return NULL;
}

static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}

static int __net_init ip6mr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv6.mrt6 = mrt;
	return 0;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}

static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb,
			    struct netlink_ext_ack *extack)
{
	return 0;
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return 0;
}
#endif
static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct mfc6_cache_cmp_arg *cmparg = arg->key;
	struct mfc6_cache *c = (struct mfc6_cache *)ptr;

	return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
	       !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
}

static const struct rhashtable_params ip6mr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc6_cache, cmparg),
	.key_len = sizeof(struct mfc6_cache_cmp_arg),
	.nelem_hint = 3,
	.obj_cmpfn = ip6mr_hash_cmp,
	.automatic_shrinking = true,
};
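/* Editor's note as a sketch (not in the original file): rhashtable's
 * obj_cmpfn convention is "0 means match", hence the negated address
 * comparisons in ip6mr_hash_cmp() above. The same test written long-hand:
 */
static int __maybe_unused ip6mr_hash_cmp_sketch(const struct mfc6_cache *c,
						const struct mfc6_cache_cmp_arg *key)
{
	if (ipv6_addr_equal(&c->mf6c_mcastgrp, &key->mf6c_mcastgrp) &&
	    ipv6_addr_equal(&c->mf6c_origin, &key->mf6c_origin))
		return 0;	/* match */
	return 1;		/* no match */
}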
static void ip6mr_new_table_set(struct mr_table *mrt,
				struct net *net)
{
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
}

static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
	.mf6c_origin = IN6ADDR_ANY_INIT,
	.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
};

static struct mr_table_ops ip6mr_mr_table_ops = {
	.rht_params = &ip6mr_rht_params,
	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
};
static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
			      ipmr_expire_process, ip6mr_new_table_set);
}
static void ip6mr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC |
				 MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}
#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing
 * /proc/ip6_mr_cache /proc/ip6_mr_vif
 */

static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return mr_vif_seq_start(seq, pos);
}
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct mr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct mr_mfc_iter *it = seq->private;
		struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->_c.mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->_c.mfc_un.res.pkt,
				   mfc->_c.mfc_un.res.bytes,
				   mfc->_c.mfc_un.res.wrong_if);
			for (n = mfc->_c.mfc_un.res.minvif;
			     n < mfc->_c.mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->_c.mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d", n,
						   mfc->_c.mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = mr_mfc_seq_next,
	.stop  = mr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
#endif
#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif_table[reg_vif_num].dev;
	dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
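/* Editor's sketch (not in the original file), restating the checksum rule
 * pim6_rcv() applies above: a PIM Register is accepted if the checksum
 * over the 8-byte PIM header alone verifies, or, failing that, if the
 * checksum over the whole packet verifies.
 */
static bool __maybe_unused pim6_csum_ok_sketch(struct sk_buff *skb,
					       const struct pimreghdr *pim)
{
	__wsum hdr = csum_partial(pim, sizeof(*pim), 0);

	return !csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
				sizeof(*pim), IPPROTO_PIM, hdr) ||
	       !csum_fold(skb_checksum(skb, 0, skb->len, 0));
}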
static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};
/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto tx_err;

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;

tx_err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	/* Ethernet payload minus the IPv6 header and the 8-byte
	 * PIM register header (sizeof(struct pimreghdr)).
	 */
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev, NULL))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif
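/* Editor's sketch (not in the original file): the device naming above,
 * with the arithmetic made explicit. Assuming IFNAMSIZ == 16, "pim6reg"
 * takes 7 characters, leaving 8 digits plus the NUL — which is why
 * MRT6_TABLE below rejects table ids >= 100000000 (9 digits).
 */
static void __maybe_unused ip6mr_reg_vif_name_sketch(char name[IFNAMSIZ], u32 id)
{
	/* "pim6reg" + up to 8 digits + NUL fits exactly in IFNAMSIZ */
	snprintf(name, IFNAMSIZ, "pim6reg%u", id);
}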
static int call_ip6mr_vif_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct vif_device *vif,
					  mifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     vif, vif_index, tb_id,
				     &net->ipv6.ipmr_seq);
}

static int call_ip6mr_mfc_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct mfc6_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
}
/* Delete a VIF entry */
static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
					       FIB_EVENT_VIF_DEL, v, vifi,
					       mrt->id);

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put_track(dev, &v->dev_tracker);
	return 0;
}
static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mr_mfc *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr_table *mrt,
				    struct mr_mfc *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
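/* Editor's worked example (hypothetical values, not in the original file):
 * giving vifs 2 and 5 ttls of 1 and 64 makes the loop above compute
 * minvif == 2 and maxvif == 6 (assuming both vifs exist); every other
 * slot keeps 255, meaning "never forward". ip6_mr_forward() later scans
 * only the half-open window [minvif, maxvif).
 */
static void __maybe_unused ip6mr_thresholds_example(struct mr_table *mrt,
						    struct mr_mfc *cache)
{
	unsigned char ttls[MAXMIFS];

	memset(ttls, 255, MAXMIFS);
	ttls[2] = 1;	/* forward on vif 2 if hop_limit > 1 */
	ttls[5] = 64;	/* forward on vif 5 if hop_limit > 64 */
	ip6mr_update_thresholds(mrt, cache, ttls);
}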
static int mif6_add(struct net *net, struct mr_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
			vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
			MIFF_REGISTER);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
	netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
				       v, vifi, mrt->id);
	return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find(mrt, &arg);
}
/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = in6addr_any,
		.mf6c_mcastgrp = *mcastgrp,
	};

	if (ipv6_addr_any(mcastgrp))
		return mr_mfc_find_any_parent(mrt, mifi);
	return mr_mfc_find_any(mrt, mifi, &arg);
}
/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc6_cache *
ip6mr_cache_find_parent(struct mr_table *mrt,
			const struct in6_addr *origin,
			const struct in6_addr *mcastgrp,
			int parent)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find_parent(mrt, &arg, parent);
}
/* Allocate a multicast cache entry */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->_c.mfc_un.res.minvif = MAXMIFS;
	c->_c.free = ip6mr_cache_free_rcu;
	refcount_set(&c->_c.mfc_un.res.refcount, 1);
	return c;
}

static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
	c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (mr_fill_mroute(mrt, skb, &c->_c,
					   nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb->dev, skb, c);
	}
}
/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sock *mroute6_sk;
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+ sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	msg->im6_src = ipv6_hdr(pkt)->saddr;
	msg->im6_dst = ipv6_hdr(pkt)->daddr;

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	rcu_read_lock();
	mroute6_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute6_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/* Deliver to user space multicast routing algorithms */
	ret = sock_queue_rcv_skb(mroute6_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
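/* Editor's sketch (not in the original file): what the daemon finds at the
 * head of an skb queued by ip6mr_cache_report() above — a struct mrt6msg
 * whose im6_msgtype/im6_mif say why the packet was bounced and on which
 * mif it arrived. The transport header was reset to point at it.
 */
static void __maybe_unused ip6mr_msg_decode_sketch(const struct sk_buff *skb)
{
	const struct mrt6msg *msg;

	msg = (const struct mrt6msg *)skb_transport_header(skb);
	pr_debug("ip6mr: report type %d on mif %d\n",
		 msg->im6_msgtype, msg->im6_mif);
}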
/* Queue a packet for resolution. It gets locked cache entry! */
static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
				  struct sk_buff *skb, struct net_device *dev)
{
	struct mfc6_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		c = ip6mr_cache_alloc_unres();
		if (!c) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->_c.mfc_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->_c.list, &mrt->mfc_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/* See if we can append the packet */
	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		if (dev) {
			skb->dev = dev;
			skb->skb_iif = dev->ifindex;
		}
		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	struct mfc6_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
	list_del_rcu(&c->_c.list);

	call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
				       FIB_EVENT_ENTRY_DEL, c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_DELROUTE);
	mr_cache_put(&c->_c);
	return 0;
}
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}
static unsigned int ip6mr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
}

static int ip6mr_dump(struct net *net, struct notifier_block *nb,
		      struct netlink_ext_ack *extack)
{
	return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
		       ip6mr_mr_table_iter, &mrt_lock, extack);
}
static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.fib_seq_read	= ip6mr_seq_read,
	.fib_dump	= ip6mr_dump,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	net->ipv6.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	net->ipv6.ip6mr_notifier_ops = ops;

	return 0;
}

static void __net_exit ip6mr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
	net->ipv6.ip6mr_notifier_ops = NULL;
}
/* Setup for IP multicast routing */
static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_notifier_init(net);
	if (err)
		return err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto ip6mr_rules_fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
			sizeof(struct mr_vif_iter)))
		goto proc_vif_fail;
	if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
			sizeof(struct mr_mfc_iter)))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
ip6mr_rules_fail:
	ip6mr_notifier_exit(net);
	return err;
}
static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
	ip6mr_notifier_exit(net);
}

static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
				   NULL, ip6mr_rtm_dumproute, 0);
	if (err == 0)
		return 0;

#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}
static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	unsigned char ttls[MAXMIFS];
	struct mfc6_cache *uc, *c;
	struct mr_mfc *_uc;
	bool found;
	int i, err;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->_c.mfc_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, &c->_c, ttls);
		if (!mrtsock)
			c->_c.mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
					       c, mrt->id);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->_c.mfc_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, &c->_c, ttls);
	if (!mrtsock)
		c->_c.mfc_flags |= MFC_STATIC;

	err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
				  ip6mr_rht_params);
	if (err) {
		pr_err("ip6mr: rhtable insert error %d\n", err);
		ip6mr_cache_free(c);
		return err;
	}
	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
		uc = (struct mfc6_cache *)_uc;
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&_uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
				       c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt, int flags)
{
	struct mr_mfc *c, *tmp;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	if (flags & (MRT6_FLUSH_MIFS | MRT6_FLUSH_MIFS_STATIC)) {
		for (i = 0; i < mrt->maxvif; i++) {
			if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
			     !(flags & MRT6_FLUSH_MIFS_STATIC)) ||
			    (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT6_FLUSH_MIFS)))
				continue;
			mif6_delete(mrt, i, 0, &list);
		}
		unregister_netdevice_many(&list);
	}

	/* Wipe the cache */
	if (flags & (MRT6_FLUSH_MFC | MRT6_FLUSH_MFC_STATIC)) {
		list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
			if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC_STATIC)) ||
			    (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT6_FLUSH_MFC)))
				continue;
			rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
			list_del_rcu(&c->list);
			call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
						       FIB_EVENT_ENTRY_DEL,
						       (struct mfc6_cache *)c, mrt->id);
			mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
			mr_cache_put(c);
		}
	}

	if (flags & MRT6_FLUSH_MFC) {
		if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
			spin_lock_bh(&mfc_unres_lock);
			list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
				list_del(&c->list);
				mr6_netlink_event(mrt, (struct mfc6_cache *)c,
						  RTM_DELROUTE);
				ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
			}
			spin_unlock_bh(&mfc_unres_lock);
		}
	}
}
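/* Editor's usage sketch (not in the original file): ip6mr_free_table()
 * passes all four MRT6_FLUSH_* flags to wipe a table completely, while
 * the MRT6_DONE teardown in ip6mr_sk_done() below flushes only the
 * non-static mifs and MFC entries, as restated here:
 */
static void __maybe_unused mroute_flush_dynamic_sketch(struct mr_table *mrt)
{
	mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
}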
static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (rtnl_dereference(mrt->mroute_sk)) {
		err = -EADDRINUSE;
	} else {
		rcu_assign_pointer(mrt->mroute_sk, sk);
		sock_set_flag(sk, SOCK_RCU_FREE);
		net->ipv6.devconf_all->mc_forwarding++;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}
int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			write_lock_bh(&mrt_lock);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			/* Note that mroute_sk had SOCK_RCU_FREE set,
			 * so the RCU grace period before sk freeing
			 * is guaranteed by sk_destruct()
			 */
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, MRT6_FLUSH_MIFS | MRT6_FLUSH_MFC);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}
bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return false;

	return rcu_access_pointer(mrt->mroute_sk);
}
EXPORT_SYMBOL(mroute6_is_socket);
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
			  unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_sockptr(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif,
			       sk == rtnl_dereference(mrt->mroute_sk));
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_sockptr(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		fallthrough;
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_sockptr(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk ==
					    rtnl_dereference(mrt->mroute_sk),
					    parent);
		rtnl_unlock();
		return ret;

	case MRT6_FLUSH:
	{
		int flags;

		if (optlen != sizeof(flags))
			return -EINVAL;
		if (copy_from_sockptr(&flags, optval, sizeof(flags)))
			return -EFAULT;
		rtnl_lock();
		mroute_clean_tables(mrt, flags);
		rtnl_unlock();
		return 0;
	}

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (copy_from_sockptr(&v, optval, sizeof(v)))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (copy_from_sockptr(&v, optval, sizeof(v)))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (copy_from_sockptr(&v, optval, sizeof(v)))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == rcu_access_pointer(mrt->mroute_sk))
			return -EBUSY;

		rtnl_lock();
		ret = 0;

		mrt = ip6mr_new_table(net, v);
		if (IS_ERR(mrt))
			ret = PTR_ERR(mrt);
		else
			raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
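/* Editor's userspace sketch (not kernel code, not in the original file;
 * assumes the standard uapi headers and an existing interface index
 * if_index): how a daemon such as pim6sd drives the options above.
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *
 *	struct mif6ctl mc = { .mif6c_mifi = 0, .mif6c_pifi = if_index };
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 */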
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};
int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		vr.mifi = array_index_nospec(vr.mifi, mrt->maxvif);
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}
/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicasting
	 * program, it should receive packets regardless of which interface
	 * the program joined on. Otherwise the program would have to join
	 * on all interfaces. On the other hand, a multihomed host (or
	 * router, but not mrouter) cannot join on more than one interface,
	 * since that would result in receiving duplicate packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}
static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct net_device *dev, struct sk_buff *skb,
			   struct mfc6_cache *c)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, dev);

	vif = c->_c.mfc_parent;
	c->_c.mfc_un.res.pkt++;
	c->_c.mfc_un.res.bytes += skb->len;
	c->_c.mfc_un.res.lastuse = jiffies;

	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		rcu_read_lock();
		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
			rcu_read_unlock();
			goto forward;
		}
		rcu_read_unlock();
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != dev) {
		c->_c.mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       c->_c.mfc_un.res.last_assert +
			       MFC_ASSERT_THRESH)) {
			c->_c.mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (ipv6_addr_any(&c->mf6c_origin) &&
	    ipv6_addr_any(&c->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != c->_c.mfc_parent &&
		    ipv6_hdr(skb)->hop_limit >
				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = c->_c.mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = c->_c.mfc_un.res.maxvif - 1;
	     ct >= c->_c.mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, psend);
		return;
	}

dont_forward:
	kfree_skb(skb);
}
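/* Editor's sketch (not in the original file) of the clone-all-but-last
 * pattern used by ip6_mr_forward() above: every matching oif except the
 * final one gets a clone, and the last transmission consumes the original
 * skb, saving one allocation per forwarded packet. oifs[]/n are
 * hypothetical stand-ins for the ttl-filtered vif scan.
 */
static void __maybe_unused ip6mr_clone_last_sketch(struct net *net,
						   struct mr_table *mrt,
						   struct sk_buff *skb,
						   int oifs[], int n)
{
	int i;

	for (i = 0; i < n - 1; i++) {
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (skb2)
			ip6mr_forward2(net, mrt, skb2, oifs[i]);
	}
	if (n > 0)
		ip6mr_forward2(net, mrt, skb, oifs[n - 1]);
	else
		kfree_skb(skb);
}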
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;
	struct net_device *dev;

	/* skb->dev passed in is the master dev for vrfs.
	 * Get the proper interface that does have a vif associated with it.
	 */
	dev = skb->dev;
	if (netif_is_l3_master(skb->dev)) {
		dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
		if (!dev) {
			kfree_skb(skb);
			return -ENODEV;
		}
	}

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (!cache) {
		int vif = ip6mr_find_vif(mrt, dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (!cache) {
		int vif;

		vif = ip6mr_find_vif(mrt, dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb, dev);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, dev, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
		    u32 portid)
{
	int err;
	struct mr_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		dev = skb->dev;
		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2, dev);
		read_unlock(&mrt_lock);

		return err;
	}

	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
	read_unlock(&mrt_lock);
	return err;
}
static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->_c.mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags)
{
	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
				 cmd, flags);
}
static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}
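/* Editor's worked example (illustrative only, not in the original file):
 * for a resolved entry in a table with two vifs, the size above is the
 * aligned rtmsg, the table/src/dst attributes, the iif, an empty
 * RTA_MULTIPATH header, two aligned rtnexthops, and the 64-bit padded
 * rta_mfc_stats — nla_total_size() rounds each attribute to 4 bytes.
 */
static int __maybe_unused mr6_msgsize_example(void)
{
	return mr6_msgsize(false, 2);
}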
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
					/* IP6MRA_CREPORT_SRC_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_DST_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct mrt6msg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct mrt6msg);
	msg = (struct mrt6msg *)skb_transport_header(pkt);

	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
			     &msg->im6_src) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
			     &msg->im6_dst))
		goto nla_put_failure;

	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct nlmsghdr *nlh = cb->nlh;
	struct fib_dump_filter filter = {};
	int err;

	if (cb->strict_check) {
		err = ip_valid_fib_dump_req(sock_net(skb->sk), nlh,
					    &filter, cb);
		if (err < 0)
			return err;
	}

	if (filter.table_id) {
		struct mr_table *mrt;

		mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id);
		if (!mrt) {
			if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR)
				return skb->len;

			NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
			return -ENOENT;
		}
		err = mr_table_dump(mrt, skb, cb, _ip6mr_fill_mroute,
				    &mfc_unres_lock, &filter);
		return skb->len ? : err;
	}

	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
				_ip6mr_fill_mroute, &mfc_unres_lock, &filter);
}