2 * Linux IPv6 multicast routing support for BSD pim6sd
3 * Based on net/ipv4/ipmr.c.
5 * (c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
6 * LSIIT Laboratory, Strasbourg, France
7 * (c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
9 * Copyright (C)2007,2008 USAGI/WIDE Project
10 * YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
19 #include <linux/uaccess.h>
20 #include <linux/types.h>
21 #include <linux/sched.h>
22 #include <linux/errno.h>
24 #include <linux/kernel.h>
25 #include <linux/fcntl.h>
26 #include <linux/stat.h>
27 #include <linux/socket.h>
28 #include <linux/inet.h>
29 #include <linux/netdevice.h>
30 #include <linux/inetdevice.h>
31 #include <linux/proc_fs.h>
32 #include <linux/seq_file.h>
33 #include <linux/init.h>
34 #include <linux/compat.h>
35 #include <net/protocol.h>
36 #include <linux/skbuff.h>
38 #include <linux/notifier.h>
39 #include <linux/if_arp.h>
40 #include <net/checksum.h>
41 #include <net/netlink.h>
42 #include <net/fib_rules.h>
45 #include <net/ip6_route.h>
46 #include <linux/mroute6.h>
47 #include <linux/pim.h>
48 #include <net/addrconf.h>
49 #include <linux/netfilter_ipv6.h>
50 #include <linux/export.h>
51 #include <net/ip6_checksum.h>
52 #include <linux/netconf.h>
55 struct fib_rule common;
62 /* Big lock, protecting the vif table, the mrt cache and the mroute socket state.
63    Note that changes are serialized via rtnl_lock.
66 static DEFINE_RWLOCK(mrt_lock);
68 /* Multicast router control variables */
70 /* Special spinlock for queue of unresolved entries */
71 static DEFINE_SPINLOCK(mfc_unres_lock);
73 /* We return to Alan's original scheme. The hash table of resolved
74    entries is changed only in process context and is protected
75    by the weak lock mrt_lock. The queue of unresolved entries is
76    protected by the strong spinlock mfc_unres_lock.
78    With this scheme the data path is entirely free of exclusive locks.
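/*
 * Editor's sketch (not part of the original file): the locking pattern the
 * comment above describes, as it is used further down.  Control-path code
 * (setsockopt handlers, notifiers) takes the writer side of mrt_lock under
 * RTNL, the forwarding path only ever takes the reader side, and the
 * unresolved queue has its own BH-safe spinlock:
 *
 *	write_lock_bh(&mrt_lock);		read_lock(&mrt_lock);
 *	... update vif_table / mfc cache ...	... look up vif, forward skb ...
 *	write_unlock_bh(&mrt_lock);		read_unlock(&mrt_lock);
 *
 *	spin_lock_bh(&mfc_unres_lock);
 *	... add/expire entries on mrt->mfc_unres_queue ...
 *	spin_unlock_bh(&mfc_unres_lock);
 */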
81 static struct kmem_cache *mrt_cachep __read_mostly;
83 static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
84 static void ip6mr_free_table(struct mr_table *mrt);
86 static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
87 struct sk_buff *skb, struct mfc6_cache *cache);
88 static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
89 mifi_t mifi, int assert);
90 static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
92 static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
93 static int ip6mr_rtm_dumproute(struct sk_buff *skb,
94 struct netlink_callback *cb);
95 static void mroute_clean_tables(struct mr_table *mrt, bool all);
96 static void ipmr_expire_process(struct timer_list *t);
98 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
99 #define ip6mr_for_each_table(mrt, net) \
100 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
102 static struct mr_table *ip6mr_mr_table_iter(struct net *net,
103 struct mr_table *mrt)
105 struct mr_table *ret;
108 ret = list_entry_rcu(net->ipv6.mr6_tables.next,
109 struct mr_table, list);
111 ret = list_entry_rcu(mrt->list.next,
112 struct mr_table, list);
114 if (&ret->list == &net->ipv6.mr6_tables)
119 static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
121 struct mr_table *mrt;
123 ip6mr_for_each_table(mrt, net) {
130 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
131 struct mr_table **mrt)
134 struct ip6mr_result res;
135 struct fib_lookup_arg arg = {
137 .flags = FIB_LOOKUP_NOREF,
140 err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
141 flowi6_to_flowi(flp6), 0, &arg);
148 static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
149 int flags, struct fib_lookup_arg *arg)
151 struct ip6mr_result *res = arg->result;
152 struct mr_table *mrt;
154 switch (rule->action) {
157 case FR_ACT_UNREACHABLE:
159 case FR_ACT_PROHIBIT:
161 case FR_ACT_BLACKHOLE:
166 mrt = ip6mr_get_table(rule->fr_net, rule->table);
173 static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
178 static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
182 static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
183 struct fib_rule_hdr *frh, struct nlattr **tb)
188 static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
194 static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
195 struct fib_rule_hdr *frh)
203 static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
204 .family = RTNL_FAMILY_IP6MR,
205 .rule_size = sizeof(struct ip6mr_rule),
206 .addr_size = sizeof(struct in6_addr),
207 .action = ip6mr_rule_action,
208 .match = ip6mr_rule_match,
209 .configure = ip6mr_rule_configure,
210 .compare = ip6mr_rule_compare,
211 .fill = ip6mr_rule_fill,
212 .nlgroup = RTNLGRP_IPV6_RULE,
213 .policy = ip6mr_rule_policy,
214 .owner = THIS_MODULE,
217 static int __net_init ip6mr_rules_init(struct net *net)
219 struct fib_rules_ops *ops;
220 struct mr_table *mrt;
223 ops = fib_rules_register(&ip6mr_rules_ops_template, net);
227 INIT_LIST_HEAD(&net->ipv6.mr6_tables);
229 mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
235 err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
239 net->ipv6.mr6_rules_ops = ops;
243 ip6mr_free_table(mrt);
245 fib_rules_unregister(ops);
249 static void __net_exit ip6mr_rules_exit(struct net *net)
251 struct mr_table *mrt, *next;
254 list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
255 list_del(&mrt->list);
256 ip6mr_free_table(mrt);
258 fib_rules_unregister(net->ipv6.mr6_rules_ops);
262 static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
264 return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR);
267 static unsigned int ip6mr_rules_seq_read(struct net *net)
269 return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
272 bool ip6mr_rule_default(const struct fib_rule *rule)
274 return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
275 rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
277 EXPORT_SYMBOL(ip6mr_rule_default);
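/*
 * Editor's sketch (user-space C, not from the original file): with
 * multiple-table support a daemon selects its table by setting MRT6_TABLE
 * on the raw ICMPv6 socket *before* MRT6_INIT; the table id 42 below is an
 * arbitrary example and error handling is omitted:
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	uint32_t table = 42;
 *	int one = 1;
 *
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_TABLE, &table, sizeof(table));
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *
 * ip6mr_rule_default() above lets callers recognize the catch-all rule that
 * sends lookups to RT6_TABLE_DFLT when no such per-table setup was done.
 */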
279 #define ip6mr_for_each_table(mrt, net) \
280 for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
282 static struct mr_table *ip6mr_mr_table_iter(struct net *net,
283 struct mr_table *mrt)
286 return net->ipv6.mrt6;
290 static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
292 return net->ipv6.mrt6;
295 static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
296 struct mr_table **mrt)
298 *mrt = net->ipv6.mrt6;
302 static int __net_init ip6mr_rules_init(struct net *net)
304 net->ipv6.mrt6 = ip6mr_new_table(net, RT6_TABLE_DFLT);
305 return net->ipv6.mrt6 ? 0 : -ENOMEM;
308 static void __net_exit ip6mr_rules_exit(struct net *net)
311 ip6mr_free_table(net->ipv6.mrt6);
312 net->ipv6.mrt6 = NULL;
316 static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
321 static unsigned int ip6mr_rules_seq_read(struct net *net)
327 static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
330 const struct mfc6_cache_cmp_arg *cmparg = arg->key;
331 struct mfc6_cache *c = (struct mfc6_cache *)ptr;
333 return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
334 !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
337 static const struct rhashtable_params ip6mr_rht_params = {
338 .head_offset = offsetof(struct mr_mfc, mnode),
339 .key_offset = offsetof(struct mfc6_cache, cmparg),
340 .key_len = sizeof(struct mfc6_cache_cmp_arg),
343 .obj_cmpfn = ip6mr_hash_cmp,
344 .automatic_shrinking = true,
347 static void ip6mr_new_table_set(struct mr_table *mrt,
350 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
351 list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
355 static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
356 .mf6c_origin = IN6ADDR_ANY_INIT,
357 .mf6c_mcastgrp = IN6ADDR_ANY_INIT,
360 static struct mr_table_ops ip6mr_mr_table_ops = {
361 .rht_params = &ip6mr_rht_params,
362 .cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
365 static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
367 struct mr_table *mrt;
369 mrt = ip6mr_get_table(net, id);
373 return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
374 ipmr_expire_process, ip6mr_new_table_set);
377 static void ip6mr_free_table(struct mr_table *mrt)
379 del_timer_sync(&mrt->ipmr_expire_timer);
380 mroute_clean_tables(mrt, true);
381 rhltable_destroy(&mrt->mfc_hash);
385 #ifdef CONFIG_PROC_FS
386 /* The /proc interfaces to multicast routing
387 * /proc/ip6_mr_cache /proc/ip6_mr_vif
390 static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
393 struct mr_vif_iter *iter = seq->private;
394 struct net *net = seq_file_net(seq);
395 struct mr_table *mrt;
397 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
399 return ERR_PTR(-ENOENT);
403 read_lock(&mrt_lock);
404 return mr_vif_seq_start(seq, pos);
407 static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
410 read_unlock(&mrt_lock);
413 static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
415 struct mr_vif_iter *iter = seq->private;
416 struct mr_table *mrt = iter->mrt;
418 if (v == SEQ_START_TOKEN) {
420 "Interface BytesIn PktsIn BytesOut PktsOut Flags\n");
422 const struct vif_device *vif = v;
423 const char *name = vif->dev ? vif->dev->name : "none";
426 "%2td %-10s %8ld %7ld %8ld %7ld %05X\n",
427 vif - mrt->vif_table,
428 name, vif->bytes_in, vif->pkt_in,
429 vif->bytes_out, vif->pkt_out,
435 static const struct seq_operations ip6mr_vif_seq_ops = {
436 .start = ip6mr_vif_seq_start,
437 .next = mr_vif_seq_next,
438 .stop = ip6mr_vif_seq_stop,
439 .show = ip6mr_vif_seq_show,
442 static int ip6mr_vif_open(struct inode *inode, struct file *file)
444 return seq_open_net(inode, file, &ip6mr_vif_seq_ops,
445 sizeof(struct mr_vif_iter));
448 static const struct file_operations ip6mr_vif_fops = {
449 .open = ip6mr_vif_open,
452 .release = seq_release_net,
455 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
457 struct net *net = seq_file_net(seq);
458 struct mr_table *mrt;
460 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
462 return ERR_PTR(-ENOENT);
464 return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
467 static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
471 if (v == SEQ_START_TOKEN) {
475 "Iif Pkts Bytes Wrong Oifs\n");
477 const struct mfc6_cache *mfc = v;
478 const struct mr_mfc_iter *it = seq->private;
479 struct mr_table *mrt = it->mrt;
481 seq_printf(seq, "%pI6 %pI6 %-3hd",
482 &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
485 if (it->cache != &mrt->mfc_unres_queue) {
486 seq_printf(seq, " %8lu %8lu %8lu",
487 mfc->_c.mfc_un.res.pkt,
488 mfc->_c.mfc_un.res.bytes,
489 mfc->_c.mfc_un.res.wrong_if);
490 for (n = mfc->_c.mfc_un.res.minvif;
491 n < mfc->_c.mfc_un.res.maxvif; n++) {
492 if (VIF_EXISTS(mrt, n) &&
493 mfc->_c.mfc_un.res.ttls[n] < 255)
496 mfc->_c.mfc_un.res.ttls[n]);
499 /* unresolved mfc_caches don't contain
500 * pkt, bytes and wrong_if values
502 seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
509 static const struct seq_operations ipmr_mfc_seq_ops = {
510 .start = ipmr_mfc_seq_start,
511 .next = mr_mfc_seq_next,
512 .stop = mr_mfc_seq_stop,
513 .show = ipmr_mfc_seq_show,
516 static int ipmr_mfc_open(struct inode *inode, struct file *file)
518 return seq_open_net(inode, file, &ipmr_mfc_seq_ops,
519 sizeof(struct mr_mfc_iter));
522 static const struct file_operations ip6mr_mfc_fops = {
523 .open = ipmr_mfc_open,
526 .release = seq_release_net,
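/*
 * Editor's note (illustrative, not from the original file): with the format
 * strings above, /proc/net/ip6_mr_vif and /proc/net/ip6_mr_cache produce
 * per-mif and per-(S,G) lines roughly like
 *
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0          123456     789    123456     789 00000
 *
 *	ff3e::4321 2001:db8::1 1        42     4242        0  2:1 3:1
 *
 * where the trailing "mif:ttl" pairs list the oifs of a resolved entry.
 */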
530 #ifdef CONFIG_IPV6_PIMSM_V2
532 static int pim6_rcv(struct sk_buff *skb)
534 struct pimreghdr *pim;
535 struct ipv6hdr *encap;
536 struct net_device *reg_dev = NULL;
537 struct net *net = dev_net(skb->dev);
538 struct mr_table *mrt;
539 struct flowi6 fl6 = {
540 .flowi6_iif = skb->dev->ifindex,
541 .flowi6_mark = skb->mark,
545 if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
548 pim = (struct pimreghdr *)skb_transport_header(skb);
549 if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
550 (pim->flags & PIM_NULL_REGISTER) ||
551 (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
552 sizeof(*pim), IPPROTO_PIM,
553 csum_partial((void *)pim, sizeof(*pim), 0)) &&
554 csum_fold(skb_checksum(skb, 0, skb->len, 0))))
557 /* check that the inner packet is destined to a multicast group */
558 encap = (struct ipv6hdr *)(skb_transport_header(skb) +
561 if (!ipv6_addr_is_multicast(&encap->daddr) ||
562 encap->payload_len == 0 ||
563 ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
566 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
568 reg_vif_num = mrt->mroute_reg_vif_num;
570 read_lock(&mrt_lock);
571 if (reg_vif_num >= 0)
572 reg_dev = mrt->vif_table[reg_vif_num].dev;
575 read_unlock(&mrt_lock);
580 skb->mac_header = skb->network_header;
581 skb_pull(skb, (u8 *)encap - skb->data);
582 skb_reset_network_header(skb);
583 skb->protocol = htons(ETH_P_IPV6);
584 skb->ip_summed = CHECKSUM_NONE;
586 skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
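/*
 * Editor's note (illustrative, not from the original file): the register
 * message handled by pim6_rcv() above is laid out as
 *
 *	outer IPv6 header | struct pimreghdr | inner (encapsulated) IPv6 packet
 *
 * pim6_rcv() validates the type and checksum, strips everything up to the
 * inner header and re-injects the inner multicast packet on the pim6reg
 * device via skb_tunnel_rx().
 */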
597 static const struct inet6_protocol pim6_protocol = {
601 /* Service routines creating virtual interfaces: PIMREG */
603 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
604 struct net_device *dev)
606 struct net *net = dev_net(dev);
607 struct mr_table *mrt;
608 struct flowi6 fl6 = {
609 .flowi6_oif = dev->ifindex,
610 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
611 .flowi6_mark = skb->mark,
615 err = ip6mr_fib_lookup(net, &fl6, &mrt);
621 read_lock(&mrt_lock);
622 dev->stats.tx_bytes += skb->len;
623 dev->stats.tx_packets++;
624 ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
625 read_unlock(&mrt_lock);
630 static int reg_vif_get_iflink(const struct net_device *dev)
635 static const struct net_device_ops reg_vif_netdev_ops = {
636 .ndo_start_xmit = reg_vif_xmit,
637 .ndo_get_iflink = reg_vif_get_iflink,
640 static void reg_vif_setup(struct net_device *dev)
642 dev->type = ARPHRD_PIMREG;
643 dev->mtu = 1500 - sizeof(struct ipv6hdr) - 8;
644 dev->flags = IFF_NOARP;
645 dev->netdev_ops = &reg_vif_netdev_ops;
646 dev->needs_free_netdev = true;
647 dev->features |= NETIF_F_NETNS_LOCAL;
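/*
 * Editor's note (illustrative): the MTU above works out to
 * 1500 - sizeof(struct ipv6hdr) - 8 = 1500 - 40 - 8 = 1452 bytes, leaving
 * room for the outer IPv6 header plus the 8-byte PIM Register header added
 * when packets are encapsulated towards the rendezvous point.
 */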
650 static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
652 struct net_device *dev;
655 if (mrt->id == RT6_TABLE_DFLT)
656 sprintf(name, "pim6reg");
658 sprintf(name, "pim6reg%u", mrt->id);
660 dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
664 dev_net_set(dev, net);
666 if (register_netdevice(dev)) {
678 unregister_netdevice(dev);
683 static int call_ip6mr_vif_entry_notifiers(struct net *net,
684 enum fib_event_type event_type,
685 struct vif_device *vif,
686 mifi_t vif_index, u32 tb_id)
688 return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
689 vif, vif_index, tb_id,
690 &net->ipv6.ipmr_seq);
693 static int call_ip6mr_mfc_entry_notifiers(struct net *net,
694 enum fib_event_type event_type,
695 struct mfc6_cache *mfc, u32 tb_id)
697 return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
698 &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
701 /* Delete a VIF entry */
702 static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
703 struct list_head *head)
705 struct vif_device *v;
706 struct net_device *dev;
707 struct inet6_dev *in6_dev;
709 if (vifi < 0 || vifi >= mrt->maxvif)
710 return -EADDRNOTAVAIL;
712 v = &mrt->vif_table[vifi];
714 if (VIF_EXISTS(mrt, vifi))
715 call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
716 FIB_EVENT_VIF_DEL, v, vifi,
719 write_lock_bh(&mrt_lock);
724 write_unlock_bh(&mrt_lock);
725 return -EADDRNOTAVAIL;
728 #ifdef CONFIG_IPV6_PIMSM_V2
729 if (vifi == mrt->mroute_reg_vif_num)
730 mrt->mroute_reg_vif_num = -1;
733 if (vifi + 1 == mrt->maxvif) {
735 for (tmp = vifi - 1; tmp >= 0; tmp--) {
736 if (VIF_EXISTS(mrt, tmp))
739 mrt->maxvif = tmp + 1;
742 write_unlock_bh(&mrt_lock);
744 dev_set_allmulti(dev, -1);
746 in6_dev = __in6_dev_get(dev);
748 in6_dev->cnf.mc_forwarding--;
749 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
750 NETCONFA_MC_FORWARDING,
751 dev->ifindex, &in6_dev->cnf);
754 if ((v->flags & MIFF_REGISTER) && !notify)
755 unregister_netdevice_queue(dev, head);
761 static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
763 struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);
765 kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
768 static inline void ip6mr_cache_free(struct mfc6_cache *c)
770 call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
773 /* Destroy an unresolved cache entry, killing queued skbs
774 and reporting error to netlink readers.
777 static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
779 struct net *net = read_pnet(&mrt->net);
782 atomic_dec(&mrt->cache_resolve_queue_len);
784 while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
785 if (ipv6_hdr(skb)->version == 0) {
786 struct nlmsghdr *nlh = skb_pull(skb,
787 sizeof(struct ipv6hdr));
788 nlh->nlmsg_type = NLMSG_ERROR;
789 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
790 skb_trim(skb, nlh->nlmsg_len);
791 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
792 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
801 /* Timer process for the queue of unresolved entries. */
803 static void ipmr_do_expire_process(struct mr_table *mrt)
805 unsigned long now = jiffies;
806 unsigned long expires = 10 * HZ;
807 struct mr_mfc *c, *next;
809 list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
810 if (time_after(c->mfc_un.unres.expires, now)) {
812 unsigned long interval = c->mfc_un.unres.expires - now;
813 if (interval < expires)
819 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
820 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
823 if (!list_empty(&mrt->mfc_unres_queue))
824 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
827 static void ipmr_expire_process(struct timer_list *t)
829 struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
831 if (!spin_trylock(&mfc_unres_lock)) {
832 mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
836 if (!list_empty(&mrt->mfc_unres_queue))
837 ipmr_do_expire_process(mrt);
839 spin_unlock(&mfc_unres_lock);
842 /* Fill oifs list. It is called under write locked mrt_lock. */
844 static void ip6mr_update_thresholds(struct mr_table *mrt,
845 struct mr_mfc *cache,
850 cache->mfc_un.res.minvif = MAXMIFS;
851 cache->mfc_un.res.maxvif = 0;
852 memset(cache->mfc_un.res.ttls, 255, MAXMIFS);
854 for (vifi = 0; vifi < mrt->maxvif; vifi++) {
855 if (VIF_EXISTS(mrt, vifi) &&
856 ttls[vifi] && ttls[vifi] < 255) {
857 cache->mfc_un.res.ttls[vifi] = ttls[vifi];
858 if (cache->mfc_un.res.minvif > vifi)
859 cache->mfc_un.res.minvif = vifi;
860 if (cache->mfc_un.res.maxvif <= vifi)
861 cache->mfc_un.res.maxvif = vifi + 1;
864 cache->mfc_un.res.lastuse = jiffies;
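/*
 * Editor's example (not from the original file): if user space configured
 * ttls[] so that only mifs 2 (threshold 1) and 5 (threshold 3) are set,
 * the loop above leaves ttls[2] = 1, ttls[5] = 3, every other slot at 255,
 * minvif = 2 and maxvif = 6; ip6_mr_forward() then only scans mifs 2..5
 * and forwards on a mif only when the packet's hop limit exceeds its
 * threshold.
 */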
867 static int mif6_add(struct net *net, struct mr_table *mrt,
868 struct mif6ctl *vifc, int mrtsock)
870 int vifi = vifc->mif6c_mifi;
871 struct vif_device *v = &mrt->vif_table[vifi];
872 struct net_device *dev;
873 struct inet6_dev *in6_dev;
877 if (VIF_EXISTS(mrt, vifi))
880 switch (vifc->mif6c_flags) {
881 #ifdef CONFIG_IPV6_PIMSM_V2
884 * Special Purpose VIF in PIM
885 * All the packets will be sent to the daemon
887 if (mrt->mroute_reg_vif_num >= 0)
889 dev = ip6mr_reg_vif(net, mrt);
892 err = dev_set_allmulti(dev, 1);
894 unregister_netdevice(dev);
901 dev = dev_get_by_index(net, vifc->mif6c_pifi);
903 return -EADDRNOTAVAIL;
904 err = dev_set_allmulti(dev, 1);
914 in6_dev = __in6_dev_get(dev);
916 in6_dev->cnf.mc_forwarding++;
917 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
918 NETCONFA_MC_FORWARDING,
919 dev->ifindex, &in6_dev->cnf);
922 /* Fill in the VIF structures */
923 vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
924 vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
927 /* And finish update writing critical data */
928 write_lock_bh(&mrt_lock);
930 #ifdef CONFIG_IPV6_PIMSM_V2
931 if (v->flags & MIFF_REGISTER)
932 mrt->mroute_reg_vif_num = vifi;
934 if (vifi + 1 > mrt->maxvif)
935 mrt->maxvif = vifi + 1;
936 write_unlock_bh(&mrt_lock);
937 call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
942 static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
943 const struct in6_addr *origin,
944 const struct in6_addr *mcastgrp)
946 struct mfc6_cache_cmp_arg arg = {
947 .mf6c_origin = *origin,
948 .mf6c_mcastgrp = *mcastgrp,
951 return mr_mfc_find(mrt, &arg);
954 /* Look for a (*,G) entry */
955 static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
956 struct in6_addr *mcastgrp,
959 struct mfc6_cache_cmp_arg arg = {
960 .mf6c_origin = in6addr_any,
961 .mf6c_mcastgrp = *mcastgrp,
964 if (ipv6_addr_any(mcastgrp))
965 return mr_mfc_find_any_parent(mrt, mifi);
966 return mr_mfc_find_any(mrt, mifi, &arg);
969 /* Look for a (S,G,iif) entry if parent != -1 */
970 static struct mfc6_cache *
971 ip6mr_cache_find_parent(struct mr_table *mrt,
972 const struct in6_addr *origin,
973 const struct in6_addr *mcastgrp,
976 struct mfc6_cache_cmp_arg arg = {
977 .mf6c_origin = *origin,
978 .mf6c_mcastgrp = *mcastgrp,
981 return mr_mfc_find_parent(mrt, &arg, parent);
984 /* Allocate a multicast cache entry */
985 static struct mfc6_cache *ip6mr_cache_alloc(void)
987 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
990 c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
991 c->_c.mfc_un.res.minvif = MAXMIFS;
992 c->_c.free = ip6mr_cache_free_rcu;
993 refcount_set(&c->_c.mfc_un.res.refcount, 1);
997 static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
999 struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
1002 skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
1003 c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
1008 * A cache entry has gone into a resolved state from queued
1011 static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
1012 struct mfc6_cache *uc, struct mfc6_cache *c)
1014 struct sk_buff *skb;
1017 * Play the pending entries through our router
1020 while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
1021 if (ipv6_hdr(skb)->version == 0) {
1022 struct nlmsghdr *nlh = skb_pull(skb,
1023 sizeof(struct ipv6hdr));
1025 if (mr_fill_mroute(mrt, skb, &c->_c,
1026 nlmsg_data(nlh)) > 0) {
1027 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1029 nlh->nlmsg_type = NLMSG_ERROR;
1030 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1031 skb_trim(skb, nlh->nlmsg_len);
1032 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1034 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1036 ip6_mr_forward(net, mrt, skb, c);
1041 * Bounce a cache query up to pim6sd and netlink.
1043 * Called under mrt_lock.
1046 static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
1047 mifi_t mifi, int assert)
1049 struct sock *mroute6_sk;
1050 struct sk_buff *skb;
1051 struct mrt6msg *msg;
1054 #ifdef CONFIG_IPV6_PIMSM_V2
1055 if (assert == MRT6MSG_WHOLEPKT)
1056 skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
1060 skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);
1065 /* I suppose that internal messages
1066 * do not require checksums */
1068 skb->ip_summed = CHECKSUM_UNNECESSARY;
1070 #ifdef CONFIG_IPV6_PIMSM_V2
1071 if (assert == MRT6MSG_WHOLEPKT) {
1072 /* Ugly, but we have no choice with this interface.
1073 Duplicate old header, fix length etc.
1074 And all this only to mangle msg->im6_msgtype and
1075 to set msg->im6_mbz to "mbz" :-)
1077 skb_push(skb, -skb_network_offset(pkt));
1079 skb_push(skb, sizeof(*msg));
1080 skb_reset_transport_header(skb);
1081 msg = (struct mrt6msg *)skb_transport_header(skb);
1083 msg->im6_msgtype = MRT6MSG_WHOLEPKT;
1084 msg->im6_mif = mrt->mroute_reg_vif_num;
1086 msg->im6_src = ipv6_hdr(pkt)->saddr;
1087 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1089 skb->ip_summed = CHECKSUM_UNNECESSARY;
1094 * Copy the IP header
1097 skb_put(skb, sizeof(struct ipv6hdr));
1098 skb_reset_network_header(skb);
1099 skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));
1104 skb_put(skb, sizeof(*msg));
1105 skb_reset_transport_header(skb);
1106 msg = (struct mrt6msg *)skb_transport_header(skb);
1109 msg->im6_msgtype = assert;
1110 msg->im6_mif = mifi;
1112 msg->im6_src = ipv6_hdr(pkt)->saddr;
1113 msg->im6_dst = ipv6_hdr(pkt)->daddr;
1115 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1116 skb->ip_summed = CHECKSUM_UNNECESSARY;
1120 mroute6_sk = rcu_dereference(mrt->mroute_sk);
1127 mrt6msg_netlink_event(mrt, skb);
1129 /* Deliver to user space multicast routing algorithms */
1130 ret = sock_queue_rcv_skb(mroute6_sk, skb);
1133 net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
1140 /* Queue a packet for resolution. It gets a locked cache entry! */
1141 static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
1142 struct sk_buff *skb)
1144 struct mfc6_cache *c;
1148 spin_lock_bh(&mfc_unres_lock);
1149 list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
1150 if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
1151 ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
1159 * Create a new entry if allowable
1162 if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
1163 (c = ip6mr_cache_alloc_unres()) == NULL) {
1164 spin_unlock_bh(&mfc_unres_lock);
1170 /* Fill in the new cache entry */
1171 c->_c.mfc_parent = -1;
1172 c->mf6c_origin = ipv6_hdr(skb)->saddr;
1173 c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;
1176 * Reflect first query at pim6sd
1178 err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
1180 /* If the report failed, throw the cache entry out.
1183 spin_unlock_bh(&mfc_unres_lock);
1185 ip6mr_cache_free(c);
1190 atomic_inc(&mrt->cache_resolve_queue_len);
1191 list_add(&c->_c.list, &mrt->mfc_unres_queue);
1192 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1194 ipmr_do_expire_process(mrt);
1197 /* See if we can append the packet */
1198 if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
1202 skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
1206 spin_unlock_bh(&mfc_unres_lock);
1211 * MFC6 cache manipulation by user space
1214 static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
1217 struct mfc6_cache *c;
1219 /* The entries are added/deleted only under RTNL */
1221 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1222 &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1226 rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
1227 list_del_rcu(&c->_c.list);
1229 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1230 FIB_EVENT_ENTRY_DEL, c, mrt->id);
1231 mr6_netlink_event(mrt, c, RTM_DELROUTE);
1232 mr_cache_put(&c->_c);
1236 static int ip6mr_device_event(struct notifier_block *this,
1237 unsigned long event, void *ptr)
1239 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1240 struct net *net = dev_net(dev);
1241 struct mr_table *mrt;
1242 struct vif_device *v;
1245 if (event != NETDEV_UNREGISTER)
1248 ip6mr_for_each_table(mrt, net) {
1249 v = &mrt->vif_table[0];
1250 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1252 mif6_delete(mrt, ct, 1, NULL);
1259 static unsigned int ip6mr_seq_read(struct net *net)
1263 return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
1266 static int ip6mr_dump(struct net *net, struct notifier_block *nb)
1268 return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
1269 ip6mr_mr_table_iter, &mrt_lock);
1272 static struct notifier_block ip6_mr_notifier = {
1273 .notifier_call = ip6mr_device_event
1276 static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
1277 .family = RTNL_FAMILY_IP6MR,
1278 .fib_seq_read = ip6mr_seq_read,
1279 .fib_dump = ip6mr_dump,
1280 .owner = THIS_MODULE,
1283 static int __net_init ip6mr_notifier_init(struct net *net)
1285 struct fib_notifier_ops *ops;
1287 net->ipv6.ipmr_seq = 0;
1289 ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
1291 return PTR_ERR(ops);
1293 net->ipv6.ip6mr_notifier_ops = ops;
1298 static void __net_exit ip6mr_notifier_exit(struct net *net)
1300 fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
1301 net->ipv6.ip6mr_notifier_ops = NULL;
1304 /* Setup for IP multicast routing */
1305 static int __net_init ip6mr_net_init(struct net *net)
1309 err = ip6mr_notifier_init(net);
1313 err = ip6mr_rules_init(net);
1315 goto ip6mr_rules_fail;
1317 #ifdef CONFIG_PROC_FS
1319 if (!proc_create("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_fops))
1321 if (!proc_create("ip6_mr_cache", 0, net->proc_net, &ip6mr_mfc_fops))
1322 goto proc_cache_fail;
1327 #ifdef CONFIG_PROC_FS
1329 remove_proc_entry("ip6_mr_vif", net->proc_net);
1331 ip6mr_rules_exit(net);
1334 ip6mr_notifier_exit(net);
1338 static void __net_exit ip6mr_net_exit(struct net *net)
1340 #ifdef CONFIG_PROC_FS
1341 remove_proc_entry("ip6_mr_cache", net->proc_net);
1342 remove_proc_entry("ip6_mr_vif", net->proc_net);
1344 ip6mr_rules_exit(net);
1345 ip6mr_notifier_exit(net);
1348 static struct pernet_operations ip6mr_net_ops = {
1349 .init = ip6mr_net_init,
1350 .exit = ip6mr_net_exit,
1354 int __init ip6_mr_init(void)
1358 mrt_cachep = kmem_cache_create("ip6_mrt_cache",
1359 sizeof(struct mfc6_cache),
1360 0, SLAB_HWCACHE_ALIGN,
1365 err = register_pernet_subsys(&ip6mr_net_ops);
1367 goto reg_pernet_fail;
1369 err = register_netdevice_notifier(&ip6_mr_notifier);
1371 goto reg_notif_fail;
1372 #ifdef CONFIG_IPV6_PIMSM_V2
1373 if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
1374 pr_err("%s: can't add PIM protocol\n", __func__);
1376 goto add_proto_fail;
1379 err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
1380 NULL, ip6mr_rtm_dumproute, 0);
1384 #ifdef CONFIG_IPV6_PIMSM_V2
1385 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1387 unregister_netdevice_notifier(&ip6_mr_notifier);
1390 unregister_pernet_subsys(&ip6mr_net_ops);
1392 kmem_cache_destroy(mrt_cachep);
1396 void ip6_mr_cleanup(void)
1398 rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
1399 #ifdef CONFIG_IPV6_PIMSM_V2
1400 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1402 unregister_netdevice_notifier(&ip6_mr_notifier);
1403 unregister_pernet_subsys(&ip6mr_net_ops);
1404 kmem_cache_destroy(mrt_cachep);
1407 static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
1408 struct mf6cctl *mfc, int mrtsock, int parent)
1410 unsigned char ttls[MAXMIFS];
1411 struct mfc6_cache *uc, *c;
1416 if (mfc->mf6cc_parent >= MAXMIFS)
1419 memset(ttls, 255, MAXMIFS);
1420 for (i = 0; i < MAXMIFS; i++) {
1421 if (IF_ISSET(i, &mfc->mf6cc_ifset))
1425 /* The entries are added/deleted only under RTNL */
1427 c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
1428 &mfc->mf6cc_mcastgrp.sin6_addr, parent);
1431 write_lock_bh(&mrt_lock);
1432 c->_c.mfc_parent = mfc->mf6cc_parent;
1433 ip6mr_update_thresholds(mrt, &c->_c, ttls);
1435 c->_c.mfc_flags |= MFC_STATIC;
1436 write_unlock_bh(&mrt_lock);
1437 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
1439 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1443 if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
1444 !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
1447 c = ip6mr_cache_alloc();
1451 c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
1452 c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
1453 c->_c.mfc_parent = mfc->mf6cc_parent;
1454 ip6mr_update_thresholds(mrt, &c->_c, ttls);
1456 c->_c.mfc_flags |= MFC_STATIC;
1458 err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
1461 pr_err("ip6mr: rhtable insert error %d\n", err);
1462 ip6mr_cache_free(c);
1465 list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
1467 /* Check to see if we resolved a queued entry. If so we
1468 * need to send the queued frames on and tidy up.
1471 spin_lock_bh(&mfc_unres_lock);
1472 list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
1473 uc = (struct mfc6_cache *)_uc;
1474 if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
1475 ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
1476 list_del(&_uc->list);
1477 atomic_dec(&mrt->cache_resolve_queue_len);
1482 if (list_empty(&mrt->mfc_unres_queue))
1483 del_timer(&mrt->ipmr_expire_timer);
1484 spin_unlock_bh(&mfc_unres_lock);
1487 ip6mr_cache_resolve(net, mrt, uc, c);
1488 ip6mr_cache_free(uc);
1490 call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
1492 mr6_netlink_event(mrt, c, RTM_NEWROUTE);
1497 * Close the multicast socket, and clear the vif tables etc
1500 static void mroute_clean_tables(struct mr_table *mrt, bool all)
1502 struct mr_mfc *c, *tmp;
1506 /* Shut down all active vif entries */
1507 for (i = 0; i < mrt->maxvif; i++) {
1508 if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
1510 mif6_delete(mrt, i, 0, &list);
1512 unregister_netdevice_many(&list);
1514 /* Wipe the cache */
1515 list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
1516 if (!all && (c->mfc_flags & MFC_STATIC))
1518 rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
1519 list_del_rcu(&c->list);
1520 mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
1524 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1525 spin_lock_bh(&mfc_unres_lock);
1526 list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1528 call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
1529 FIB_EVENT_ENTRY_DEL,
1530 (struct mfc6_cache *)c,
1532 mr6_netlink_event(mrt, (struct mfc6_cache *)c,
1534 ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
1536 spin_unlock_bh(&mfc_unres_lock);
1540 static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
1543 struct net *net = sock_net(sk);
1546 write_lock_bh(&mrt_lock);
1547 if (rtnl_dereference(mrt->mroute_sk)) {
1550 rcu_assign_pointer(mrt->mroute_sk, sk);
1551 sock_set_flag(sk, SOCK_RCU_FREE);
1552 net->ipv6.devconf_all->mc_forwarding++;
1554 write_unlock_bh(&mrt_lock);
1557 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1558 NETCONFA_MC_FORWARDING,
1559 NETCONFA_IFINDEX_ALL,
1560 net->ipv6.devconf_all);
1566 int ip6mr_sk_done(struct sock *sk)
1569 struct net *net = sock_net(sk);
1570 struct mr_table *mrt;
1572 if (sk->sk_type != SOCK_RAW ||
1573 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1577 ip6mr_for_each_table(mrt, net) {
1578 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1579 write_lock_bh(&mrt_lock);
1580 RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1581 /* Note that mroute_sk had SOCK_RCU_FREE set,
1582 * so the RCU grace period before sk freeing
1583 * is guaranteed by sk_destruct()
1585 net->ipv6.devconf_all->mc_forwarding--;
1586 write_unlock_bh(&mrt_lock);
1587 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
1588 NETCONFA_MC_FORWARDING,
1589 NETCONFA_IFINDEX_ALL,
1590 net->ipv6.devconf_all);
1592 mroute_clean_tables(mrt, false);
1602 bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
1604 struct mr_table *mrt;
1605 struct flowi6 fl6 = {
1606 .flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
1607 .flowi6_oif = skb->dev->ifindex,
1608 .flowi6_mark = skb->mark,
1611 if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
1614 return rcu_access_pointer(mrt->mroute_sk);
1616 EXPORT_SYMBOL(mroute6_is_socket);
1619 * Socket options and virtual interface manipulation. The whole
1620 * virtual interface system is a complete heap, but unfortunately
1621 * that's how BSD mrouted happens to think. Maybe one day with a proper
1622 * MOSPF/PIM router set up we can clean this up.
1625 int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
1627 int ret, parent = 0;
1631 struct net *net = sock_net(sk);
1632 struct mr_table *mrt;
1634 if (sk->sk_type != SOCK_RAW ||
1635 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1638 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1642 if (optname != MRT6_INIT) {
1643 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1644 !ns_capable(net->user_ns, CAP_NET_ADMIN))
1650 if (optlen < sizeof(int))
1653 return ip6mr_sk_init(mrt, sk);
1656 return ip6mr_sk_done(sk);
1659 if (optlen < sizeof(vif))
1661 if (copy_from_user(&vif, optval, sizeof(vif)))
1663 if (vif.mif6c_mifi >= MAXMIFS)
1666 ret = mif6_add(net, mrt, &vif,
1667 sk == rtnl_dereference(mrt->mroute_sk));
1672 if (optlen < sizeof(mifi_t))
1674 if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
1677 ret = mif6_delete(mrt, mifi, 0, NULL);
1682 * Manipulate the forwarding caches. These live
1683 * in a sort of kernel/user symbiosis.
1689 case MRT6_ADD_MFC_PROXY:
1690 case MRT6_DEL_MFC_PROXY:
1691 if (optlen < sizeof(mfc))
1693 if (copy_from_user(&mfc, optval, sizeof(mfc)))
1696 parent = mfc.mf6cc_parent;
1698 if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
1699 ret = ip6mr_mfc_delete(mrt, &mfc, parent);
1701 ret = ip6mr_mfc_add(net, mrt, &mfc,
1703 rtnl_dereference(mrt->mroute_sk),
1709 * Control PIM assert (activating PIM also activates assert)
1715 if (optlen != sizeof(v))
1717 if (get_user(v, (int __user *)optval))
1719 mrt->mroute_do_assert = v;
1723 #ifdef CONFIG_IPV6_PIMSM_V2
1728 if (optlen != sizeof(v))
1730 if (get_user(v, (int __user *)optval))
1735 if (v != mrt->mroute_do_pim) {
1736 mrt->mroute_do_pim = v;
1737 mrt->mroute_do_assert = v;
1744 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
1749 if (optlen != sizeof(u32))
1751 if (get_user(v, (u32 __user *)optval))
1753 /* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
1754 if (v != RT_TABLE_DEFAULT && v >= 100000000)
1756 if (sk == rcu_access_pointer(mrt->mroute_sk))
1761 if (!ip6mr_new_table(net, v))
1763 raw6_sk(sk)->ip6mr_table = v;
1769 * Spurious command, or MRT6_VERSION which you cannot set.
1773 return -ENOPROTOOPT;
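/*
 * Editor's sketch (user-space C, not part of this file): the minimal
 * sequence a pim6sd-style daemon uses against the interface above.
 * Addresses, interface names and mif numbers are made-up examples and
 * error handling is omitted:
 *
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	struct mif6ctl mif = { .mif6c_mifi = 0,
 *			       .mif6c_pifi = if_nametoindex("eth0"),
 *			       .vifc_threshold = 1 };
 *	struct mf6cctl mfc = { .mf6cc_parent = 0 };
 *
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MIF, &mif, sizeof(mif));
 *
 *	inet_pton(AF_INET6, "2001:db8::1", &mfc.mf6cc_origin.sin6_addr);
 *	inet_pton(AF_INET6, "ff3e::4321", &mfc.mf6cc_mcastgrp.sin6_addr);
 *	IF_SET(1, &mfc.mf6cc_ifset);		// forward to mif 1
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_ADD_MFC, &mfc, sizeof(mfc));
 *
 *	... read struct mrt6msg upcalls (MRT6MSG_NOCACHE etc.) from fd ...
 *	setsockopt(fd, IPPROTO_IPV6, MRT6_DONE, NULL, 0);
 */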
1778 * Getsockopt support for the multicast routing system.
1781 int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
1786 struct net *net = sock_net(sk);
1787 struct mr_table *mrt;
1789 if (sk->sk_type != SOCK_RAW ||
1790 inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
1793 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1801 #ifdef CONFIG_IPV6_PIMSM_V2
1803 val = mrt->mroute_do_pim;
1807 val = mrt->mroute_do_assert;
1810 return -ENOPROTOOPT;
1813 if (get_user(olr, optlen))
1816 olr = min_t(int, olr, sizeof(int));
1820 if (put_user(olr, optlen))
1822 if (copy_to_user(optval, &val, olr))
1828 * The IP multicast ioctl support routines.
1831 int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
1833 struct sioc_sg_req6 sr;
1834 struct sioc_mif_req6 vr;
1835 struct vif_device *vif;
1836 struct mfc6_cache *c;
1837 struct net *net = sock_net(sk);
1838 struct mr_table *mrt;
1840 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1845 case SIOCGETMIFCNT_IN6:
1846 if (copy_from_user(&vr, arg, sizeof(vr)))
1848 if (vr.mifi >= mrt->maxvif)
1850 read_lock(&mrt_lock);
1851 vif = &mrt->vif_table[vr.mifi];
1852 if (VIF_EXISTS(mrt, vr.mifi)) {
1853 vr.icount = vif->pkt_in;
1854 vr.ocount = vif->pkt_out;
1855 vr.ibytes = vif->bytes_in;
1856 vr.obytes = vif->bytes_out;
1857 read_unlock(&mrt_lock);
1859 if (copy_to_user(arg, &vr, sizeof(vr)))
1863 read_unlock(&mrt_lock);
1864 return -EADDRNOTAVAIL;
1865 case SIOCGETSGCNT_IN6:
1866 if (copy_from_user(&sr, arg, sizeof(sr)))
1870 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1872 sr.pktcnt = c->_c.mfc_un.res.pkt;
1873 sr.bytecnt = c->_c.mfc_un.res.bytes;
1874 sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1877 if (copy_to_user(arg, &sr, sizeof(sr)))
1882 return -EADDRNOTAVAIL;
1884 return -ENOIOCTLCMD;
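/*
 * Editor's sketch (user-space C, not part of this file): querying the
 * per-(S,G) counters exposed above; fd is the mroute socket and the
 * addresses are made-up examples:
 *
 *	struct sioc_sg_req6 sr = { 0 };
 *
 *	inet_pton(AF_INET6, "2001:db8::1", &sr.src.sin6_addr);
 *	inet_pton(AF_INET6, "ff3e::4321", &sr.grp.sin6_addr);
 *	if (ioctl(fd, SIOCGETSGCNT_IN6, &sr) == 0)
 *		printf("pkt %lu bytes %lu wrong_if %lu\n",
 *		       sr.pktcnt, sr.bytecnt, sr.wrong_if);
 */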
1888 #ifdef CONFIG_COMPAT
1889 struct compat_sioc_sg_req6 {
1890 struct sockaddr_in6 src;
1891 struct sockaddr_in6 grp;
1892 compat_ulong_t pktcnt;
1893 compat_ulong_t bytecnt;
1894 compat_ulong_t wrong_if;
1897 struct compat_sioc_mif_req6 {
1899 compat_ulong_t icount;
1900 compat_ulong_t ocount;
1901 compat_ulong_t ibytes;
1902 compat_ulong_t obytes;
1905 int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1907 struct compat_sioc_sg_req6 sr;
1908 struct compat_sioc_mif_req6 vr;
1909 struct vif_device *vif;
1910 struct mfc6_cache *c;
1911 struct net *net = sock_net(sk);
1912 struct mr_table *mrt;
1914 mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
1919 case SIOCGETMIFCNT_IN6:
1920 if (copy_from_user(&vr, arg, sizeof(vr)))
1922 if (vr.mifi >= mrt->maxvif)
1924 read_lock(&mrt_lock);
1925 vif = &mrt->vif_table[vr.mifi];
1926 if (VIF_EXISTS(mrt, vr.mifi)) {
1927 vr.icount = vif->pkt_in;
1928 vr.ocount = vif->pkt_out;
1929 vr.ibytes = vif->bytes_in;
1930 vr.obytes = vif->bytes_out;
1931 read_unlock(&mrt_lock);
1933 if (copy_to_user(arg, &vr, sizeof(vr)))
1937 read_unlock(&mrt_lock);
1938 return -EADDRNOTAVAIL;
1939 case SIOCGETSGCNT_IN6:
1940 if (copy_from_user(&sr, arg, sizeof(sr)))
1944 c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
1946 sr.pktcnt = c->_c.mfc_un.res.pkt;
1947 sr.bytecnt = c->_c.mfc_un.res.bytes;
1948 sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1951 if (copy_to_user(arg, &sr, sizeof(sr)))
1956 return -EADDRNOTAVAIL;
1958 return -ENOIOCTLCMD;
1963 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
1965 __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
1966 IPSTATS_MIB_OUTFORWDATAGRAMS);
1967 __IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
1968 IPSTATS_MIB_OUTOCTETS, skb->len);
1969 return dst_output(net, sk, skb);
1973 * Processing handlers for ip6mr_forward
1976 static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
1977 struct sk_buff *skb, struct mfc6_cache *c, int vifi)
1979 struct ipv6hdr *ipv6h;
1980 struct vif_device *vif = &mrt->vif_table[vifi];
1981 struct net_device *dev;
1982 struct dst_entry *dst;
1988 #ifdef CONFIG_IPV6_PIMSM_V2
1989 if (vif->flags & MIFF_REGISTER) {
1991 vif->bytes_out += skb->len;
1992 vif->dev->stats.tx_bytes += skb->len;
1993 vif->dev->stats.tx_packets++;
1994 ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
1999 ipv6h = ipv6_hdr(skb);
2001 fl6 = (struct flowi6) {
2002 .flowi6_oif = vif->link,
2003 .daddr = ipv6h->daddr,
2006 dst = ip6_route_output(net, NULL, &fl6);
2013 skb_dst_set(skb, dst);
2016 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
2017 * not only before forwarding, but also after forwarding on all output
2018 * interfaces. Clearly, if the mrouter runs a multicast
2019 * program, that program should receive packets regardless of which
2020 * interface it has joined on.
2021 * If we did not do this, the program would have to join on all
2022 * interfaces. On the other hand, a multihomed host (or router, but
2023 * not an mrouter) cannot join on more than one interface - that would
2024 * result in it receiving multiple copies of each packet.
2029 vif->bytes_out += skb->len;
2031 /* We are about to write */
2032 /* XXX: extension headers? */
2033 if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
2036 ipv6h = ipv6_hdr(skb);
2039 IP6CB(skb)->flags |= IP6SKB_FORWARDED;
2041 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
2042 net, NULL, skb, skb->dev, dev,
2043 ip6mr_forward2_finish);
2050 static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
2054 for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
2055 if (mrt->vif_table[ct].dev == dev)
2061 static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
2062 struct sk_buff *skb, struct mfc6_cache *c)
2066 int true_vifi = ip6mr_find_vif(mrt, skb->dev);
2068 vif = c->_c.mfc_parent;
2069 c->_c.mfc_un.res.pkt++;
2070 c->_c.mfc_un.res.bytes += skb->len;
2071 c->_c.mfc_un.res.lastuse = jiffies;
2073 if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
2074 struct mfc6_cache *cache_proxy;
2076 /* For an (*,G) entry, we only check that the incoming
2077 * interface is part of the static tree.
2080 cache_proxy = mr_mfc_find_any_parent(mrt, vif);
2082 cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
2090 * Wrong interface: drop packet and (maybe) send PIM assert.
2092 if (mrt->vif_table[vif].dev != skb->dev) {
2093 c->_c.mfc_un.res.wrong_if++;
2095 if (true_vifi >= 0 && mrt->mroute_do_assert &&
2096 /* PIM-SM uses asserts when switching from the RPT to the SPT,
2097 so we cannot check that the packet arrived on an oif.
2098 That is bad, but otherwise we would need to move a pretty
2099 large chunk of pimd into the kernel. Ough... --ANK
2101 (mrt->mroute_do_pim ||
2102 c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
2104 c->_c.mfc_un.res.last_assert +
2105 MFC_ASSERT_THRESH)) {
2106 c->_c.mfc_un.res.last_assert = jiffies;
2107 ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
2113 mrt->vif_table[vif].pkt_in++;
2114 mrt->vif_table[vif].bytes_in += skb->len;
2119 if (ipv6_addr_any(&c->mf6c_origin) &&
2120 ipv6_addr_any(&c->mf6c_mcastgrp)) {
2121 if (true_vifi >= 0 &&
2122 true_vifi != c->_c.mfc_parent &&
2123 ipv6_hdr(skb)->hop_limit >
2124 c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
2125 /* It's an (*,*) entry and the packet is not coming from
2126 * the upstream: forward the packet to the upstream
2129 psend = c->_c.mfc_parent;
2134 for (ct = c->_c.mfc_un.res.maxvif - 1;
2135 ct >= c->_c.mfc_un.res.minvif; ct--) {
2136 /* For (*,G) entry, don't forward to the incoming interface */
2137 if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
2138 ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
2140 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2142 ip6mr_forward2(net, mrt, skb2,
2150 ip6mr_forward2(net, mrt, skb, c, psend);
2160 * Multicast packets for forwarding arrive here
2163 int ip6_mr_input(struct sk_buff *skb)
2165 struct mfc6_cache *cache;
2166 struct net *net = dev_net(skb->dev);
2167 struct mr_table *mrt;
2168 struct flowi6 fl6 = {
2169 .flowi6_iif = skb->dev->ifindex,
2170 .flowi6_mark = skb->mark,
2174 err = ip6mr_fib_lookup(net, &fl6, &mrt);
2180 read_lock(&mrt_lock);
2181 cache = ip6mr_cache_find(mrt,
2182 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
2184 int vif = ip6mr_find_vif(mrt, skb->dev);
2187 cache = ip6mr_cache_find_any(mrt,
2188 &ipv6_hdr(skb)->daddr,
2193 * No usable cache entry
2198 vif = ip6mr_find_vif(mrt, skb->dev);
2200 int err = ip6mr_cache_unresolved(mrt, vif, skb);
2201 read_unlock(&mrt_lock);
2205 read_unlock(&mrt_lock);
2210 ip6_mr_forward(net, mrt, skb, cache);
2212 read_unlock(&mrt_lock);
2217 int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
2221 struct mr_table *mrt;
2222 struct mfc6_cache *cache;
2223 struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
2225 mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
2229 read_lock(&mrt_lock);
2230 cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
2231 if (!cache && skb->dev) {
2232 int vif = ip6mr_find_vif(mrt, skb->dev);
2235 cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
2240 struct sk_buff *skb2;
2241 struct ipv6hdr *iph;
2242 struct net_device *dev;
2246 if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
2247 read_unlock(&mrt_lock);
2251 /* really correct? */
2252 skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
2254 read_unlock(&mrt_lock);
2258 NETLINK_CB(skb2).portid = portid;
2259 skb_reset_transport_header(skb2);
2261 skb_put(skb2, sizeof(struct ipv6hdr));
2262 skb_reset_network_header(skb2);
2264 iph = ipv6_hdr(skb2);
2267 iph->flow_lbl[0] = 0;
2268 iph->flow_lbl[1] = 0;
2269 iph->flow_lbl[2] = 0;
2270 iph->payload_len = 0;
2271 iph->nexthdr = IPPROTO_NONE;
2273 iph->saddr = rt->rt6i_src.addr;
2274 iph->daddr = rt->rt6i_dst.addr;
2276 err = ip6mr_cache_unresolved(mrt, vif, skb2);
2277 read_unlock(&mrt_lock);
2282 err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
2283 read_unlock(&mrt_lock);
2287 static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2288 u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
2291 struct nlmsghdr *nlh;
2295 nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2299 rtm = nlmsg_data(nlh);
2300 rtm->rtm_family = RTNL_FAMILY_IP6MR;
2301 rtm->rtm_dst_len = 128;
2302 rtm->rtm_src_len = 128;
2304 rtm->rtm_table = mrt->id;
2305 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2306 goto nla_put_failure;
2307 rtm->rtm_type = RTN_MULTICAST;
2308 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2309 if (c->_c.mfc_flags & MFC_STATIC)
2310 rtm->rtm_protocol = RTPROT_STATIC;
2312 rtm->rtm_protocol = RTPROT_MROUTED;
2315 if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
2316 nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
2317 goto nla_put_failure;
2318 err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
2319 /* do not break the dump if cache is unresolved */
2320 if (err < 0 && err != -ENOENT)
2321 goto nla_put_failure;
2323 nlmsg_end(skb, nlh);
2327 nlmsg_cancel(skb, nlh);
2331 static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2332 u32 portid, u32 seq, struct mr_mfc *c,
2335 return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
2339 static int mr6_msgsize(bool unresolved, int maxvif)
2342 NLMSG_ALIGN(sizeof(struct rtmsg))
2343 + nla_total_size(4) /* RTA_TABLE */
2344 + nla_total_size(sizeof(struct in6_addr)) /* RTA_SRC */
2345 + nla_total_size(sizeof(struct in6_addr)) /* RTA_DST */
2350 + nla_total_size(4) /* RTA_IIF */
2351 + nla_total_size(0) /* RTA_MULTIPATH */
2352 + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2354 + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2360 static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
2363 struct net *net = read_pnet(&mrt->net);
2364 struct sk_buff *skb;
2367 skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
2372 err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2376 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
2382 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
2385 static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
2388 NLMSG_ALIGN(sizeof(struct rtgenmsg))
2389 + nla_total_size(1) /* IP6MRA_CREPORT_MSGTYPE */
2390 + nla_total_size(4) /* IP6MRA_CREPORT_MIF_ID */
2391 /* IP6MRA_CREPORT_SRC_ADDR */
2392 + nla_total_size(sizeof(struct in6_addr))
2393 /* IP6MRA_CREPORT_DST_ADDR */
2394 + nla_total_size(sizeof(struct in6_addr))
2395 /* IP6MRA_CREPORT_PKT */
2396 + nla_total_size(payloadlen)
2402 static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
2404 struct net *net = read_pnet(&mrt->net);
2405 struct nlmsghdr *nlh;
2406 struct rtgenmsg *rtgenm;
2407 struct mrt6msg *msg;
2408 struct sk_buff *skb;
2412 payloadlen = pkt->len - sizeof(struct mrt6msg);
2413 msg = (struct mrt6msg *)skb_transport_header(pkt);
2415 skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2419 nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2420 sizeof(struct rtgenmsg), 0);
2423 rtgenm = nlmsg_data(nlh);
2424 rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
2425 if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
2426 nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
2427 nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
2429 nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
2431 goto nla_put_failure;
2433 nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
2434 if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
2435 nla_data(nla), payloadlen))
2436 goto nla_put_failure;
2438 nlmsg_end(skb, nlh);
2440 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
2444 nlmsg_cancel(skb, nlh);
2447 rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
2450 static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2452 return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
2453 _ip6mr_fill_mroute, &mfc_unres_lock);