/*
 *	Linux IPv6 multicast routing support for BSD pim6sd
 *	Based on net/ipv4/ipmr.c.
 *
 *	(c) 2004 Mickael Hoerdt, <hoerdt@clarinet.u-strasbg.fr>
 *		LSIIT Laboratory, Strasbourg, France
 *	(c) 2004 Jean-Philippe Andriot, <jean-philippe.andriot@6WIND.com>
 *	Copyright (C)2007,2008 USAGI/WIDE Project
 *		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/compat.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <linux/mroute6.h>
#include <linux/pim.h>
#include <net/addrconf.h>
#include <linux/netfilter_ipv6.h>
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
struct ip6mr_rule {
	struct fib_rule		common;
};

struct ip6mr_result {
	struct mr_table	*mrt;
};
/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);
/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */
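/* Illustrative sketch of the scheme above (not additional code, just the
 * pattern as used later in this file): readers on the data path take
 * mrt_lock shared,
 *
 *	read_lock(&mrt_lock);
 *	cache = ip6mr_cache_find(mrt, &saddr, &daddr);
 *	...
 *	read_unlock(&mrt_lock);
 *
 * while updates run in process context under write_lock_bh(&mrt_lock),
 * and the unresolved queue is only ever touched under mfc_unres_lock.
 */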
static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr_table *mrt);

static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert);
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd);
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
			       struct netlink_callback *cb);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list)
static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	struct mr_table *ret;

	if (!mrt)
		ret = list_entry_rcu(net->ipv6.mr6_tables.next,
				     struct mr_table, list);
	else
		ret = list_entry_rcu(mrt->list.next,
				     struct mr_table, list);

	if (&ret->list == &net->ipv6.mr6_tables)
		return NULL;
	return ret;
}
static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	ip6mr_for_each_table(mrt, net) {
		if (mrt->id == id)
			return mrt;
	}
	return NULL;
}
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	int err;
	struct ip6mr_result res;
	struct fib_lookup_arg arg = {
		.result = &res,
		.flags = FIB_LOOKUP_NOREF,
	};

	err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
			       flowi6_to_flowi(flp6), 0, &arg);
	if (err < 0)
		return err;
	*mrt = res.mrt;
	return 0;
}
static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp,
			     int flags, struct fib_lookup_arg *arg)
{
	struct ip6mr_result *res = arg->result;
	struct mr_table *mrt;

	switch (rule->action) {
	case FR_ACT_TO_TBL:
		break;
	case FR_ACT_UNREACHABLE:
		return -ENETUNREACH;
	case FR_ACT_PROHIBIT:
		return -EACCES;
	case FR_ACT_BLACKHOLE:
	default:
		return -EINVAL;
	}

	mrt = ip6mr_get_table(rule->fr_net, rule->table);
	if (!mrt)
		return -EAGAIN;
	res->mrt = mrt;
	return 0;
}
static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags)
{
	return 1;
}
static const struct nla_policy ip6mr_rule_policy[FRA_MAX + 1] = {
	FRA_GENERIC_POLICY,
};
static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
				struct fib_rule_hdr *frh, struct nlattr **tb,
				struct netlink_ext_ack *extack)
{
	return 0;
}

static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
			      struct nlattr **tb)
{
	return 1;
}

static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
			   struct fib_rule_hdr *frh)
{
	frh->dst_len = 0;
	frh->src_len = 0;
	frh->tos     = 0;
	return 0;
}
static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.rule_size	= sizeof(struct ip6mr_rule),
	.addr_size	= sizeof(struct in6_addr),
	.action		= ip6mr_rule_action,
	.match		= ip6mr_rule_match,
	.configure	= ip6mr_rule_configure,
	.compare	= ip6mr_rule_compare,
	.fill		= ip6mr_rule_fill,
	.nlgroup	= RTNLGRP_IPV6_RULE,
	.policy		= ip6mr_rule_policy,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct fib_rules_ops *ops;
	struct mr_table *mrt;
	int err;

	ops = fib_rules_register(&ip6mr_rules_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	INIT_LIST_HEAD(&net->ipv6.mr6_tables);

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt)) {
		err = PTR_ERR(mrt);
		goto err1;
	}

	err = fib_default_rule_add(ops, 0x7fff, RT6_TABLE_DFLT, 0);
	if (err < 0)
		goto err2;

	net->ipv6.mr6_rules_ops = ops;
	return 0;

err2:
	ip6mr_free_table(mrt);
err1:
	fib_rules_unregister(ops);
	return err;
}
static void __net_exit ip6mr_rules_exit(struct net *net)
{
	struct mr_table *mrt, *next;

	rtnl_lock();
	list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
		list_del(&mrt->list);
		ip6mr_free_table(mrt);
	}
	fib_rules_unregister(net->ipv6.mr6_rules_ops);
	rtnl_unlock();
}
static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return fib_rules_dump(net, nb, RTNL_FAMILY_IP6MR);
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return fib_rules_seq_read(net, RTNL_FAMILY_IP6MR);
}
bool ip6mr_rule_default(const struct fib_rule *rule)
{
	return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL &&
		rule->table == RT6_TABLE_DFLT && !rule->l3mdev;
}
EXPORT_SYMBOL(ip6mr_rule_default);
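/* With CONFIG_IPV6_MROUTE_MULTIPLE_TABLES, packets are steered to a
 * multicast routing table through fib rules of family RTNL_FAMILY_IP6MR.
 * For illustration only (iproute2 syntax, assuming multi-table support
 * is enabled):
 *
 *	ip -6 mrule add iif eth0 table 100
 *
 * would direct multicast arriving on eth0 to mr table 100, while the
 * catch-all rule added in ip6mr_rules_init() points at RT6_TABLE_DFLT.
 */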
#else
#define ip6mr_for_each_table(mrt, net) \
	for (mrt = net->ipv6.mrt6; mrt; mrt = NULL)
static struct mr_table *ip6mr_mr_table_iter(struct net *net,
					    struct mr_table *mrt)
{
	if (!mrt)
		return net->ipv6.mrt6;
	return NULL;
}
static struct mr_table *ip6mr_get_table(struct net *net, u32 id)
{
	return net->ipv6.mrt6;
}

static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
			    struct mr_table **mrt)
{
	*mrt = net->ipv6.mrt6;
	return 0;
}
static int __net_init ip6mr_rules_init(struct net *net)
{
	struct mr_table *mrt;

	mrt = ip6mr_new_table(net, RT6_TABLE_DFLT);
	if (IS_ERR(mrt))
		return PTR_ERR(mrt);
	net->ipv6.mrt6 = mrt;
	return 0;
}

static void __net_exit ip6mr_rules_exit(struct net *net)
{
	rtnl_lock();
	ip6mr_free_table(net->ipv6.mrt6);
	net->ipv6.mrt6 = NULL;
	rtnl_unlock();
}
static int ip6mr_rules_dump(struct net *net, struct notifier_block *nb)
{
	return 0;
}

static unsigned int ip6mr_rules_seq_read(struct net *net)
{
	return 0;
}
#endif
static int ip6mr_hash_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct mfc6_cache_cmp_arg *cmparg = arg->key;
	struct mfc6_cache *c = (struct mfc6_cache *)ptr;

	return !ipv6_addr_equal(&c->mf6c_mcastgrp, &cmparg->mf6c_mcastgrp) ||
	       !ipv6_addr_equal(&c->mf6c_origin, &cmparg->mf6c_origin);
}
static const struct rhashtable_params ip6mr_rht_params = {
	.head_offset = offsetof(struct mr_mfc, mnode),
	.key_offset = offsetof(struct mfc6_cache, cmparg),
	.key_len = sizeof(struct mfc6_cache_cmp_arg),
	.obj_cmpfn = ip6mr_hash_cmp,
	.automatic_shrinking = true,
};
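/* The MFC hash is keyed by the full (origin, mcastgrp) address pair
 * (struct mfc6_cache_cmp_arg); ip6mr_hash_cmp() above follows the
 * rhashtable obj_cmpfn convention of returning 0 on a match and
 * non-zero otherwise.
 */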
static void ip6mr_new_table_set(struct mr_table *mrt,
				struct net *net)
{
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	list_add_tail_rcu(&mrt->list, &net->ipv6.mr6_tables);
#endif
}
static struct mfc6_cache_cmp_arg ip6mr_mr_table_ops_cmparg_any = {
	.mf6c_origin = IN6ADDR_ANY_INIT,
	.mf6c_mcastgrp = IN6ADDR_ANY_INIT,
};

static struct mr_table_ops ip6mr_mr_table_ops = {
	.rht_params = &ip6mr_rht_params,
	.cmparg_any = &ip6mr_mr_table_ops_cmparg_any,
};
static struct mr_table *ip6mr_new_table(struct net *net, u32 id)
{
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, id);
	if (mrt)
		return mrt;

	return mr_table_alloc(net, id, &ip6mr_mr_table_ops,
			      ipmr_expire_process, ip6mr_new_table_set);
}
static void ip6mr_free_table(struct mr_table *mrt)
{
	del_timer_sync(&mrt->ipmr_expire_timer);
	mroute_clean_tables(mrt, true);
	rhltable_destroy(&mrt->mfc_hash);
	kfree(mrt);
}
#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing
 *	/proc/ip6_mr_cache /proc/ip6_mr_vif
 */
static void *ip6mr_vif_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(mrt_lock)
{
	struct mr_vif_iter *iter = seq->private;
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	iter->mrt = mrt;

	read_lock(&mrt_lock);
	return mr_vif_seq_start(seq, pos);
}
static void ip6mr_vif_seq_stop(struct seq_file *seq, void *v)
	__releases(mrt_lock)
{
	read_unlock(&mrt_lock);
}
static int ip6mr_vif_seq_show(struct seq_file *seq, void *v)
{
	struct mr_vif_iter *iter = seq->private;
	struct mr_table *mrt = iter->mrt;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2td %-10s %8ld %7ld  %8ld %7ld %05X\n",
			   vif - mrt->vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags);
	}
	return 0;
}
static const struct seq_operations ip6mr_vif_seq_ops = {
	.start = ip6mr_vif_seq_start,
	.next  = mr_vif_seq_next,
	.stop  = ip6mr_vif_seq_stop,
	.show  = ip6mr_vif_seq_show,
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return ERR_PTR(-ENOENT);

	return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group                            "
			 "Origin                           "
			 "Iif      Pkts  Bytes     Wrong  Oifs\n");
	} else {
		const struct mfc6_cache *mfc = v;
		const struct mr_mfc_iter *it = seq->private;
		struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%pI6 %pI6 %-3hd",
			   &mfc->mf6c_mcastgrp, &mfc->mf6c_origin,
			   mfc->_c.mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->_c.mfc_un.res.pkt,
				   mfc->_c.mfc_un.res.bytes,
				   mfc->_c.mfc_un.res.wrong_if);
			for (n = mfc->_c.mfc_un.res.minvif;
			     n < mfc->_c.mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->_c.mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d", n,
						   mfc->_c.mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = mr_mfc_seq_next,
	.stop  = mr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
#endif
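/* Sample of what the handlers above emit (field widths as in the format
 * strings; the values here are made up for illustration):
 *
 *	# cat /proc/net/ip6_mr_vif
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags
 *	 0 eth0           1500      10      3000      20 00000
 *
 * ip6_mr_cache prints group, origin and parent mif, then packet/byte/
 * wrong-mif counters and "mif:ttl" pairs for resolved entries.
 */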
#ifdef CONFIG_IPV6_PIMSM_V2

static int pim6_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct ipv6hdr *encap;
	struct net_device *reg_dev = NULL;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int reg_vif_num;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION << 4) | PIM_TYPE_REGISTER) ||
	    (pim->flags & PIM_NULL_REGISTER) ||
	    (csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
			     sizeof(*pim), IPPROTO_PIM,
			     csum_partial((void *)pim, sizeof(*pim), 0)) &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct ipv6hdr *)(skb_transport_header(skb) +
				   sizeof(*pim));

	if (!ipv6_addr_is_multicast(&encap->daddr) ||
	    encap->payload_len == 0 ||
	    ntohs(encap->payload_len) + sizeof(*pim) > skb->len)
		goto drop;

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		goto drop;
	reg_vif_num = mrt->mroute_reg_vif_num;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = mrt->vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (!reg_dev)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IPV6);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	dev_put(reg_dev);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}
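/* In short, pim6_rcv() accepts only well-formed PIM REGISTER messages:
 * it validates type/flags and checksum, requires the inner packet to be
 * IPv6 multicast of plausible length, then strips the outer header plus
 * register header and re-injects the inner packet on the pim6reg device
 * via skb_tunnel_rx()/netif_rx(), so it re-enters the stack as if it had
 * been received on that interface.
 */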
static const struct inet6_protocol pim6_protocol = {
	.handler	=	pim6_rcv,
};
/* Service routines creating virtual interfaces: PIMREG */

static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_oif	= dev->ifindex,
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;
	ip6mr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, MRT6MSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
{
	return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
	.ndo_start_xmit	= reg_vif_xmit,
	.ndo_get_iflink = reg_vif_get_iflink,
};
static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= 1500 - sizeof(struct ipv6hdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &reg_vif_netdev_ops;
	dev->needs_free_netdev	= true;
	dev->features		|= NETIF_F_NETNS_LOCAL;
}
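/* The register device MTU above works out to 1500 - 40 - 8 = 1452 bytes:
 * an encapsulated packet must leave room for the outer IPv6 header
 * (sizeof(struct ipv6hdr) == 40) plus the 8-byte PIM register header.
 */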
static struct net_device *ip6mr_reg_vif(struct net *net, struct mr_table *mrt)
{
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (mrt->id == RT6_TABLE_DFLT)
		sprintf(name, "pim6reg");
	else
		sprintf(name, "pim6reg%u", mrt->id);

	dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
	if (!dev)
		return NULL;

	dev_net_set(dev, net);

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}

	if (dev_open(dev))
		goto failure;

	dev_hold(dev);
	return dev;

failure:
	unregister_netdevice(dev);
	return NULL;
}
#endif
static int call_ip6mr_vif_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct vif_device *vif,
					  mifi_t vif_index, u32 tb_id)
{
	return mr_call_vif_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     vif, vif_index, tb_id,
				     &net->ipv6.ipmr_seq);
}

static int call_ip6mr_mfc_entry_notifiers(struct net *net,
					  enum fib_event_type event_type,
					  struct mfc6_cache *mfc, u32 tb_id)
{
	return mr_call_mfc_notifiers(net, RTNL_FAMILY_IP6MR, event_type,
				     &mfc->_c, tb_id, &net->ipv6.ipmr_seq);
}
/* Delete a VIF entry */
static int mif6_delete(struct mr_table *mrt, int vifi, int notify,
		       struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct inet6_dev *in6_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	if (VIF_EXISTS(mrt, vifi))
		call_ip6mr_vif_entry_notifiers(read_pnet(&mrt->net),
					       FIB_EVENT_VIF_DEL, v, vifi,
					       mrt->id);

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;
#endif

	if (vifi + 1 == mrt->maxvif) {
		int tmp;
		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp + 1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding--;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	if ((v->flags & MIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}
static inline void ip6mr_cache_free_rcu(struct rcu_head *head)
{
	struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

	kmem_cache_free(mrt_cachep, (struct mfc6_cache *)c);
}

static inline void ip6mr_cache_free(struct mfc6_cache *c)
{
	call_rcu(&c->_c.rcu, ip6mr_cache_free_rcu);
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ip6mr_destroy_unres(struct mr_table *mrt, struct mfc6_cache *c)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;

	atomic_dec(&mrt->cache_resolve_queue_len);

	while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved)) != NULL) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			kfree_skb(skb);
	}

	ip6mr_cache_free(c);
}
/* Timer process for all the unresolved queue. */

static void ipmr_do_expire_process(struct mr_table *mrt)
{
	unsigned long now = jiffies;
	unsigned long expires = 10 * HZ;
	struct mr_mfc *c, *next;

	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			/* not yet... */
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			continue;
		}

		list_del(&c->list);
		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
		ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
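/* Unresolved entries therefore live for at most 10 seconds (the expires
 * value set in ip6mr_cache_alloc_unres()); the timer is re-armed with
 * the shortest remaining interval while any queued entry is still
 * within its window.
 */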
static void ipmr_expire_process(struct timer_list *t)
{
	struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
		return;
	}

	if (!list_empty(&mrt->mfc_unres_queue))
		ipmr_do_expire_process(mrt);

	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ip6mr_update_thresholds(struct mr_table *mrt,
				    struct mr_mfc *cache,
				    unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXMIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXMIFS);

	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
		if (VIF_EXISTS(mrt, vifi) &&
		    ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
	cache->mfc_un.res.lastuse = jiffies;
}
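/* Worked example: with mrt->maxvif == 3 and ttls = {0, 1, 255}, only
 * mif 1 gets a forwarding ttl (ttls[1] == 1), so minvif becomes 1 and
 * maxvif becomes 2; the forwarding loop in ip6_mr_forward() then scans
 * mifs in [1, 2) only.
 */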
static int mif6_add(struct net *net, struct mr_table *mrt,
		    struct mif6ctl *vifc, int mrtsock)
{
	int vifi = vifc->mif6c_mifi;
	struct vif_device *v = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct inet6_dev *in6_dev;
	int err;

	/* Is vif busy ? */
	if (VIF_EXISTS(mrt, vifi))
		return -EADDRINUSE;

	switch (vifc->mif6c_flags) {
#ifdef CONFIG_IPV6_PIMSM_V2
	case MIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (mrt->mroute_reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ip6mr_reg_vif(net, mrt);
		if (!dev)
			return -ENOBUFS;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			unregister_netdevice(dev);
			dev_put(dev);
			return err;
		}
		break;
#endif
	case 0:
		dev = dev_get_by_index(net, vifc->mif6c_pifi);
		if (!dev)
			return -EADDRNOTAVAIL;
		err = dev_set_allmulti(dev, 1);
		if (err) {
			dev_put(dev);
			return err;
		}
		break;
	default:
		return -EINVAL;
	}

	in6_dev = __in6_dev_get(dev);
	if (in6_dev) {
		in6_dev->cnf.mc_forwarding++;
		inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     dev->ifindex, &in6_dev->cnf);
	}

	/* Fill in the VIF structures */
	vif_device_init(v, dev, vifc->vifc_rate_limit, vifc->vifc_threshold,
			vifc->mif6c_flags | (!mrtsock ? VIFF_STATIC : 0),
			MIFF_REGISTER);

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	v->dev = dev;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (v->flags & MIFF_REGISTER)
		mrt->mroute_reg_vif_num = vifi;
#endif
	if (vifi + 1 > mrt->maxvif)
		mrt->maxvif = vifi + 1;
	write_unlock_bh(&mrt_lock);
	call_ip6mr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD,
				       v, vifi, mrt->id);
	return 0;
}
static struct mfc6_cache *ip6mr_cache_find(struct mr_table *mrt,
					   const struct in6_addr *origin,
					   const struct in6_addr *mcastgrp)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find(mrt, &arg);
}
/* Look for a (*,G) entry */
static struct mfc6_cache *ip6mr_cache_find_any(struct mr_table *mrt,
					       struct in6_addr *mcastgrp,
					       mifi_t mifi)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = in6addr_any,
		.mf6c_mcastgrp = *mcastgrp,
	};

	if (ipv6_addr_any(mcastgrp))
		return mr_mfc_find_any_parent(mrt, mifi);
	return mr_mfc_find_any(mrt, mifi, &arg);
}
/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc6_cache *
ip6mr_cache_find_parent(struct mr_table *mrt,
			const struct in6_addr *origin,
			const struct in6_addr *mcastgrp,
			int parent)
{
	struct mfc6_cache_cmp_arg arg = {
		.mf6c_origin = *origin,
		.mf6c_mcastgrp = *mcastgrp,
	};

	return mr_mfc_find_parent(mrt, &arg, parent);
}
/* Allocate a multicast cache entry */
static struct mfc6_cache *ip6mr_cache_alloc(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (!c)
		return NULL;
	c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
	c->_c.mfc_un.res.minvif = MAXMIFS;
	c->_c.free = ip6mr_cache_free_rcu;
	refcount_set(&c->_c.mfc_un.res.refcount, 1);
	return c;
}
static struct mfc6_cache *ip6mr_cache_alloc_unres(void)
{
	struct mfc6_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (!c)
		return NULL;
	skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
	c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
	return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ip6mr_cache_resolve(struct net *net, struct mr_table *mrt,
				struct mfc6_cache *uc, struct mfc6_cache *c)
{
	struct sk_buff *skb;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
		if (ipv6_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = skb_pull(skb,
							sizeof(struct ipv6hdr));

			if (mr_fill_mroute(mrt, skb, &c->_c,
					   nlmsg_data(nlh)) > 0) {
				nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
			}
			rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
		} else
			ip6_mr_forward(net, mrt, skb, c);
	}
}
/*
 *	Bounce a cache query up to pim6sd and netlink.
 *
 *	Called under mrt_lock.
 */

static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt,
			      mifi_t mifi, int assert)
{
	struct sock *mroute6_sk;
	struct sk_buff *skb;
	struct mrt6msg *msg;
	int ret;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, -skb_network_offset(pkt)
						+sizeof(*msg));
	else
#endif
		skb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(*msg), GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

	/* I suppose that internal messages
	 * do not require checksums */

	skb->ip_summed = CHECKSUM_UNNECESSARY;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (assert == MRT6MSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix length etc.
		   And all this only to mangle msg->im6_msgtype and
		   to set msg->im6_mbz to "mbz" :-)
		 */
		skb_push(skb, -skb_network_offset(pkt));

		skb_push(skb, sizeof(*msg));
		skb_reset_transport_header(skb);
		msg = (struct mrt6msg *)skb_transport_header(skb);
		msg->im6_mbz = 0;
		msg->im6_msgtype = MRT6MSG_WHOLEPKT;
		msg->im6_mif = mrt->mroute_reg_vif_num;
		msg->im6_pad = 0;
		msg->im6_src = ipv6_hdr(pkt)->saddr;
		msg->im6_dst = ipv6_hdr(pkt)->daddr;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else
#endif
	{
	/*
	 *	Copy the IP header
	 */

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	skb_copy_to_linear_data(skb, ipv6_hdr(pkt), sizeof(struct ipv6hdr));

	/*
	 *	Add our header
	 */
	skb_put(skb, sizeof(*msg));
	skb_reset_transport_header(skb);
	msg = (struct mrt6msg *)skb_transport_header(skb);

	msg->im6_mbz = 0;
	msg->im6_msgtype = assert;
	msg->im6_mif = mifi;
	msg->im6_pad = 0;
	msg->im6_src = ipv6_hdr(pkt)->saddr;
	msg->im6_dst = ipv6_hdr(pkt)->daddr;

	skb_dst_set(skb, dst_clone(skb_dst(pkt)));
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	rcu_read_lock();
	mroute6_sk = rcu_dereference(mrt->mroute_sk);
	if (!mroute6_sk) {
		rcu_read_unlock();
		kfree_skb(skb);
		return -EINVAL;
	}

	mrt6msg_netlink_event(mrt, skb);

	/* Deliver to user space multicast routing algorithms */
	ret = sock_queue_rcv_skb(mroute6_sk, skb);
	rcu_read_unlock();
	if (ret < 0) {
		net_warn_ratelimited("mroute6: pending queue full, dropping entries\n");
		kfree_skb(skb);
	}

	return ret;
}
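/* The struct mrt6msg built here is what pim6sd reads from its raw
 * ICMPv6 socket: im6_msgtype is MRT6MSG_NOCACHE, MRT6MSG_WRONGMIF or
 * MRT6MSG_WHOLEPKT, im6_mif the arrival mif, and im6_src/im6_dst the
 * addresses of the triggering packet. The same report is mirrored to
 * netlink listeners via mrt6msg_netlink_event().
 */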
/* Queue a packet for resolution. It gets locked cache entry! */
static int ip6mr_cache_unresolved(struct mr_table *mrt, mifi_t mifi,
				  struct sk_buff *skb)
{
	struct mfc6_cache *c;
	bool found = false;
	int err;

	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr)) {
			found = true;
			break;
		}
	}

	if (!found) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/* Fill in the new cache entry */
		c->_c.mfc_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		err = ip6mr_cache_report(mrt, skb, mifi, MRT6MSG_NOCACHE);
		if (err < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			ip6mr_cache_free(c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&mrt->cache_resolve_queue_len);
		list_add(&c->_c.list, &mrt->mfc_unres_queue);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);

		ipmr_do_expire_process(mrt);
	}

	/* See if we can append the packet */
	if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
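/* Two hard limits bound the unresolved state above: at most 10 pending
 * entries per table (cache_resolve_queue_len), and at most 4 queued
 * skbs per entry (the qlen > 3 check); anything beyond that is dropped
 * with -ENOBUFS until user space installs a route.
 */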
/*
 *	MFC6 cache manipulation by user space
 */

static int ip6mr_mfc_delete(struct mr_table *mrt, struct mf6cctl *mfc,
			    int parent)
{
	struct mfc6_cache *c;

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (!c)
		return -ENOENT;
	rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ip6mr_rht_params);
	list_del_rcu(&c->_c.list);

	call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
				       FIB_EVENT_ENTRY_DEL, c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_DELROUTE);
	mr_cache_put(&c->_c);
	return 0;
}
static int ip6mr_device_event(struct notifier_block *this,
			      unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct mr_table *mrt;
	struct vif_device *v;
	int ct;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;

	ip6mr_for_each_table(mrt, net) {
		v = &mrt->vif_table[0];
		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
			if (v->dev == dev)
				mif6_delete(mrt, ct, 1, NULL);
		}
	}

	return NOTIFY_DONE;
}
static unsigned int ip6mr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv6.ipmr_seq + ip6mr_rules_seq_read(net);
}

static int ip6mr_dump(struct net *net, struct notifier_block *nb)
{
	return mr_dump(net, nb, RTNL_FAMILY_IP6MR, ip6mr_rules_dump,
		       ip6mr_mr_table_iter, &mrt_lock);
}
static struct notifier_block ip6_mr_notifier = {
	.notifier_call = ip6mr_device_event
};

static const struct fib_notifier_ops ip6mr_notifier_ops_template = {
	.family		= RTNL_FAMILY_IP6MR,
	.fib_seq_read	= ip6mr_seq_read,
	.fib_dump	= ip6mr_dump,
	.owner		= THIS_MODULE,
};
static int __net_init ip6mr_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	net->ipv6.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ip6mr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	net->ipv6.ip6mr_notifier_ops = ops;

	return 0;
}

static void __net_exit ip6mr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv6.ip6mr_notifier_ops);
	net->ipv6.ip6mr_notifier_ops = NULL;
}
/* Setup for IP multicast routing */
static int __net_init ip6mr_net_init(struct net *net)
{
	int err;

	err = ip6mr_notifier_init(net);
	if (err)
		return err;

	err = ip6mr_rules_init(net);
	if (err < 0)
		goto ip6mr_rules_fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create_net("ip6_mr_vif", 0, net->proc_net, &ip6mr_vif_seq_ops,
			sizeof(struct mr_vif_iter)))
		goto proc_vif_fail;
	if (!proc_create_net("ip6_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
			sizeof(struct mr_mfc_iter)))
		goto proc_cache_fail;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip6_mr_vif", net->proc_net);
proc_vif_fail:
	ip6mr_rules_exit(net);
#endif
ip6mr_rules_fail:
	ip6mr_notifier_exit(net);
	return err;
}

static void __net_exit ip6mr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip6_mr_cache", net->proc_net);
	remove_proc_entry("ip6_mr_vif", net->proc_net);
#endif
	ip6mr_rules_exit(net);
	ip6mr_notifier_exit(net);
}
static struct pernet_operations ip6mr_net_ops = {
	.init = ip6mr_net_init,
	.exit = ip6mr_net_exit,
};
int __init ip6_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip6_mrt_cache",
				       sizeof(struct mfc6_cache),
				       0, SLAB_HWCACHE_ALIGN,
				       NULL);
	if (!mrt_cachep)
		return -ENOMEM;

	err = register_pernet_subsys(&ip6mr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip6_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IPV6_PIMSM_V2
	if (inet6_add_protocol(&pim6_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	err = rtnl_register_module(THIS_MODULE, RTNL_FAMILY_IP6MR, RTM_GETROUTE,
				   NULL, ip6mr_rtm_dumproute, 0);
	if (err == 0)
		return 0;

#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
add_proto_fail:
	unregister_netdevice_notifier(&ip6_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ip6mr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}
void ip6_mr_cleanup(void)
{
	rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
#ifdef CONFIG_IPV6_PIMSM_V2
	inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
#endif
	unregister_netdevice_notifier(&ip6_mr_notifier);
	unregister_pernet_subsys(&ip6mr_net_ops);
	kmem_cache_destroy(mrt_cachep);
}
static int ip6mr_mfc_add(struct net *net, struct mr_table *mrt,
			 struct mf6cctl *mfc, int mrtsock, int parent)
{
	unsigned char ttls[MAXMIFS];
	struct mfc6_cache *uc, *c;
	struct mr_mfc *_uc;
	bool found;
	int i, err;

	if (mfc->mf6cc_parent >= MAXMIFS)
		return -ENFILE;

	memset(ttls, 255, MAXMIFS);
	for (i = 0; i < MAXMIFS; i++) {
		if (IF_ISSET(i, &mfc->mf6cc_ifset))
			ttls[i] = 1;
	}

	/* The entries are added/deleted only under RTNL */
	rcu_read_lock();
	c = ip6mr_cache_find_parent(mrt, &mfc->mf6cc_origin.sin6_addr,
				    &mfc->mf6cc_mcastgrp.sin6_addr, parent);
	rcu_read_unlock();
	if (c) {
		write_lock_bh(&mrt_lock);
		c->_c.mfc_parent = mfc->mf6cc_parent;
		ip6mr_update_thresholds(mrt, &c->_c, ttls);
		if (!mrtsock)
			c->_c.mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE,
					       c, mrt->id);
		mr6_netlink_event(mrt, c, RTM_NEWROUTE);
		return 0;
	}

	if (!ipv6_addr_any(&mfc->mf6cc_mcastgrp.sin6_addr) &&
	    !ipv6_addr_is_multicast(&mfc->mf6cc_mcastgrp.sin6_addr))
		return -EINVAL;

	c = ip6mr_cache_alloc();
	if (!c)
		return -ENOMEM;

	c->mf6c_origin = mfc->mf6cc_origin.sin6_addr;
	c->mf6c_mcastgrp = mfc->mf6cc_mcastgrp.sin6_addr;
	c->_c.mfc_parent = mfc->mf6cc_parent;
	ip6mr_update_thresholds(mrt, &c->_c, ttls);
	if (!mrtsock)
		c->_c.mfc_flags |= MFC_STATIC;

	err = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
				  ip6mr_rht_params);
	if (err) {
		pr_err("ip6mr: rhtable insert error %d\n", err);
		ip6mr_cache_free(c);
		return err;
	}
	list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);

	/* Check to see if we resolved a queued list. If so we
	 * need to send on the frames and tidy up.
	 */
	found = false;
	spin_lock_bh(&mfc_unres_lock);
	list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
		uc = (struct mfc6_cache *)_uc;
		if (ipv6_addr_equal(&uc->mf6c_origin, &c->mf6c_origin) &&
		    ipv6_addr_equal(&uc->mf6c_mcastgrp, &c->mf6c_mcastgrp)) {
			list_del(&_uc->list);
			atomic_dec(&mrt->cache_resolve_queue_len);
			found = true;
			break;
		}
	}
	if (list_empty(&mrt->mfc_unres_queue))
		del_timer(&mrt->ipmr_expire_timer);
	spin_unlock_bh(&mfc_unres_lock);

	if (found) {
		ip6mr_cache_resolve(net, mrt, uc, c);
		ip6mr_cache_free(uc);
	}
	call_ip6mr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD,
				       c, mrt->id);
	mr6_netlink_event(mrt, c, RTM_NEWROUTE);
	return 0;
}
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
	struct mr_mfc *c, *tmp;
	LIST_HEAD(list);
	int i;

	/* Shut down all active vif entries */
	for (i = 0; i < mrt->maxvif; i++) {
		if (!all && (mrt->vif_table[i].flags & VIFF_STATIC))
			continue;
		mif6_delete(mrt, i, 0, &list);
	}
	unregister_netdevice_many(&list);

	/* Wipe the cache */
	list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
		if (!all && (c->mfc_flags & MFC_STATIC))
			continue;
		rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params);
		list_del_rcu(&c->list);
		mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE);
		mr_cache_put(c);
	}

	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
		spin_lock_bh(&mfc_unres_lock);
		list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
			list_del(&c->list);
			call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net),
						       FIB_EVENT_ENTRY_DEL,
						       (struct mfc6_cache *)c,
						       mrt->id);
			mr6_netlink_event(mrt, (struct mfc6_cache *)c,
					  RTM_DELROUTE);
			ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
static int ip6mr_sk_init(struct mr_table *mrt, struct sock *sk)
{
	int err = 0;
	struct net *net = sock_net(sk);

	rtnl_lock();
	write_lock_bh(&mrt_lock);
	if (rtnl_dereference(mrt->mroute_sk)) {
		err = -EADDRINUSE;
	} else {
		rcu_assign_pointer(mrt->mroute_sk, sk);
		sock_set_flag(sk, SOCK_RCU_FREE);
		net->ipv6.devconf_all->mc_forwarding++;
	}
	write_unlock_bh(&mrt_lock);

	if (!err)
		inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
					     NETCONFA_MC_FORWARDING,
					     NETCONFA_IFINDEX_ALL,
					     net->ipv6.devconf_all);
	rtnl_unlock();

	return err;
}
int ip6mr_sk_done(struct sock *sk)
{
	int err = -EACCES;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return err;

	rtnl_lock();
	ip6mr_for_each_table(mrt, net) {
		if (sk == rtnl_dereference(mrt->mroute_sk)) {
			write_lock_bh(&mrt_lock);
			RCU_INIT_POINTER(mrt->mroute_sk, NULL);
			/* Note that mroute_sk had SOCK_RCU_FREE set,
			 * so the RCU grace period before sk freeing
			 * is guaranteed by sk_destruct()
			 */
			net->ipv6.devconf_all->mc_forwarding--;
			write_unlock_bh(&mrt_lock);
			inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
						     NETCONFA_MC_FORWARDING,
						     NETCONFA_IFINDEX_ALL,
						     net->ipv6.devconf_all);

			mroute_clean_tables(mrt, false);
			err = 0;
			break;
		}
	}
	rtnl_unlock();

	return err;
}
bool mroute6_is_socket(struct net *net, struct sk_buff *skb)
{
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->skb_iif ? : LOOPBACK_IFINDEX,
		.flowi6_oif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};

	if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
		return false;

	return rcu_access_pointer(mrt->mroute_sk);
}
EXPORT_SYMBOL(mroute6_is_socket);
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsigned int optlen)
{
	int ret, parent = 0;
	struct mif6ctl vif;
	struct mf6cctl mfc;
	mifi_t mifi;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	if (optname != MRT6_INIT) {
		if (sk != rcu_access_pointer(mrt->mroute_sk) &&
		    !ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT6_INIT:
		if (optlen < sizeof(int))
			return -EINVAL;

		return ip6mr_sk_init(mrt, sk);

	case MRT6_DONE:
		return ip6mr_sk_done(sk);

	case MRT6_ADD_MIF:
		if (optlen < sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif, optval, sizeof(vif)))
			return -EFAULT;
		if (vif.mif6c_mifi >= MAXMIFS)
			return -ENFILE;
		rtnl_lock();
		ret = mif6_add(net, mrt, &vif,
			       sk == rtnl_dereference(mrt->mroute_sk));
		rtnl_unlock();
		return ret;

	case MRT6_DEL_MIF:
		if (optlen < sizeof(mifi_t))
			return -EINVAL;
		if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
			return -EFAULT;
		rtnl_lock();
		ret = mif6_delete(mrt, mifi, 0, NULL);
		rtnl_unlock();
		return ret;

	/*
	 *	Manipulate the forwarding caches. These live
	 *	in a sort of kernel/user symbiosis.
	 */
	case MRT6_ADD_MFC:
	case MRT6_DEL_MFC:
		parent = -1;
		/* fall through */
	case MRT6_ADD_MFC_PROXY:
	case MRT6_DEL_MFC_PROXY:
		if (optlen < sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc, optval, sizeof(mfc)))
			return -EFAULT;
		if (parent == 0)
			parent = mfc.mf6cc_parent;
		rtnl_lock();
		if (optname == MRT6_DEL_MFC || optname == MRT6_DEL_MFC_PROXY)
			ret = ip6mr_mfc_delete(mrt, &mfc, parent);
		else
			ret = ip6mr_mfc_add(net, mrt, &mfc,
					    sk ==
					    rtnl_dereference(mrt->mroute_sk),
					    parent);
		rtnl_unlock();
		return ret;

	/*
	 *	Control PIM assert (to activate pim will activate assert)
	 */
	case MRT6_ASSERT:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		mrt->mroute_do_assert = v;
		return 0;
	}

#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
	{
		int v;

		if (optlen != sizeof(v))
			return -EINVAL;
		if (get_user(v, (int __user *)optval))
			return -EFAULT;
		v = !!v;
		rtnl_lock();
		ret = 0;
		if (v != mrt->mroute_do_pim) {
			mrt->mroute_do_pim = v;
			mrt->mroute_do_assert = v;
		}
		rtnl_unlock();
		return ret;
	}
#endif

#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
	case MRT6_TABLE:
	{
		u32 v;

		if (optlen != sizeof(u32))
			return -EINVAL;
		if (get_user(v, (u32 __user *)optval))
			return -EFAULT;
		/* "pim6reg%u" should not exceed 16 bytes (IFNAMSIZ) */
		if (v != RT_TABLE_DEFAULT && v >= 100000000)
			return -EINVAL;
		if (sk == rcu_access_pointer(mrt->mroute_sk))
			return -EBUSY;

		rtnl_lock();
		ret = 0;
		mrt = ip6mr_new_table(net, v);
		if (IS_ERR(mrt))
			ret = PTR_ERR(mrt);
		else
			raw6_sk(sk)->ip6mr_table = v;
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT6_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
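/* A minimal user-space sketch of this API (hypothetical daemon code,
 * not part of the kernel): options are set on a raw ICMPv6 socket at
 * level IPPROTO_IPV6, e.g.
 *
 *	int s = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
 *	int one = 1;
 *	setsockopt(s, IPPROTO_IPV6, MRT6_INIT, &one, sizeof(one));
 *
 *	struct mif6ctl mc = { .mif6c_mifi = 0, .mif6c_pifi = if_index };
 *	setsockopt(s, IPPROTO_IPV6, MRT6_ADD_MIF, &mc, sizeof(mc));
 *
 * followed by MRT6_ADD_MFC calls carrying struct mf6cctl entries, and
 * MRT6_DONE on shutdown.
 */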
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip6_mroute_getsockopt(struct sock *sk, int optname, char __user *optval,
			  int __user *optlen)
{
	int olr;
	int val;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	if (sk->sk_type != SOCK_RAW ||
	    inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
		return -EOPNOTSUPP;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (optname) {
	case MRT6_VERSION:
		val = 0x0305;
		break;
#ifdef CONFIG_IPV6_PIMSM_V2
	case MRT6_PIM:
		val = mrt->mroute_do_pim;
		break;
#endif
	case MRT6_ASSERT:
		val = mrt->mroute_do_assert;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ip6mr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req6 sr;
	struct sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req6 {
	struct sockaddr_in6 src;
	struct sockaddr_in6 grp;
	compat_ulong_t pktcnt;
	compat_ulong_t bytecnt;
	compat_ulong_t wrong_if;
};

struct compat_sioc_mif_req6 {
	mifi_t mifi;
	compat_ulong_t icount;
	compat_ulong_t ocount;
	compat_ulong_t ibytes;
	compat_ulong_t obytes;
};

int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
	struct compat_sioc_sg_req6 sr;
	struct compat_sioc_mif_req6 vr;
	struct vif_device *vif;
	struct mfc6_cache *c;
	struct net *net = sock_net(sk);
	struct mr_table *mrt;

	mrt = ip6mr_get_table(net, raw6_sk(sk)->ip6mr_table ? : RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	switch (cmd) {
	case SIOCGETMIFCNT_IN6:
		if (copy_from_user(&vr, arg, sizeof(vr)))
			return -EFAULT;
		if (vr.mifi >= mrt->maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif = &mrt->vif_table[vr.mifi];
		if (VIF_EXISTS(mrt, vr.mifi)) {
			vr.icount = vif->pkt_in;
			vr.ocount = vif->pkt_out;
			vr.ibytes = vif->bytes_in;
			vr.obytes = vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg, &vr, sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT_IN6:
		if (copy_from_user(&sr, arg, sizeof(sr)))
			return -EFAULT;

		rcu_read_lock();
		c = ip6mr_cache_find(mrt, &sr.src.sin6_addr, &sr.grp.sin6_addr);
		if (c) {
			sr.pktcnt = c->_c.mfc_un.res.pkt;
			sr.bytecnt = c->_c.mfc_un.res.bytes;
			sr.wrong_if = c->_c.mfc_un.res.wrong_if;
			rcu_read_unlock();

			if (copy_to_user(arg, &sr, sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		rcu_read_unlock();
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
			IPSTATS_MIB_OUTOCTETS, skb->len);
	return dst_output(net, sk, skb);
}
/*
 *	Processing handlers for ip6mr_forward
 */

static int ip6mr_forward2(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc6_cache *c, int vifi)
{
	struct ipv6hdr *ipv6h;
	struct vif_device *vif = &mrt->vif_table[vifi];
	struct net_device *dev;
	struct dst_entry *dst;
	struct flowi6 fl6;

	if (!vif->dev)
		goto out_free;

#ifdef CONFIG_IPV6_PIMSM_V2
	if (vif->flags & MIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out += skb->len;
		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;
		ip6mr_cache_report(mrt, skb, vifi, MRT6MSG_WHOLEPKT);
		goto out_free;
	}
#endif

	ipv6h = ipv6_hdr(skb);

	fl6 = (struct flowi6) {
		.flowi6_oif = vif->link,
		.daddr = ipv6h->daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error) {
		dst_release(dst);
		goto out_free;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	/*
	 * RFC1584 teaches, that DVMRP/PIM router must deliver packets locally
	 * not only before forwarding, but after forwarding on all output
	 * interfaces. It is clear, if mrouter runs a multicasting
	 * program, it should receive packets not depending to what interface
	 * program is joined.
	 * If we will not make it, the program will have to join on all
	 * interfaces. On the other hand, multihoming host (or router, but
	 * not mrouter) cannot join to more than one interface - it will
	 * result in receiving multiple packets.
	 */
	dev = vif->dev;
	skb->dev = dev;
	vif->pkt_out++;
	vif->bytes_out += skb->len;

	/* We are about to write */
	/* XXX: extension headers? */
	if (skb_cow(skb, sizeof(*ipv6h) + LL_RESERVED_SPACE(dev)))
		goto out_free;

	ipv6h = ipv6_hdr(skb);
	ipv6h->hop_limit--;

	IP6CB(skb)->flags |= IP6SKB_FORWARDED;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
		       net, NULL, skb, skb->dev, dev,
		       ip6mr_forward2_finish);

out_free:
	kfree_skb(skb);
	return 0;
}
static int ip6mr_find_vif(struct mr_table *mrt, struct net_device *dev)
{
	int ct;

	for (ct = mrt->maxvif - 1; ct >= 0; ct--) {
		if (mrt->vif_table[ct].dev == dev)
			break;
	}
	return ct;
}
static void ip6_mr_forward(struct net *net, struct mr_table *mrt,
			   struct sk_buff *skb, struct mfc6_cache *c)
{
	int psend = -1;
	int vif, ct;
	int true_vifi = ip6mr_find_vif(mrt, skb->dev);

	vif = c->_c.mfc_parent;
	c->_c.mfc_un.res.pkt++;
	c->_c.mfc_un.res.bytes += skb->len;
	c->_c.mfc_un.res.lastuse = jiffies;

	if (ipv6_addr_any(&c->mf6c_origin) && true_vifi >= 0) {
		struct mfc6_cache *cache_proxy;

		/* For an (*,G) entry, we only check that the incoming
		 * interface is part of the static tree.
		 */
		rcu_read_lock();
		cache_proxy = mr_mfc_find_any_parent(mrt, vif);
		if (cache_proxy &&
		    cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255) {
			rcu_read_unlock();
			goto forward;
		}
		rcu_read_unlock();
	}

	/*
	 * Wrong interface: drop packet and (maybe) send PIM assert.
	 */
	if (mrt->vif_table[vif].dev != skb->dev) {
		c->_c.mfc_un.res.wrong_if++;

		if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* pimsm uses asserts, when switching from RPT to SPT,
		       so that we cannot check that packet arrived on an oif.
		       It is bad, but otherwise we would need to move pretty
		       large chunk of pimd to kernel. Ough... --ANK
		     */
		    (mrt->mroute_do_pim ||
		     c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
		    time_after(jiffies,
			       c->_c.mfc_un.res.last_assert +
			       MFC_ASSERT_THRESH)) {
			c->_c.mfc_un.res.last_assert = jiffies;
			ip6mr_cache_report(mrt, skb, true_vifi, MRT6MSG_WRONGMIF);
		}
		goto dont_forward;
	}

forward:
	mrt->vif_table[vif].pkt_in++;
	mrt->vif_table[vif].bytes_in += skb->len;

	/*
	 *	Forward the frame
	 */
	if (ipv6_addr_any(&c->mf6c_origin) &&
	    ipv6_addr_any(&c->mf6c_mcastgrp)) {
		if (true_vifi >= 0 &&
		    true_vifi != c->_c.mfc_parent &&
		    ipv6_hdr(skb)->hop_limit >
				c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
			/* It's an (*,*) entry and the packet is not coming from
			 * the upstream: forward the packet to the upstream
			 * only.
			 */
			psend = c->_c.mfc_parent;
			goto last_forward;
		}
		goto dont_forward;
	}
	for (ct = c->_c.mfc_un.res.maxvif - 1;
	     ct >= c->_c.mfc_un.res.minvif; ct--) {
		/* For (*,G) entry, don't forward to the incoming interface */
		if ((!ipv6_addr_any(&c->mf6c_origin) || ct != true_vifi) &&
		    ipv6_hdr(skb)->hop_limit > c->_c.mfc_un.res.ttls[ct]) {
			if (psend != -1) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					ip6mr_forward2(net, mrt, skb2,
						       c, psend);
			}
			psend = ct;
		}
	}
last_forward:
	if (psend != -1) {
		ip6mr_forward2(net, mrt, skb, c, psend);
		return;
	}

dont_forward:
	kfree_skb(skb);
}
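/* Decision summary for ip6_mr_forward(): a packet arriving on a mif
 * other than the entry's parent bumps wrong_if and may trigger a rate
 * limited MRT6MSG_WRONGMIF assert to the daemon; otherwise it is cloned
 * to every mif whose configured threshold is below the packet's hop
 * limit, with the original skb consumed by the last transmission.
 */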
/*
 *	Multicast packets for forwarding arrive here
 */

int ip6_mr_input(struct sk_buff *skb)
{
	struct mfc6_cache *cache;
	struct net *net = dev_net(skb->dev);
	struct mr_table *mrt;
	struct flowi6 fl6 = {
		.flowi6_iif	= skb->dev->ifindex,
		.flowi6_mark	= skb->mark,
	};
	int err;

	err = ip6mr_fib_lookup(net, &fl6, &mrt);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt,
				 &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr);
	if (!cache) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt,
						     &ipv6_hdr(skb)->daddr,
						     vif);
	}

	/*
	 *	No usable cache entry
	 */
	if (!cache) {
		int vif;

		vif = ip6mr_find_vif(mrt, skb->dev);
		if (vif >= 0) {
			int err = ip6mr_cache_unresolved(mrt, vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip6_mr_forward(net, mrt, skb, cache);

	read_unlock(&mrt_lock);

	return 0;
}
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
		    u32 portid)
{
	int err;
	struct mr_table *mrt;
	struct mfc6_cache *cache;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);

	mrt = ip6mr_get_table(net, RT6_TABLE_DFLT);
	if (!mrt)
		return -ENOENT;

	read_lock(&mrt_lock);
	cache = ip6mr_cache_find(mrt, &rt->rt6i_src.addr, &rt->rt6i_dst.addr);
	if (!cache && skb->dev) {
		int vif = ip6mr_find_vif(mrt, skb->dev);

		if (vif >= 0)
			cache = ip6mr_cache_find_any(mrt, &rt->rt6i_dst.addr,
						     vif);
	}

	if (!cache) {
		struct sk_buff *skb2;
		struct ipv6hdr *iph;
		struct net_device *dev;
		int vif;

		dev = skb->dev;
		if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}

		/* really correct? */
		skb2 = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		NETLINK_CB(skb2).portid = portid;
		skb_reset_transport_header(skb2);

		skb_put(skb2, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb2);

		iph = ipv6_hdr(skb2);
		iph->version = 0;
		iph->priority = 0;
		iph->flow_lbl[0] = 0;
		iph->flow_lbl[1] = 0;
		iph->flow_lbl[2] = 0;
		iph->payload_len = 0;
		iph->nexthdr = IPPROTO_NONE;
		iph->hop_limit = 0;
		iph->saddr = rt->rt6i_src.addr;
		iph->daddr = rt->rt6i_dst.addr;

		err = ip6mr_cache_unresolved(mrt, vif, skb2);
		read_unlock(&mrt_lock);

		return err;
	}

	err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
	read_unlock(&mrt_lock);
	return err;
}
static int ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			     u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
			     int flags)
{
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	int err;

	nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family   = RTNL_FAMILY_IP6MR;
	rtm->rtm_dst_len  = 128;
	rtm->rtm_src_len  = 128;
	rtm->rtm_tos      = 0;
	rtm->rtm_table    = mrt->id;
	if (nla_put_u32(skb, RTA_TABLE, mrt->id))
		goto nla_put_failure;
	rtm->rtm_type = RTN_MULTICAST;
	rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
	if (c->_c.mfc_flags & MFC_STATIC)
		rtm->rtm_protocol = RTPROT_STATIC;
	else
		rtm->rtm_protocol = RTPROT_MROUTED;
	rtm->rtm_flags    = 0;

	if (nla_put_in6_addr(skb, RTA_SRC, &c->mf6c_origin) ||
	    nla_put_in6_addr(skb, RTA_DST, &c->mf6c_mcastgrp))
		goto nla_put_failure;
	err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
	/* do not break the dump if cache is unresolved */
	if (err < 0 && err != -ENOENT)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static int _ip6mr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      u32 portid, u32 seq, struct mr_mfc *c,
			      int cmd, int flags)
{
	return ip6mr_fill_mroute(mrt, skb, portid, seq, (struct mfc6_cache *)c,
				 cmd, flags);
}
static int mr6_msgsize(bool unresolved, int maxvif)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtmsg))
		+ nla_total_size(4)	/* RTA_TABLE */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_SRC */
		+ nla_total_size(sizeof(struct in6_addr))	/* RTA_DST */
		;

	if (!unresolved)
		len = len
		      + nla_total_size(4)	/* RTA_IIF */
		      + nla_total_size(0)	/* RTA_MULTIPATH */
		      + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
						/* RTA_MFC_STATS */
		      + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
		;

	return len;
}
static void mr6_netlink_event(struct mr_table *mrt, struct mfc6_cache *mfc,
			      int cmd)
{
	struct net *net = read_pnet(&mrt->net);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(mr6_msgsize(mfc->_c.mfc_parent >= MAXMIFS, mrt->maxvif),
			GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
	if (err < 0)
		goto errout;

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE, NULL, GFP_ATOMIC);
	return;

errout:
	kfree_skb(skb);
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE, err);
}
static size_t mrt6msg_netlink_msgsize(size_t payloadlen)
{
	size_t len =
		NLMSG_ALIGN(sizeof(struct rtgenmsg))
		+ nla_total_size(1)	/* IP6MRA_CREPORT_MSGTYPE */
		+ nla_total_size(4)	/* IP6MRA_CREPORT_MIF_ID */
					/* IP6MRA_CREPORT_SRC_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_DST_ADDR */
		+ nla_total_size(sizeof(struct in6_addr))
					/* IP6MRA_CREPORT_PKT */
		+ nla_total_size(payloadlen)
		;

	return len;
}
static void mrt6msg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
{
	struct net *net = read_pnet(&mrt->net);
	struct nlmsghdr *nlh;
	struct rtgenmsg *rtgenm;
	struct mrt6msg *msg;
	struct sk_buff *skb;
	struct nlattr *nla;
	int payloadlen;

	payloadlen = pkt->len - sizeof(struct mrt6msg);
	msg = (struct mrt6msg *)skb_transport_header(pkt);

	skb = nlmsg_new(mrt6msg_netlink_msgsize(payloadlen), GFP_ATOMIC);
	if (!skb)
		goto errout;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
			sizeof(struct rtgenmsg), 0);
	if (!nlh)
		goto errout;
	rtgenm = nlmsg_data(nlh);
	rtgenm->rtgen_family = RTNL_FAMILY_IP6MR;
	if (nla_put_u8(skb, IP6MRA_CREPORT_MSGTYPE, msg->im6_msgtype) ||
	    nla_put_u32(skb, IP6MRA_CREPORT_MIF_ID, msg->im6_mif) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_SRC_ADDR,
			     &msg->im6_src) ||
	    nla_put_in6_addr(skb, IP6MRA_CREPORT_DST_ADDR,
			     &msg->im6_dst))
		goto nla_put_failure;

	nla = nla_reserve(skb, IP6MRA_CREPORT_PKT, payloadlen);
	if (!nla || skb_copy_bits(pkt, sizeof(struct mrt6msg),
				  nla_data(nla), payloadlen))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_MROUTE_R, NULL, GFP_ATOMIC);
	return;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
errout:
	kfree_skb(skb);
	rtnl_set_sk_err(net, RTNLGRP_IPV6_MROUTE_R, -ENOBUFS);
}
static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
	return mr_rtm_dumproute(skb, cb, ip6mr_mr_table_iter,
				_ip6mr_fill_mroute, &mfc_unres_lock);
}