net/ipv4/ipmr.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *      IP multicast routing support for mrouted 3.6/3.8
4  *
5  *              (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
6  *        Linux Consultancy and Custom Driver Development
7  *
8  *      Fixes:
9  *      Michael Chastain        :       Incorrect size of copying.
10  *      Alan Cox                :       Added the cache manager code
11  *      Alan Cox                :       Fixed the clone/copy bug and device race.
12  *      Mike McLagan            :       Routing by source
13  *      Malcolm Beattie         :       Buffer handling fixes.
14  *      Alexey Kuznetsov        :       Double buffer free and other fixes.
15  *      SVR Anand               :       Fixed several multicast bugs and problems.
16  *      Alexey Kuznetsov        :       Status, optimisations and more.
17  *      Brad Parker             :       Better behaviour on mrouted upcall
18  *                                      overflow.
19  *      Carlos Picoto           :       PIMv1 Support
20  *      Pavlin Ivanov Radoslavov:       PIMv2 Registers must checksum only PIM header
21  *                                      Relax this requirement to work with older peers.
22  */
23
24 #include <linux/uaccess.h>
25 #include <linux/types.h>
26 #include <linux/cache.h>
27 #include <linux/capability.h>
28 #include <linux/errno.h>
29 #include <linux/mm.h>
30 #include <linux/kernel.h>
31 #include <linux/fcntl.h>
32 #include <linux/stat.h>
33 #include <linux/socket.h>
34 #include <linux/in.h>
35 #include <linux/inet.h>
36 #include <linux/netdevice.h>
37 #include <linux/inetdevice.h>
38 #include <linux/igmp.h>
39 #include <linux/proc_fs.h>
40 #include <linux/seq_file.h>
41 #include <linux/mroute.h>
42 #include <linux/init.h>
43 #include <linux/if_ether.h>
44 #include <linux/slab.h>
45 #include <net/net_namespace.h>
46 #include <net/ip.h>
47 #include <net/protocol.h>
48 #include <linux/skbuff.h>
49 #include <net/route.h>
50 #include <net/icmp.h>
51 #include <net/udp.h>
52 #include <net/raw.h>
53 #include <linux/notifier.h>
54 #include <linux/if_arp.h>
55 #include <linux/netfilter_ipv4.h>
56 #include <linux/compat.h>
57 #include <linux/export.h>
58 #include <linux/rhashtable.h>
59 #include <net/ip_tunnels.h>
60 #include <net/checksum.h>
61 #include <net/netlink.h>
62 #include <net/fib_rules.h>
63 #include <linux/netconf.h>
64 #include <net/rtnh.h>
65
66 #include <linux/nospec.h>
67
68 struct ipmr_rule {
69         struct fib_rule         common;
70 };
71
72 struct ipmr_result {
73         struct mr_table         *mrt;
74 };
75
76 /* Big lock, protecting vif table, mrt cache and mroute socket state.
77  * Note that the changes are semaphored via rtnl_lock.
78  */
79
80 static DEFINE_RWLOCK(mrt_lock);
81
82 /* Multicast router control variables */
83
84 /* Special spinlock for queue of unresolved entries */
85 static DEFINE_SPINLOCK(mfc_unres_lock);
86
87 /* We return to original Alan's scheme. Hash table of resolved
88  * entries is changed only in process context and protected
89  * with weak lock mrt_lock. Queue of unresolved entries is protected
90  * with strong spinlock mfc_unres_lock.
91  *
92  * In this case data path is free of exclusive locks at all.
93  */
94
95 static struct kmem_cache *mrt_cachep __ro_after_init;
96
97 static struct mr_table *ipmr_new_table(struct net *net, u32 id);
98 static void ipmr_free_table(struct mr_table *mrt);
99
100 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
101                           struct net_device *dev, struct sk_buff *skb,
102                           struct mfc_cache *cache, int local);
103 static int ipmr_cache_report(struct mr_table *mrt,
104                              struct sk_buff *pkt, vifi_t vifi, int assert);
105 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
106                                  int cmd);
107 static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
108 static void mroute_clean_tables(struct mr_table *mrt, int flags);
109 static void ipmr_expire_process(struct timer_list *t);
110
111 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
112 #define ipmr_for_each_table(mrt, net)                                   \
113         list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list,        \
114                                 lockdep_rtnl_is_held() ||               \
115                                 list_empty(&net->ipv4.mr_tables))
116
117 static struct mr_table *ipmr_mr_table_iter(struct net *net,
118                                            struct mr_table *mrt)
119 {
120         struct mr_table *ret;
121
122         if (!mrt)
123                 ret = list_entry_rcu(net->ipv4.mr_tables.next,
124                                      struct mr_table, list);
125         else
126                 ret = list_entry_rcu(mrt->list.next,
127                                      struct mr_table, list);
128
129         if (&ret->list == &net->ipv4.mr_tables)
130                 return NULL;
131         return ret;
132 }
133
134 static struct mr_table *ipmr_get_table(struct net *net, u32 id)
135 {
136         struct mr_table *mrt;
137
138         ipmr_for_each_table(mrt, net) {
139                 if (mrt->id == id)
140                         return mrt;
141         }
142         return NULL;
143 }
144
145 static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
146                            struct mr_table **mrt)
147 {
148         int err;
149         struct ipmr_result res;
150         struct fib_lookup_arg arg = {
151                 .result = &res,
152                 .flags = FIB_LOOKUP_NOREF,
153         };
154
155         /* update flow if oif or iif point to device enslaved to l3mdev */
156         l3mdev_update_flow(net, flowi4_to_flowi(flp4));
157
158         err = fib_rules_lookup(net->ipv4.mr_rules_ops,
159                                flowi4_to_flowi(flp4), 0, &arg);
160         if (err < 0)
161                 return err;
162         *mrt = res.mrt;
163         return 0;
164 }
165
166 static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
167                             int flags, struct fib_lookup_arg *arg)
168 {
169         struct ipmr_result *res = arg->result;
170         struct mr_table *mrt;
171
172         switch (rule->action) {
173         case FR_ACT_TO_TBL:
174                 break;
175         case FR_ACT_UNREACHABLE:
176                 return -ENETUNREACH;
177         case FR_ACT_PROHIBIT:
178                 return -EACCES;
179         case FR_ACT_BLACKHOLE:
180         default:
181                 return -EINVAL;
182         }
183
184         arg->table = fib_rule_get_table(rule, arg);
185
186         mrt = ipmr_get_table(rule->fr_net, arg->table);
187         if (!mrt)
188                 return -EAGAIN;
189         res->mrt = mrt;
190         return 0;
191 }
192
193 static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
194 {
195         return 1;
196 }
197
198 static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
199         FRA_GENERIC_POLICY,
200 };
201
202 static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
203                                struct fib_rule_hdr *frh, struct nlattr **tb,
204                                struct netlink_ext_ack *extack)
205 {
206         return 0;
207 }
208
209 static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
210                              struct nlattr **tb)
211 {
212         return 1;
213 }
214
215 static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
216                           struct fib_rule_hdr *frh)
217 {
218         frh->dst_len = 0;
219         frh->src_len = 0;
220         frh->tos     = 0;
221         return 0;
222 }
223
224 static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
225         .family         = RTNL_FAMILY_IPMR,
226         .rule_size      = sizeof(struct ipmr_rule),
227         .addr_size      = sizeof(u32),
228         .action         = ipmr_rule_action,
229         .match          = ipmr_rule_match,
230         .configure      = ipmr_rule_configure,
231         .compare        = ipmr_rule_compare,
232         .fill           = ipmr_rule_fill,
233         .nlgroup        = RTNLGRP_IPV4_RULE,
234         .policy         = ipmr_rule_policy,
235         .owner          = THIS_MODULE,
236 };
237
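/* Per-netns setup for the multi-table case: register the IPMR fib-rules ops,
 * create the default table and install a catch-all rule pointing at it.
 */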
238 static int __net_init ipmr_rules_init(struct net *net)
239 {
240         struct fib_rules_ops *ops;
241         struct mr_table *mrt;
242         int err;
243
244         ops = fib_rules_register(&ipmr_rules_ops_template, net);
245         if (IS_ERR(ops))
246                 return PTR_ERR(ops);
247
248         INIT_LIST_HEAD(&net->ipv4.mr_tables);
249
250         mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
251         if (IS_ERR(mrt)) {
252                 err = PTR_ERR(mrt);
253                 goto err1;
254         }
255
256         err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
257         if (err < 0)
258                 goto err2;
259
260         net->ipv4.mr_rules_ops = ops;
261         return 0;
262
263 err2:
264         ipmr_free_table(mrt);
265 err1:
266         fib_rules_unregister(ops);
267         return err;
268 }
269
270 static void __net_exit ipmr_rules_exit(struct net *net)
271 {
272         struct mr_table *mrt, *next;
273
274         rtnl_lock();
275         list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
276                 list_del(&mrt->list);
277                 ipmr_free_table(mrt);
278         }
279         fib_rules_unregister(net->ipv4.mr_rules_ops);
280         rtnl_unlock();
281 }
282
283 static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
284                            struct netlink_ext_ack *extack)
285 {
286         return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR, extack);
287 }
288
289 static unsigned int ipmr_rules_seq_read(struct net *net)
290 {
291         return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
292 }
293
294 bool ipmr_rule_default(const struct fib_rule *rule)
295 {
296         return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
297 }
298 EXPORT_SYMBOL(ipmr_rule_default);
299 #else
300 #define ipmr_for_each_table(mrt, net) \
301         for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
302
303 static struct mr_table *ipmr_mr_table_iter(struct net *net,
304                                            struct mr_table *mrt)
305 {
306         if (!mrt)
307                 return net->ipv4.mrt;
308         return NULL;
309 }
310
311 static struct mr_table *ipmr_get_table(struct net *net, u32 id)
312 {
313         return net->ipv4.mrt;
314 }
315
316 static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
317                            struct mr_table **mrt)
318 {
319         *mrt = net->ipv4.mrt;
320         return 0;
321 }
322
323 static int __net_init ipmr_rules_init(struct net *net)
324 {
325         struct mr_table *mrt;
326
327         mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
328         if (IS_ERR(mrt))
329                 return PTR_ERR(mrt);
330         net->ipv4.mrt = mrt;
331         return 0;
332 }
333
334 static void __net_exit ipmr_rules_exit(struct net *net)
335 {
336         rtnl_lock();
337         ipmr_free_table(net->ipv4.mrt);
338         net->ipv4.mrt = NULL;
339         rtnl_unlock();
340 }
341
342 static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
343                            struct netlink_ext_ack *extack)
344 {
345         return 0;
346 }
347
348 static unsigned int ipmr_rules_seq_read(struct net *net)
349 {
350         return 0;
351 }
352
353 bool ipmr_rule_default(const struct fib_rule *rule)
354 {
355         return true;
356 }
357 EXPORT_SYMBOL(ipmr_rule_default);
358 #endif
359
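/* rhashtable compare callback: returns 0 only when both the multicast group
 * and the origin address match the lookup key (see ipmr_rht_params below).
 */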
360 static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
361                                 const void *ptr)
362 {
363         const struct mfc_cache_cmp_arg *cmparg = arg->key;
364         struct mfc_cache *c = (struct mfc_cache *)ptr;
365
366         return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
367                cmparg->mfc_origin != c->mfc_origin;
368 }
369
370 static const struct rhashtable_params ipmr_rht_params = {
371         .head_offset = offsetof(struct mr_mfc, mnode),
372         .key_offset = offsetof(struct mfc_cache, cmparg),
373         .key_len = sizeof(struct mfc_cache_cmp_arg),
374         .nelem_hint = 3,
375         .obj_cmpfn = ipmr_hash_cmp,
376         .automatic_shrinking = true,
377 };
378
379 static void ipmr_new_table_set(struct mr_table *mrt,
380                                struct net *net)
381 {
382 #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
383         list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
384 #endif
385 }
386
387 static struct mfc_cache_cmp_arg ipmr_mr_table_ops_cmparg_any = {
388         .mfc_mcastgrp = htonl(INADDR_ANY),
389         .mfc_origin = htonl(INADDR_ANY),
390 };
391
392 static struct mr_table_ops ipmr_mr_table_ops = {
393         .rht_params = &ipmr_rht_params,
394         .cmparg_any = &ipmr_mr_table_ops_cmparg_any,
395 };
396
397 static struct mr_table *ipmr_new_table(struct net *net, u32 id)
398 {
399         struct mr_table *mrt;
400
401         /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
402         if (id != RT_TABLE_DEFAULT && id >= 1000000000)
403                 return ERR_PTR(-EINVAL);
404
405         mrt = ipmr_get_table(net, id);
406         if (mrt)
407                 return mrt;
408
409         return mr_table_alloc(net, id, &ipmr_mr_table_ops,
410                               ipmr_expire_process, ipmr_new_table_set);
411 }
412
413 static void ipmr_free_table(struct mr_table *mrt)
414 {
415         del_timer_sync(&mrt->ipmr_expire_timer);
416         mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
417                                  MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC);
418         rhltable_destroy(&mrt->mfc_hash);
419         kfree(mrt);
420 }
421
422 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
423
424 /* Initialize ipmr pimreg/tunnel in_device */
425 static bool ipmr_init_vif_indev(const struct net_device *dev)
426 {
427         struct in_device *in_dev;
428
429         ASSERT_RTNL();
430
431         in_dev = __in_dev_get_rtnl(dev);
432         if (!in_dev)
433                 return false;
434         ipv4_devconf_setall(in_dev);
435         neigh_parms_data_state_setall(in_dev->arp_parms);
436         IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
437
438         return true;
439 }
440
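/* Create a DVMRP-style IPIP tunnel ("dvmrp%d") for a VIFF_TUNNEL vif by
 * driving the tunl0 fallback device's ndo_tunnel_ctl(SIOCADDTUNNEL).
 */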
441 static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
442 {
443         struct net_device *tunnel_dev, *new_dev;
444         struct ip_tunnel_parm p = { };
445         int err;
446
447         tunnel_dev = __dev_get_by_name(net, "tunl0");
448         if (!tunnel_dev)
449                 goto out;
450
451         p.iph.daddr = v->vifc_rmt_addr.s_addr;
452         p.iph.saddr = v->vifc_lcl_addr.s_addr;
453         p.iph.version = 4;
454         p.iph.ihl = 5;
455         p.iph.protocol = IPPROTO_IPIP;
456         sprintf(p.name, "dvmrp%d", v->vifc_vifi);
457
458         if (!tunnel_dev->netdev_ops->ndo_tunnel_ctl)
459                 goto out;
460         err = tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
461                         SIOCADDTUNNEL);
462         if (err)
463                 goto out;
464
465         new_dev = __dev_get_by_name(net, p.name);
466         if (!new_dev)
467                 goto out;
468
469         new_dev->flags |= IFF_MULTICAST;
470         if (!ipmr_init_vif_indev(new_dev))
471                 goto out_unregister;
472         if (dev_open(new_dev, NULL))
473                 goto out_unregister;
474         dev_hold(new_dev);
475         err = dev_set_allmulti(new_dev, 1);
476         if (err) {
477                 dev_close(new_dev);
478                 tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
479                                 SIOCDELTUNNEL);
480                 dev_put(new_dev);
481                 new_dev = ERR_PTR(err);
482         }
483         return new_dev;
484
485 out_unregister:
486         unregister_netdevice(new_dev);
487 out:
488         return ERR_PTR(-ENOBUFS);
489 }
490
491 #if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
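/* The PIM register vif ("pimreg") has no real output path: anything
 * transmitted on it is bounced back to the daemon as an IGMPMSG_WHOLEPKT
 * upcall and the skb is freed.
 */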
492 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
493 {
494         struct net *net = dev_net(dev);
495         struct mr_table *mrt;
496         struct flowi4 fl4 = {
497                 .flowi4_oif     = dev->ifindex,
498                 .flowi4_iif     = skb->skb_iif ? : LOOPBACK_IFINDEX,
499                 .flowi4_mark    = skb->mark,
500         };
501         int err;
502
503         err = ipmr_fib_lookup(net, &fl4, &mrt);
504         if (err < 0) {
505                 kfree_skb(skb);
506                 return err;
507         }
508
509         read_lock(&mrt_lock);
510         dev->stats.tx_bytes += skb->len;
511         dev->stats.tx_packets++;
512         ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
513         read_unlock(&mrt_lock);
514         kfree_skb(skb);
515         return NETDEV_TX_OK;
516 }
517
518 static int reg_vif_get_iflink(const struct net_device *dev)
519 {
520         return 0;
521 }
522
523 static const struct net_device_ops reg_vif_netdev_ops = {
524         .ndo_start_xmit = reg_vif_xmit,
525         .ndo_get_iflink = reg_vif_get_iflink,
526 };
527
528 static void reg_vif_setup(struct net_device *dev)
529 {
530         dev->type               = ARPHRD_PIMREG;
531         dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
532         dev->flags              = IFF_NOARP;
533         dev->netdev_ops         = &reg_vif_netdev_ops;
534         dev->needs_free_netdev  = true;
535         dev->features           |= NETIF_F_NETNS_LOCAL;
536 }
537
538 static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
539 {
540         struct net_device *dev;
541         char name[IFNAMSIZ];
542
543         if (mrt->id == RT_TABLE_DEFAULT)
544                 sprintf(name, "pimreg");
545         else
546                 sprintf(name, "pimreg%u", mrt->id);
547
548         dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
549
550         if (!dev)
551                 return NULL;
552
553         dev_net_set(dev, net);
554
555         if (register_netdevice(dev)) {
556                 free_netdev(dev);
557                 return NULL;
558         }
559
560         if (!ipmr_init_vif_indev(dev))
561                 goto failure;
562         if (dev_open(dev, NULL))
563                 goto failure;
564
565         dev_hold(dev);
566
567         return dev;
568
569 failure:
570         unregister_netdevice(dev);
571         return NULL;
572 }
573
574 /* called with rcu_read_lock() */
575 static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
576                      unsigned int pimlen)
577 {
578         struct net_device *reg_dev = NULL;
579         struct iphdr *encap;
580
581         encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
582         /* Check that:
583          * a. packet is really sent to a multicast group
584          * b. packet is not a NULL-REGISTER
585          * c. packet is not truncated
586          */
587         if (!ipv4_is_multicast(encap->daddr) ||
588             encap->tot_len == 0 ||
589             ntohs(encap->tot_len) + pimlen > skb->len)
590                 return 1;
591
592         read_lock(&mrt_lock);
593         if (mrt->mroute_reg_vif_num >= 0)
594                 reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
595         read_unlock(&mrt_lock);
596
597         if (!reg_dev)
598                 return 1;
599
600         skb->mac_header = skb->network_header;
601         skb_pull(skb, (u8 *)encap - skb->data);
602         skb_reset_network_header(skb);
603         skb->protocol = htons(ETH_P_IP);
604         skb->ip_summed = CHECKSUM_NONE;
605
606         skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
607
608         netif_rx(skb);
609
610         return NET_RX_SUCCESS;
611 }
612 #else
613 static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
614 {
615         return NULL;
616 }
617 #endif
618
619 static int call_ipmr_vif_entry_notifiers(struct net *net,
620                                          enum fib_event_type event_type,
621                                          struct vif_device *vif,
622                                          vifi_t vif_index, u32 tb_id)
623 {
624         return mr_call_vif_notifiers(net, RTNL_FAMILY_IPMR, event_type,
625                                      vif, vif_index, tb_id,
626                                      &net->ipv4.ipmr_seq);
627 }
628
629 static int call_ipmr_mfc_entry_notifiers(struct net *net,
630                                          enum fib_event_type event_type,
631                                          struct mfc_cache *mfc, u32 tb_id)
632 {
633         return mr_call_mfc_notifiers(net, RTNL_FAMILY_IPMR, event_type,
634                                      &mfc->_c, tb_id, &net->ipv4.ipmr_seq);
635 }
636
637 /**
638  *      vif_delete - Delete a VIF entry
639  *      @mrt: Table to delete from
640  *      @vifi: VIF identifier to delete
641  *      @notify: Set to 1, if the caller is a notifier_call
642  *      @head: if unregistering the VIF, place it on this queue
643  */
644 static int vif_delete(struct mr_table *mrt, int vifi, int notify,
645                       struct list_head *head)
646 {
647         struct net *net = read_pnet(&mrt->net);
648         struct vif_device *v;
649         struct net_device *dev;
650         struct in_device *in_dev;
651
652         if (vifi < 0 || vifi >= mrt->maxvif)
653                 return -EADDRNOTAVAIL;
654
655         v = &mrt->vif_table[vifi];
656
657         if (VIF_EXISTS(mrt, vifi))
658                 call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, vifi,
659                                               mrt->id);
660
661         write_lock_bh(&mrt_lock);
662         dev = v->dev;
663         v->dev = NULL;
664
665         if (!dev) {
666                 write_unlock_bh(&mrt_lock);
667                 return -EADDRNOTAVAIL;
668         }
669
670         if (vifi == mrt->mroute_reg_vif_num)
671                 mrt->mroute_reg_vif_num = -1;
672
673         if (vifi + 1 == mrt->maxvif) {
674                 int tmp;
675
676                 for (tmp = vifi - 1; tmp >= 0; tmp--) {
677                         if (VIF_EXISTS(mrt, tmp))
678                                 break;
679                 }
680                 mrt->maxvif = tmp+1;
681         }
682
683         write_unlock_bh(&mrt_lock);
684
685         dev_set_allmulti(dev, -1);
686
687         in_dev = __in_dev_get_rtnl(dev);
688         if (in_dev) {
689                 IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
690                 inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
691                                             NETCONFA_MC_FORWARDING,
692                                             dev->ifindex, &in_dev->cnf);
693                 ip_rt_multicast_event(in_dev);
694         }
695
696         if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
697                 unregister_netdevice_queue(dev, head);
698
699         dev_put(dev);
700         return 0;
701 }
702
703 static void ipmr_cache_free_rcu(struct rcu_head *head)
704 {
705         struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);
706
707         kmem_cache_free(mrt_cachep, (struct mfc_cache *)c);
708 }
709
710 static void ipmr_cache_free(struct mfc_cache *c)
711 {
712         call_rcu(&c->_c.rcu, ipmr_cache_free_rcu);
713 }
714
715 /* Destroy an unresolved cache entry, killing queued skbs
716  * and reporting error to netlink readers.
717  */
718 static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
719 {
720         struct net *net = read_pnet(&mrt->net);
721         struct sk_buff *skb;
722         struct nlmsgerr *e;
723
724         atomic_dec(&mrt->cache_resolve_queue_len);
725
726         while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved))) {
727                 if (ip_hdr(skb)->version == 0) {
728                         struct nlmsghdr *nlh = skb_pull(skb,
729                                                         sizeof(struct iphdr));
730                         nlh->nlmsg_type = NLMSG_ERROR;
731                         nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
732                         skb_trim(skb, nlh->nlmsg_len);
733                         e = nlmsg_data(nlh);
734                         e->error = -ETIMEDOUT;
735                         memset(&e->msg, 0, sizeof(e->msg));
736
737                         rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
738                 } else {
739                         kfree_skb(skb);
740                 }
741         }
742
743         ipmr_cache_free(c);
744 }
745
746 /* Timer process for the unresolved queue. */
747 static void ipmr_expire_process(struct timer_list *t)
748 {
749         struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
750         struct mr_mfc *c, *next;
751         unsigned long expires;
752         unsigned long now;
753
754         if (!spin_trylock(&mfc_unres_lock)) {
755                 mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
756                 return;
757         }
758
759         if (list_empty(&mrt->mfc_unres_queue))
760                 goto out;
761
762         now = jiffies;
763         expires = 10*HZ;
764
765         list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
766                 if (time_after(c->mfc_un.unres.expires, now)) {
767                         unsigned long interval = c->mfc_un.unres.expires - now;
768                         if (interval < expires)
769                                 expires = interval;
770                         continue;
771                 }
772
773                 list_del(&c->list);
774                 mroute_netlink_event(mrt, (struct mfc_cache *)c, RTM_DELROUTE);
775                 ipmr_destroy_unres(mrt, (struct mfc_cache *)c);
776         }
777
778         if (!list_empty(&mrt->mfc_unres_queue))
779                 mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
780
781 out:
782         spin_unlock(&mfc_unres_lock);
783 }
784
785 /* Fill oifs list. It is called under write locked mrt_lock. */
786 static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
787                                    unsigned char *ttls)
788 {
789         int vifi;
790
791         cache->mfc_un.res.minvif = MAXVIFS;
792         cache->mfc_un.res.maxvif = 0;
793         memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
794
795         for (vifi = 0; vifi < mrt->maxvif; vifi++) {
796                 if (VIF_EXISTS(mrt, vifi) &&
797                     ttls[vifi] && ttls[vifi] < 255) {
798                         cache->mfc_un.res.ttls[vifi] = ttls[vifi];
799                         if (cache->mfc_un.res.minvif > vifi)
800                                 cache->mfc_un.res.minvif = vifi;
801                         if (cache->mfc_un.res.maxvif <= vifi)
802                                 cache->mfc_un.res.maxvif = vifi + 1;
803                 }
804         }
805         cache->mfc_un.res.lastuse = jiffies;
806 }
807
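/* Install a new vif from a user vifctl request; runs with RTNL held
 * (called from the ip_mroute_setsockopt() MRT_ADD_VIF path below).
 */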
808 static int vif_add(struct net *net, struct mr_table *mrt,
809                    struct vifctl *vifc, int mrtsock)
810 {
811         struct netdev_phys_item_id ppid = { };
812         int vifi = vifc->vifc_vifi;
813         struct vif_device *v = &mrt->vif_table[vifi];
814         struct net_device *dev;
815         struct in_device *in_dev;
816         int err;
817
818         /* Is vif busy ? */
819         if (VIF_EXISTS(mrt, vifi))
820                 return -EADDRINUSE;
821
822         switch (vifc->vifc_flags) {
823         case VIFF_REGISTER:
824                 if (!ipmr_pimsm_enabled())
825                         return -EINVAL;
826                 /* Special Purpose VIF in PIM
827                  * All the packets will be sent to the daemon
828                  */
829                 if (mrt->mroute_reg_vif_num >= 0)
830                         return -EADDRINUSE;
831                 dev = ipmr_reg_vif(net, mrt);
832                 if (!dev)
833                         return -ENOBUFS;
834                 err = dev_set_allmulti(dev, 1);
835                 if (err) {
836                         unregister_netdevice(dev);
837                         dev_put(dev);
838                         return err;
839                 }
840                 break;
841         case VIFF_TUNNEL:
842                 dev = ipmr_new_tunnel(net, vifc);
843                 if (IS_ERR(dev))
844                         return PTR_ERR(dev);
845                 break;
846         case VIFF_USE_IFINDEX:
847         case 0:
848                 if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
849                         dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
850                         if (dev && !__in_dev_get_rtnl(dev)) {
851                                 dev_put(dev);
852                                 return -EADDRNOTAVAIL;
853                         }
854                 } else {
855                         dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
856                 }
857                 if (!dev)
858                         return -EADDRNOTAVAIL;
859                 err = dev_set_allmulti(dev, 1);
860                 if (err) {
861                         dev_put(dev);
862                         return err;
863                 }
864                 break;
865         default:
866                 return -EINVAL;
867         }
868
869         in_dev = __in_dev_get_rtnl(dev);
870         if (!in_dev) {
871                 dev_put(dev);
872                 return -EADDRNOTAVAIL;
873         }
874         IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
875         inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
876                                     dev->ifindex, &in_dev->cnf);
877         ip_rt_multicast_event(in_dev);
878
879         /* Fill in the VIF structures */
880         vif_device_init(v, dev, vifc->vifc_rate_limit,
881                         vifc->vifc_threshold,
882                         vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
883                         (VIFF_TUNNEL | VIFF_REGISTER));
884
885         err = dev_get_port_parent_id(dev, &ppid, true);
886         if (err == 0) {
887                 memcpy(v->dev_parent_id.id, ppid.id, ppid.id_len);
888                 v->dev_parent_id.id_len = ppid.id_len;
889         } else {
890                 v->dev_parent_id.id_len = 0;
891         }
892
893         v->local = vifc->vifc_lcl_addr.s_addr;
894         v->remote = vifc->vifc_rmt_addr.s_addr;
895
896         /* And finish update writing critical data */
897         write_lock_bh(&mrt_lock);
898         v->dev = dev;
899         if (v->flags & VIFF_REGISTER)
900                 mrt->mroute_reg_vif_num = vifi;
901         if (vifi+1 > mrt->maxvif)
902                 mrt->maxvif = vifi+1;
903         write_unlock_bh(&mrt_lock);
904         call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, vifi, mrt->id);
905         return 0;
906 }
907
908 /* called with rcu_read_lock() */
909 static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
910                                          __be32 origin,
911                                          __be32 mcastgrp)
912 {
913         struct mfc_cache_cmp_arg arg = {
914                         .mfc_mcastgrp = mcastgrp,
915                         .mfc_origin = origin
916         };
917
918         return mr_mfc_find(mrt, &arg);
919 }
920
921 /* Look for a (*,G) entry */
922 static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
923                                              __be32 mcastgrp, int vifi)
924 {
925         struct mfc_cache_cmp_arg arg = {
926                         .mfc_mcastgrp = mcastgrp,
927                         .mfc_origin = htonl(INADDR_ANY)
928         };
929
930         if (mcastgrp == htonl(INADDR_ANY))
931                 return mr_mfc_find_any_parent(mrt, vifi);
932         return mr_mfc_find_any(mrt, vifi, &arg);
933 }
934
935 /* Look for a (S,G,iif) entry if parent != -1 */
936 static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
937                                                 __be32 origin, __be32 mcastgrp,
938                                                 int parent)
939 {
940         struct mfc_cache_cmp_arg arg = {
941                         .mfc_mcastgrp = mcastgrp,
942                         .mfc_origin = origin,
943         };
944
945         return mr_mfc_find_parent(mrt, &arg, parent);
946 }
947
948 /* Allocate a multicast cache entry */
949 static struct mfc_cache *ipmr_cache_alloc(void)
950 {
951         struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
952
953         if (c) {
954                 c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
955                 c->_c.mfc_un.res.minvif = MAXVIFS;
956                 c->_c.free = ipmr_cache_free_rcu;
957                 refcount_set(&c->_c.mfc_un.res.refcount, 1);
958         }
959         return c;
960 }
961
962 static struct mfc_cache *ipmr_cache_alloc_unres(void)
963 {
964         struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
965
966         if (c) {
967                 skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
968                 c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
969         }
970         return c;
971 }
972
973 /* A cache entry has gone into a resolved state from queued */
974 static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
975                                struct mfc_cache *uc, struct mfc_cache *c)
976 {
977         struct sk_buff *skb;
978         struct nlmsgerr *e;
979
980         /* Play the pending entries through our router */
981         while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
982                 if (ip_hdr(skb)->version == 0) {
983                         struct nlmsghdr *nlh = skb_pull(skb,
984                                                         sizeof(struct iphdr));
985
986                         if (mr_fill_mroute(mrt, skb, &c->_c,
987                                            nlmsg_data(nlh)) > 0) {
988                                 nlh->nlmsg_len = skb_tail_pointer(skb) -
989                                                  (u8 *)nlh;
990                         } else {
991                                 nlh->nlmsg_type = NLMSG_ERROR;
992                                 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
993                                 skb_trim(skb, nlh->nlmsg_len);
994                                 e = nlmsg_data(nlh);
995                                 e->error = -EMSGSIZE;
996                                 memset(&e->msg, 0, sizeof(e->msg));
997                         }
998
999                         rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1000                 } else {
1001                         ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
1002                 }
1003         }
1004 }
1005
1006 /* Bounce a cache query up to mrouted and netlink.
1007  *
1008  * Called under mrt_lock.
1009  */
1010 static int ipmr_cache_report(struct mr_table *mrt,
1011                              struct sk_buff *pkt, vifi_t vifi, int assert)
1012 {
1013         const int ihl = ip_hdrlen(pkt);
1014         struct sock *mroute_sk;
1015         struct igmphdr *igmp;
1016         struct igmpmsg *msg;
1017         struct sk_buff *skb;
1018         int ret;
1019
1020         if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
1021                 skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
1022         else
1023                 skb = alloc_skb(128, GFP_ATOMIC);
1024
1025         if (!skb)
1026                 return -ENOBUFS;
1027
1028         if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) {
1029                 /* Ugly, but we have no choice with this interface.
1030                  * Duplicate old header, fix ihl, length etc.
1031                  * And all this only to mangle msg->im_msgtype and
1032                  * to set msg->im_mbz to "mbz" :-)
1033                  */
1034                 skb_push(skb, sizeof(struct iphdr));
1035                 skb_reset_network_header(skb);
1036                 skb_reset_transport_header(skb);
1037                 msg = (struct igmpmsg *)skb_network_header(skb);
1038                 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
1039                 msg->im_msgtype = assert;
1040                 msg->im_mbz = 0;
1041                 if (assert == IGMPMSG_WRVIFWHOLE)
1042                         msg->im_vif = vifi;
1043                 else
1044                         msg->im_vif = mrt->mroute_reg_vif_num;
1045                 ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
1046                 ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
1047                                              sizeof(struct iphdr));
1048         } else {
1049                 /* Copy the IP header */
1050                 skb_set_network_header(skb, skb->len);
1051                 skb_put(skb, ihl);
1052                 skb_copy_to_linear_data(skb, pkt->data, ihl);
1053                 /* Flag to the kernel this is a route add */
1054                 ip_hdr(skb)->protocol = 0;
1055                 msg = (struct igmpmsg *)skb_network_header(skb);
1056                 msg->im_vif = vifi;
1057                 skb_dst_set(skb, dst_clone(skb_dst(pkt)));
1058                 /* Add our header */
1059                 igmp = skb_put(skb, sizeof(struct igmphdr));
1060                 igmp->type = assert;
1061                 msg->im_msgtype = assert;
1062                 igmp->code = 0;
1063                 ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
1064                 skb->transport_header = skb->network_header;
1065         }
1066
1067         rcu_read_lock();
1068         mroute_sk = rcu_dereference(mrt->mroute_sk);
1069         if (!mroute_sk) {
1070                 rcu_read_unlock();
1071                 kfree_skb(skb);
1072                 return -EINVAL;
1073         }
1074
1075         igmpmsg_netlink_event(mrt, skb);
1076
1077         /* Deliver to mrouted */
1078         ret = sock_queue_rcv_skb(mroute_sk, skb);
1079         rcu_read_unlock();
1080         if (ret < 0) {
1081                 net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
1082                 kfree_skb(skb);
1083         }
1084
1085         return ret;
1086 }
1087
1088 /* Queue a packet for resolution. It gets locked cache entry! */
1089 static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
1090                                  struct sk_buff *skb, struct net_device *dev)
1091 {
1092         const struct iphdr *iph = ip_hdr(skb);
1093         struct mfc_cache *c;
1094         bool found = false;
1095         int err;
1096
1097         spin_lock_bh(&mfc_unres_lock);
1098         list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
1099                 if (c->mfc_mcastgrp == iph->daddr &&
1100                     c->mfc_origin == iph->saddr) {
1101                         found = true;
1102                         break;
1103                 }
1104         }
1105
1106         if (!found) {
1107                 /* Create a new entry if allowable */
1108                 c = ipmr_cache_alloc_unres();
1109                 if (!c) {
1110                         spin_unlock_bh(&mfc_unres_lock);
1111
1112                         kfree_skb(skb);
1113                         return -ENOBUFS;
1114                 }
1115
1116                 /* Fill in the new cache entry */
1117                 c->_c.mfc_parent = -1;
1118                 c->mfc_origin   = iph->saddr;
1119                 c->mfc_mcastgrp = iph->daddr;
1120
1121                 /* Reflect first query at mrouted. */
1122                 err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
1123
1124                 if (err < 0) {
1125                         /* If the report failed throw the cache entry
1126                            out - Brad Parker
1127                          */
1128                         spin_unlock_bh(&mfc_unres_lock);
1129
1130                         ipmr_cache_free(c);
1131                         kfree_skb(skb);
1132                         return err;
1133                 }
1134
1135                 atomic_inc(&mrt->cache_resolve_queue_len);
1136                 list_add(&c->_c.list, &mrt->mfc_unres_queue);
1137                 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1138
1139                 if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
1140                         mod_timer(&mrt->ipmr_expire_timer,
1141                                   c->_c.mfc_un.unres.expires);
1142         }
1143
1144         /* See if we can append the packet */
1145         if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
1146                 kfree_skb(skb);
1147                 err = -ENOBUFS;
1148         } else {
1149                 if (dev) {
1150                         skb->dev = dev;
1151                         skb->skb_iif = dev->ifindex;
1152                 }
1153                 skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
1154                 err = 0;
1155         }
1156
1157         spin_unlock_bh(&mfc_unres_lock);
1158         return err;
1159 }
1160
1161 /* MFC cache manipulation by user space mroute daemon */
1162
1163 static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
1164 {
1165         struct net *net = read_pnet(&mrt->net);
1166         struct mfc_cache *c;
1167
1168         /* The entries are added/deleted only under RTNL */
1169         rcu_read_lock();
1170         c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
1171                                    mfc->mfcc_mcastgrp.s_addr, parent);
1172         rcu_read_unlock();
1173         if (!c)
1174                 return -ENOENT;
1175         rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ipmr_rht_params);
1176         list_del_rcu(&c->_c.list);
1177         call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
1178         mroute_netlink_event(mrt, c, RTM_DELROUTE);
1179         mr_cache_put(&c->_c);
1180
1181         return 0;
1182 }
1183
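/* Add or replace an (S,G) / (*,G) entry supplied by the daemon. If an
 * unresolved entry for the same flow was queued, its pending skbs are
 * replayed through ipmr_cache_resolve().
 */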
1184 static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
1185                         struct mfcctl *mfc, int mrtsock, int parent)
1186 {
1187         struct mfc_cache *uc, *c;
1188         struct mr_mfc *_uc;
1189         bool found;
1190         int ret;
1191
1192         if (mfc->mfcc_parent >= MAXVIFS)
1193                 return -ENFILE;
1194
1195         /* The entries are added/deleted only under RTNL */
1196         rcu_read_lock();
1197         c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
1198                                    mfc->mfcc_mcastgrp.s_addr, parent);
1199         rcu_read_unlock();
1200         if (c) {
1201                 write_lock_bh(&mrt_lock);
1202                 c->_c.mfc_parent = mfc->mfcc_parent;
1203                 ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
1204                 if (!mrtsock)
1205                         c->_c.mfc_flags |= MFC_STATIC;
1206                 write_unlock_bh(&mrt_lock);
1207                 call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
1208                                               mrt->id);
1209                 mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1210                 return 0;
1211         }
1212
1213         if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
1214             !ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
1215                 return -EINVAL;
1216
1217         c = ipmr_cache_alloc();
1218         if (!c)
1219                 return -ENOMEM;
1220
1221         c->mfc_origin = mfc->mfcc_origin.s_addr;
1222         c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
1223         c->_c.mfc_parent = mfc->mfcc_parent;
1224         ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
1225         if (!mrtsock)
1226                 c->_c.mfc_flags |= MFC_STATIC;
1227
1228         ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
1229                                   ipmr_rht_params);
1230         if (ret) {
1231                 pr_err("ipmr: rhtable insert error %d\n", ret);
1232                 ipmr_cache_free(c);
1233                 return ret;
1234         }
1235         list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
1236         /* Check to see if we resolved a queued list. If so we
1237          * need to send on the frames and tidy up.
1238          */
1239         found = false;
1240         spin_lock_bh(&mfc_unres_lock);
1241         list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
1242                 uc = (struct mfc_cache *)_uc;
1243                 if (uc->mfc_origin == c->mfc_origin &&
1244                     uc->mfc_mcastgrp == c->mfc_mcastgrp) {
1245                         list_del(&_uc->list);
1246                         atomic_dec(&mrt->cache_resolve_queue_len);
1247                         found = true;
1248                         break;
1249                 }
1250         }
1251         if (list_empty(&mrt->mfc_unres_queue))
1252                 del_timer(&mrt->ipmr_expire_timer);
1253         spin_unlock_bh(&mfc_unres_lock);
1254
1255         if (found) {
1256                 ipmr_cache_resolve(net, mrt, uc, c);
1257                 ipmr_cache_free(uc);
1258         }
1259         call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
1260         mroute_netlink_event(mrt, c, RTM_NEWROUTE);
1261         return 0;
1262 }
1263
1264 /* Close the multicast socket, and clear the vif tables etc */
1265 static void mroute_clean_tables(struct mr_table *mrt, int flags)
1266 {
1267         struct net *net = read_pnet(&mrt->net);
1268         struct mr_mfc *c, *tmp;
1269         struct mfc_cache *cache;
1270         LIST_HEAD(list);
1271         int i;
1272
1273         /* Shut down all active vif entries */
1274         if (flags & (MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC)) {
1275                 for (i = 0; i < mrt->maxvif; i++) {
1276                         if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
1277                              !(flags & MRT_FLUSH_VIFS_STATIC)) ||
1278                             (!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT_FLUSH_VIFS)))
1279                                 continue;
1280                         vif_delete(mrt, i, 0, &list);
1281                 }
1282                 unregister_netdevice_many(&list);
1283         }
1284
1285         /* Wipe the cache */
1286         if (flags & (MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC)) {
1287                 list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
1288                         if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC_STATIC)) ||
1289                             (!(c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC)))
1290                                 continue;
1291                         rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
1292                         list_del_rcu(&c->list);
1293                         cache = (struct mfc_cache *)c;
1294                         call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache,
1295                                                       mrt->id);
1296                         mroute_netlink_event(mrt, cache, RTM_DELROUTE);
1297                         mr_cache_put(c);
1298                 }
1299         }
1300
1301         if (flags & MRT_FLUSH_MFC) {
1302                 if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
1303                         spin_lock_bh(&mfc_unres_lock);
1304                         list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
1305                                 list_del(&c->list);
1306                                 cache = (struct mfc_cache *)c;
1307                                 mroute_netlink_event(mrt, cache, RTM_DELROUTE);
1308                                 ipmr_destroy_unres(mrt, cache);
1309                         }
1310                         spin_unlock_bh(&mfc_unres_lock);
1311                 }
1312         }
1313 }
1314
1315 /* called from ip_ra_control(), before an RCU grace period,
1316  * we dont need to call synchronize_rcu() here
1317  */
1318 static void mrtsock_destruct(struct sock *sk)
1319 {
1320         struct net *net = sock_net(sk);
1321         struct mr_table *mrt;
1322
1323         rtnl_lock();
1324         ipmr_for_each_table(mrt, net) {
1325                 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1326                         IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
1327                         inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
1328                                                     NETCONFA_MC_FORWARDING,
1329                                                     NETCONFA_IFINDEX_ALL,
1330                                                     net->ipv4.devconf_all);
1331                         RCU_INIT_POINTER(mrt->mroute_sk, NULL);
1332                         mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_MFC);
1333                 }
1334         }
1335         rtnl_unlock();
1336 }
1337
1338 /* Socket options and virtual interface manipulation. The whole
1339  * virtual interface system is a complete heap, but unfortunately
1340  * that's how BSD mrouted happens to think. Maybe one day with a proper
1341  * MOSPF/PIM router set up we can clean this up.
1342  */
1343
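/* Userspace sketch (not part of this file): an mrouted/pimd-style daemon
 * typically drives this interface from a raw IGMP socket, e.g.
 *
 *   int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *   int one = 1;
 *   setsockopt(s, IPPROTO_IP, MRT_INIT, &one, sizeof(one));
 *   setsockopt(s, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));   (struct vifctl vc)
 *   setsockopt(s, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));   (struct mfcctl mc)
 *
 * IGMPMSG_NOCACHE/WHOLEPKT upcalls generated below are then read from the
 * same socket.
 */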
1344 int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
1345                          unsigned int optlen)
1346 {
1347         struct net *net = sock_net(sk);
1348         int val, ret = 0, parent = 0;
1349         struct mr_table *mrt;
1350         struct vifctl vif;
1351         struct mfcctl mfc;
1352         bool do_wrvifwhole;
1353         u32 uval;
1354
1355         /* There's one exception to the lock - MRT_DONE which needs to unlock */
1356         rtnl_lock();
1357         if (sk->sk_type != SOCK_RAW ||
1358             inet_sk(sk)->inet_num != IPPROTO_IGMP) {
1359                 ret = -EOPNOTSUPP;
1360                 goto out_unlock;
1361         }
1362
1363         mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1364         if (!mrt) {
1365                 ret = -ENOENT;
1366                 goto out_unlock;
1367         }
1368         if (optname != MRT_INIT) {
1369                 if (sk != rcu_access_pointer(mrt->mroute_sk) &&
1370                     !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
1371                         ret = -EACCES;
1372                         goto out_unlock;
1373                 }
1374         }
1375
1376         switch (optname) {
1377         case MRT_INIT:
1378                 if (optlen != sizeof(int)) {
1379                         ret = -EINVAL;
1380                         break;
1381                 }
1382                 if (rtnl_dereference(mrt->mroute_sk)) {
1383                         ret = -EADDRINUSE;
1384                         break;
1385                 }
1386
1387                 ret = ip_ra_control(sk, 1, mrtsock_destruct);
1388                 if (ret == 0) {
1389                         rcu_assign_pointer(mrt->mroute_sk, sk);
1390                         IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
1391                         inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
1392                                                     NETCONFA_MC_FORWARDING,
1393                                                     NETCONFA_IFINDEX_ALL,
1394                                                     net->ipv4.devconf_all);
1395                 }
1396                 break;
1397         case MRT_DONE:
1398                 if (sk != rcu_access_pointer(mrt->mroute_sk)) {
1399                         ret = -EACCES;
1400                 } else {
1401                         /* We need to unlock here because mrtsock_destruct takes
1402                          * care of rtnl itself and we can't change that due to
1403                          * the IP_ROUTER_ALERT setsockopt which runs without it.
1404                          */
1405                         rtnl_unlock();
1406                         ret = ip_ra_control(sk, 0, NULL);
1407                         goto out;
1408                 }
1409                 break;
1410         case MRT_ADD_VIF:
1411         case MRT_DEL_VIF:
1412                 if (optlen != sizeof(vif)) {
1413                         ret = -EINVAL;
1414                         break;
1415                 }
1416                 if (copy_from_sockptr(&vif, optval, sizeof(vif))) {
1417                         ret = -EFAULT;
1418                         break;
1419                 }
1420                 if (vif.vifc_vifi >= MAXVIFS) {
1421                         ret = -ENFILE;
1422                         break;
1423                 }
1424                 if (optname == MRT_ADD_VIF) {
1425                         ret = vif_add(net, mrt, &vif,
1426                                       sk == rtnl_dereference(mrt->mroute_sk));
1427                 } else {
1428                         ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
1429                 }
1430                 break;
1431         /* Manipulate the forwarding caches. These live
1432          * in a sort of kernel/user symbiosis.
1433          */
1434         case MRT_ADD_MFC:
1435         case MRT_DEL_MFC:
1436                 parent = -1;
1437                 fallthrough;
1438         case MRT_ADD_MFC_PROXY:
1439         case MRT_DEL_MFC_PROXY:
1440                 if (optlen != sizeof(mfc)) {
1441                         ret = -EINVAL;
1442                         break;
1443                 }
1444                 if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) {
1445                         ret = -EFAULT;
1446                         break;
1447                 }
1448                 if (parent == 0)
1449                         parent = mfc.mfcc_parent;
1450                 if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
1451                         ret = ipmr_mfc_delete(mrt, &mfc, parent);
1452                 else
1453                         ret = ipmr_mfc_add(net, mrt, &mfc,
1454                                            sk == rtnl_dereference(mrt->mroute_sk),
1455                                            parent);
1456                 break;
1457         case MRT_FLUSH:
1458                 if (optlen != sizeof(val)) {
1459                         ret = -EINVAL;
1460                         break;
1461                 }
1462                 if (copy_from_sockptr(&val, optval, sizeof(val))) {
1463                         ret = -EFAULT;
1464                         break;
1465                 }
1466                 mroute_clean_tables(mrt, val);
1467                 break;
1468         /* Control PIM assert. */
1469         case MRT_ASSERT:
1470                 if (optlen != sizeof(val)) {
1471                         ret = -EINVAL;
1472                         break;
1473                 }
1474                 if (copy_from_sockptr(&val, optval, sizeof(val))) {
1475                         ret = -EFAULT;
1476                         break;
1477                 }
1478                 mrt->mroute_do_assert = val;
1479                 break;
1480         case MRT_PIM:
1481                 if (!ipmr_pimsm_enabled()) {
1482                         ret = -ENOPROTOOPT;
1483                         break;
1484                 }
1485                 if (optlen != sizeof(val)) {
1486                         ret = -EINVAL;
1487                         break;
1488                 }
1489                 if (copy_from_sockptr(&val, optval, sizeof(val))) {
1490                         ret = -EFAULT;
1491                         break;
1492                 }
1493
1494                 do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE);
1495                 val = !!val;
1496                 if (val != mrt->mroute_do_pim) {
1497                         mrt->mroute_do_pim = val;
1498                         mrt->mroute_do_assert = val;
1499                         mrt->mroute_do_wrvifwhole = do_wrvifwhole;
1500                 }
1501                 break;
1502         case MRT_TABLE:
1503                 if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
1504                         ret = -ENOPROTOOPT;
1505                         break;
1506                 }
1507                 if (optlen != sizeof(uval)) {
1508                         ret = -EINVAL;
1509                         break;
1510                 }
1511                 if (copy_from_sockptr(&uval, optval, sizeof(uval))) {
1512                         ret = -EFAULT;
1513                         break;
1514                 }
1515
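                /* The table id can only be changed before this socket has
                 * become the active mroute socket for its current table.
                 */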
1516                 if (sk == rtnl_dereference(mrt->mroute_sk)) {
1517                         ret = -EBUSY;
1518                 } else {
1519                         mrt = ipmr_new_table(net, uval);
1520                         if (IS_ERR(mrt))
1521                                 ret = PTR_ERR(mrt);
1522                         else
1523                                 raw_sk(sk)->ipmr_table = uval;
1524                 }
1525                 break;
1526         /* Spurious command, or MRT_VERSION which you cannot set. */
1527         default:
1528                 ret = -ENOPROTOOPT;
1529         }
1530 out_unlock:
1531         rtnl_unlock();
1532 out:
1533         return ret;
1534 }
1535
1536 /* Getsock opt support for the multicast routing system. */
1537 int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int __user *optlen)
1538 {
1539         int olr;
1540         int val;
1541         struct net *net = sock_net(sk);
1542         struct mr_table *mrt;
1543
1544         if (sk->sk_type != SOCK_RAW ||
1545             inet_sk(sk)->inet_num != IPPROTO_IGMP)
1546                 return -EOPNOTSUPP;
1547
1548         mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1549         if (!mrt)
1550                 return -ENOENT;
1551
1552         switch (optname) {
1553         case MRT_VERSION:
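                /* 0x0305 is the mroute API version historically reported
                 * to user space (mrouted 3.5-compatible).
                 */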
1554                 val = 0x0305;
1555                 break;
1556         case MRT_PIM:
1557                 if (!ipmr_pimsm_enabled())
1558                         return -ENOPROTOOPT;
1559                 val = mrt->mroute_do_pim;
1560                 break;
1561         case MRT_ASSERT:
1562                 val = mrt->mroute_do_assert;
1563                 break;
1564         default:
1565                 return -ENOPROTOOPT;
1566         }
1567
1568         if (get_user(olr, optlen))
1569                 return -EFAULT;
1570         if (olr < 0)
1571                 return -EINVAL;
1572         olr = min_t(unsigned int, olr, sizeof(int));
1573         if (put_user(olr, optlen))
1574                 return -EFAULT;
1575         if (copy_to_user(optval, &val, olr))
1576                 return -EFAULT;
1577         return 0;
1578 }
1579
1580 /* The IP multicast ioctl support routines. */
1581 int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
1582 {
1583         struct sioc_sg_req sr;
1584         struct sioc_vif_req vr;
1585         struct vif_device *vif;
1586         struct mfc_cache *c;
1587         struct net *net = sock_net(sk);
1588         struct mr_table *mrt;
1589
1590         mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1591         if (!mrt)
1592                 return -ENOENT;
1593
1594         switch (cmd) {
1595         case SIOCGETVIFCNT:
1596                 if (copy_from_user(&vr, arg, sizeof(vr)))
1597                         return -EFAULT;
1598                 if (vr.vifi >= mrt->maxvif)
1599                         return -EINVAL;
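                /* Clamp the index under speculation before it is used to
                 * dereference vif_table (Spectre-v1 hardening).
                 */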
1600                 vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
1601                 read_lock(&mrt_lock);
1602                 vif = &mrt->vif_table[vr.vifi];
1603                 if (VIF_EXISTS(mrt, vr.vifi)) {
1604                         vr.icount = vif->pkt_in;
1605                         vr.ocount = vif->pkt_out;
1606                         vr.ibytes = vif->bytes_in;
1607                         vr.obytes = vif->bytes_out;
1608                         read_unlock(&mrt_lock);
1609
1610                         if (copy_to_user(arg, &vr, sizeof(vr)))
1611                                 return -EFAULT;
1612                         return 0;
1613                 }
1614                 read_unlock(&mrt_lock);
1615                 return -EADDRNOTAVAIL;
1616         case SIOCGETSGCNT:
1617                 if (copy_from_user(&sr, arg, sizeof(sr)))
1618                         return -EFAULT;
1619
1620                 rcu_read_lock();
1621                 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1622                 if (c) {
1623                         sr.pktcnt = c->_c.mfc_un.res.pkt;
1624                         sr.bytecnt = c->_c.mfc_un.res.bytes;
1625                         sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1626                         rcu_read_unlock();
1627
1628                         if (copy_to_user(arg, &sr, sizeof(sr)))
1629                                 return -EFAULT;
1630                         return 0;
1631                 }
1632                 rcu_read_unlock();
1633                 return -EADDRNOTAVAIL;
1634         default:
1635                 return -ENOIOCTLCMD;
1636         }
1637 }
1638
1639 #ifdef CONFIG_COMPAT
1640 struct compat_sioc_sg_req {
1641         struct in_addr src;
1642         struct in_addr grp;
1643         compat_ulong_t pktcnt;
1644         compat_ulong_t bytecnt;
1645         compat_ulong_t wrong_if;
1646 };
1647
1648 struct compat_sioc_vif_req {
1649         vifi_t  vifi;           /* Which iface */
1650         compat_ulong_t icount;
1651         compat_ulong_t ocount;
1652         compat_ulong_t ibytes;
1653         compat_ulong_t obytes;
1654 };
1655
1656 int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
1657 {
1658         struct compat_sioc_sg_req sr;
1659         struct compat_sioc_vif_req vr;
1660         struct vif_device *vif;
1661         struct mfc_cache *c;
1662         struct net *net = sock_net(sk);
1663         struct mr_table *mrt;
1664
1665         mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
1666         if (!mrt)
1667                 return -ENOENT;
1668
1669         switch (cmd) {
1670         case SIOCGETVIFCNT:
1671                 if (copy_from_user(&vr, arg, sizeof(vr)))
1672                         return -EFAULT;
1673                 if (vr.vifi >= mrt->maxvif)
1674                         return -EINVAL;
1675                 vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
1676                 read_lock(&mrt_lock);
1677                 vif = &mrt->vif_table[vr.vifi];
1678                 if (VIF_EXISTS(mrt, vr.vifi)) {
1679                         vr.icount = vif->pkt_in;
1680                         vr.ocount = vif->pkt_out;
1681                         vr.ibytes = vif->bytes_in;
1682                         vr.obytes = vif->bytes_out;
1683                         read_unlock(&mrt_lock);
1684
1685                         if (copy_to_user(arg, &vr, sizeof(vr)))
1686                                 return -EFAULT;
1687                         return 0;
1688                 }
1689                 read_unlock(&mrt_lock);
1690                 return -EADDRNOTAVAIL;
1691         case SIOCGETSGCNT:
1692                 if (copy_from_user(&sr, arg, sizeof(sr)))
1693                         return -EFAULT;
1694
1695                 rcu_read_lock();
1696                 c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
1697                 if (c) {
1698                         sr.pktcnt = c->_c.mfc_un.res.pkt;
1699                         sr.bytecnt = c->_c.mfc_un.res.bytes;
1700                         sr.wrong_if = c->_c.mfc_un.res.wrong_if;
1701                         rcu_read_unlock();
1702
1703                         if (copy_to_user(arg, &sr, sizeof(sr)))
1704                                 return -EFAULT;
1705                         return 0;
1706                 }
1707                 rcu_read_unlock();
1708                 return -EADDRNOTAVAIL;
1709         default:
1710                 return -ENOIOCTLCMD;
1711         }
1712 }
1713 #endif
1714
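/* Remove every VIF that references a network device which is being
 * unregistered, across all multicast routing tables in the namespace.
 */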
1715 static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
1716 {
1717         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1718         struct net *net = dev_net(dev);
1719         struct mr_table *mrt;
1720         struct vif_device *v;
1721         int ct;
1722
1723         if (event != NETDEV_UNREGISTER)
1724                 return NOTIFY_DONE;
1725
1726         ipmr_for_each_table(mrt, net) {
1727                 v = &mrt->vif_table[0];
1728                 for (ct = 0; ct < mrt->maxvif; ct++, v++) {
1729                         if (v->dev == dev)
1730                                 vif_delete(mrt, ct, 1, NULL);
1731                 }
1732         }
1733         return NOTIFY_DONE;
1734 }
1735
1736 static struct notifier_block ip_mr_notifier = {
1737         .notifier_call = ipmr_device_event,
1738 };
1739
1740 /* Encapsulate a packet by attaching a valid IPIP header to it.
1741  * This avoids tunnel drivers and other mess and gives us the speed so
1742  * important for multicast video.
1743  */
1744 static void ip_encap(struct net *net, struct sk_buff *skb,
1745                      __be32 saddr, __be32 daddr)
1746 {
1747         struct iphdr *iph;
1748         const struct iphdr *old_iph = ip_hdr(skb);
1749
1750         skb_push(skb, sizeof(struct iphdr));
1751         skb->transport_header = skb->network_header;
1752         skb_reset_network_header(skb);
1753         iph = ip_hdr(skb);
1754
1755         iph->version    =       4;
1756         iph->tos        =       old_iph->tos;
1757         iph->ttl        =       old_iph->ttl;
1758         iph->frag_off   =       0;
1759         iph->daddr      =       daddr;
1760         iph->saddr      =       saddr;
1761         iph->protocol   =       IPPROTO_IPIP;
1762         iph->ihl        =       5;
1763         iph->tot_len    =       htons(skb->len);
1764         ip_select_ident(net, skb, NULL);
1765         ip_send_check(iph);
1766
1767         memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1768         nf_reset_ct(skb);
1769 }
1770
1771 static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
1772                                       struct sk_buff *skb)
1773 {
1774         struct ip_options *opt = &(IPCB(skb)->opt);
1775
1776         IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
1777         IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
1778
1779         if (unlikely(opt->optlen))
1780                 ip_forward_options(skb);
1781
1782         return dst_output(net, sk, skb);
1783 }
1784
1785 #ifdef CONFIG_NET_SWITCHDEV
1786 static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
1787                                    int in_vifi, int out_vifi)
1788 {
1789         struct vif_device *out_vif = &mrt->vif_table[out_vifi];
1790         struct vif_device *in_vif = &mrt->vif_table[in_vifi];
1791
1792         if (!skb->offload_l3_fwd_mark)
1793                 return false;
1794         if (!out_vif->dev_parent_id.id_len || !in_vif->dev_parent_id.id_len)
1795                 return false;
1796         return netdev_phys_item_id_same(&out_vif->dev_parent_id,
1797                                         &in_vif->dev_parent_id);
1798 }
1799 #else
1800 static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
1801                                    int in_vifi, int out_vifi)
1802 {
1803         return false;
1804 }
1805 #endif
1806
1807 /* Processing handlers for ipmr_forward */
1808
1809 static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
1810                             int in_vifi, struct sk_buff *skb, int vifi)
1811 {
1812         const struct iphdr *iph = ip_hdr(skb);
1813         struct vif_device *vif = &mrt->vif_table[vifi];
1814         struct net_device *dev;
1815         struct rtable *rt;
1816         struct flowi4 fl4;
1817         int    encap = 0;
1818
1819         if (!vif->dev)
1820                 goto out_free;
1821
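        /* A register VIF never transmits on the wire: account the packet
         * and hand it to the PIM daemon as an IGMPMSG_WHOLEPKT upcall.
         */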
1822         if (vif->flags & VIFF_REGISTER) {
1823                 vif->pkt_out++;
1824                 vif->bytes_out += skb->len;
1825                 vif->dev->stats.tx_bytes += skb->len;
1826                 vif->dev->stats.tx_packets++;
1827                 ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
1828                 goto out_free;
1829         }
1830
1831         if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
1832                 goto out_free;
1833
1834         if (vif->flags & VIFF_TUNNEL) {
1835                 rt = ip_route_output_ports(net, &fl4, NULL,
1836                                            vif->remote, vif->local,
1837                                            0, 0,
1838                                            IPPROTO_IPIP,
1839                                            RT_TOS(iph->tos), vif->link);
1840                 if (IS_ERR(rt))
1841                         goto out_free;
1842                 encap = sizeof(struct iphdr);
1843         } else {
1844                 rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
1845                                            0, 0,
1846                                            IPPROTO_IPIP,
1847                                            RT_TOS(iph->tos), vif->link);
1848                 if (IS_ERR(rt))
1849                         goto out_free;
1850         }
1851
1852         dev = rt->dst.dev;
1853
1854         if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
1855                 /* Do not fragment multicasts. Alas, IPv4 does not
1856                  * allow us to send ICMP here, so such packets will
1857                  * disappear into a blackhole.
1858                  */
1859                 IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
1860                 ip_rt_put(rt);
1861                 goto out_free;
1862         }
1863
1864         encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
1865
1866         if (skb_cow(skb, encap)) {
1867                 ip_rt_put(rt);
1868                 goto out_free;
1869         }
1870
1871         vif->pkt_out++;
1872         vif->bytes_out += skb->len;
1873
1874         skb_dst_drop(skb);
1875         skb_dst_set(skb, &rt->dst);
1876         ip_decrease_ttl(ip_hdr(skb));
1877
1878         /* FIXME: forward and output firewalls used to be called here.
1879          * What do we do with netfilter? -- RR
1880          */
1881         if (vif->flags & VIFF_TUNNEL) {
1882                 ip_encap(net, skb, vif->local, vif->remote);
1883                 /* FIXME: extra output firewall step used to be here. --RR */
1884                 vif->dev->stats.tx_packets++;
1885                 vif->dev->stats.tx_bytes += skb->len;
1886         }
1887
1888         IPCB(skb)->flags |= IPSKB_FORWARDED;
1889
1890         /* RFC 1584 teaches that a DVMRP/PIM router must deliver packets locally
1891          * not only before forwarding, but also after forwarding on all output
1892          * interfaces. Clearly, if the mrouter runs a multicast program, that
1893          * program should receive packets regardless of which interface it
1894          * joined on.
1895          * If we did not do this, the program would have to join on all
1896          * interfaces. On the other hand, a multihomed host (or router, but
1897          * not mrouter) cannot join on more than one interface - it would
1898          * result in receiving multiple copies of each packet.
1899          */
1900         NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
1901                 net, NULL, skb, skb->dev, dev,
1902                 ipmr_forward_finish);
1903         return;
1904
1905 out_free:
1906         kfree_skb(skb);
1907 }
1908
1909 static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
1910 {
1911         int ct;
1912
1913         for (ct = mrt->maxvif-1; ct >= 0; ct--) {
1914                 if (mrt->vif_table[ct].dev == dev)
1915                         break;
1916         }
1917         return ct;
1918 }
1919
1920 /* "local" means that we should preserve one skb (for local delivery) */
1921 static void ip_mr_forward(struct net *net, struct mr_table *mrt,
1922                           struct net_device *dev, struct sk_buff *skb,
1923                           struct mfc_cache *c, int local)
1924 {
1925         int true_vifi = ipmr_find_vif(mrt, dev);
1926         int psend = -1;
1927         int vif, ct;
1928
1929         vif = c->_c.mfc_parent;
1930         c->_c.mfc_un.res.pkt++;
1931         c->_c.mfc_un.res.bytes += skb->len;
1932         c->_c.mfc_un.res.lastuse = jiffies;
1933
1934         if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
1935                 struct mfc_cache *cache_proxy;
1936
1937                 /* For an (*,G) entry, we only check that the incoming
1938                  * interface is part of the static tree.
1939                  */
1940                 cache_proxy = mr_mfc_find_any_parent(mrt, vif);
1941                 if (cache_proxy &&
1942                     cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255)
1943                         goto forward;
1944         }
1945
1946         /* Wrong interface: drop packet and (maybe) send PIM assert. */
1947         if (mrt->vif_table[vif].dev != dev) {
1948                 if (rt_is_output_route(skb_rtable(skb))) {
1949                         /* It is our own packet, looped back.
1950                          * Very complicated situation...
1951                          *
1952                          * The best workaround until routing daemons are
1953                          * fixed is not to redistribute a packet if it was
1954                          * sent through the wrong interface. It means that
1955                          * multicast applications WILL NOT work for
1956                          * (S,G) entries whose default multicast route points
1957                          * to the wrong oif. In any case, it is not a good
1958                          * idea to run multicast applications on a router.
1959                          */
1960                         goto dont_forward;
1961                 }
1962
1963                 c->_c.mfc_un.res.wrong_if++;
1964
1965                 if (true_vifi >= 0 && mrt->mroute_do_assert &&
1966                     /* PIM-SM uses asserts when switching from RPT to SPT,
1967                      * so we cannot check that the packet arrived on an oif.
1968                      * It is bad, but otherwise we would need to move a pretty
1969                      * large chunk of pimd into the kernel. Ough... --ANK
1970                      */
1971                     (mrt->mroute_do_pim ||
1972                      c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
1973                     time_after(jiffies,
1974                                c->_c.mfc_un.res.last_assert +
1975                                MFC_ASSERT_THRESH)) {
1976                         c->_c.mfc_un.res.last_assert = jiffies;
1977                         ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
1978                         if (mrt->mroute_do_wrvifwhole)
1979                                 ipmr_cache_report(mrt, skb, true_vifi,
1980                                                   IGMPMSG_WRVIFWHOLE);
1981                 }
1982                 goto dont_forward;
1983         }
1984
1985 forward:
1986         mrt->vif_table[vif].pkt_in++;
1987         mrt->vif_table[vif].bytes_in += skb->len;
1988
1989         /* Forward the frame */
1990         if (c->mfc_origin == htonl(INADDR_ANY) &&
1991             c->mfc_mcastgrp == htonl(INADDR_ANY)) {
1992                 if (true_vifi >= 0 &&
1993                     true_vifi != c->_c.mfc_parent &&
1994                     ip_hdr(skb)->ttl >
1995                                 c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
1996                         /* It's an (*,*) entry and the packet is not coming from
1997                          * the upstream: forward the packet to the upstream
1998                          * only.
1999                          */
2000                         psend = c->_c.mfc_parent;
2001                         goto last_forward;
2002                 }
2003                 goto dont_forward;
2004         }
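        /* Walk the oifs from high to low: clone the skb for every eligible
         * interface except the last one selected, which is transmitted (or
         * cloned, for local delivery) in last_forward below.
         */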
2005         for (ct = c->_c.mfc_un.res.maxvif - 1;
2006              ct >= c->_c.mfc_un.res.minvif; ct--) {
2007                 /* For (*,G) entry, don't forward to the incoming interface */
2008                 if ((c->mfc_origin != htonl(INADDR_ANY) ||
2009                      ct != true_vifi) &&
2010                     ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
2011                         if (psend != -1) {
2012                                 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2013
2014                                 if (skb2)
2015                                         ipmr_queue_xmit(net, mrt, true_vifi,
2016                                                         skb2, psend);
2017                         }
2018                         psend = ct;
2019                 }
2020         }
2021 last_forward:
2022         if (psend != -1) {
2023                 if (local) {
2024                         struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2025
2026                         if (skb2)
2027                                 ipmr_queue_xmit(net, mrt, true_vifi, skb2,
2028                                                 psend);
2029                 } else {
2030                         ipmr_queue_xmit(net, mrt, true_vifi, skb, psend);
2031                         return;
2032                 }
2033         }
2034
2035 dont_forward:
2036         if (!local)
2037                 kfree_skb(skb);
2038 }
2039
2040 static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
2041 {
2042         struct rtable *rt = skb_rtable(skb);
2043         struct iphdr *iph = ip_hdr(skb);
2044         struct flowi4 fl4 = {
2045                 .daddr = iph->daddr,
2046                 .saddr = iph->saddr,
2047                 .flowi4_tos = RT_TOS(iph->tos),
2048                 .flowi4_oif = (rt_is_output_route(rt) ?
2049                                skb->dev->ifindex : 0),
2050                 .flowi4_iif = (rt_is_output_route(rt) ?
2051                                LOOPBACK_IFINDEX :
2052                                skb->dev->ifindex),
2053                 .flowi4_mark = skb->mark,
2054         };
2055         struct mr_table *mrt;
2056         int err;
2057
2058         err = ipmr_fib_lookup(net, &fl4, &mrt);
2059         if (err)
2060                 return ERR_PTR(err);
2061         return mrt;
2062 }
2063
2064 /* Multicast packets for forwarding arrive here
2065  * Called with rcu_read_lock();
2066  */
2067 int ip_mr_input(struct sk_buff *skb)
2068 {
2069         struct mfc_cache *cache;
2070         struct net *net = dev_net(skb->dev);
2071         int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
2072         struct mr_table *mrt;
2073         struct net_device *dev;
2074
2075         /* skb->dev passed in is the loX master dev for vrfs.
2076          * As there are no vifs associated with loopback devices,
2077          * get the proper interface that does have a vif associated with it.
2078          */
2079         dev = skb->dev;
2080         if (netif_is_l3_master(skb->dev)) {
2081                 dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
2082                 if (!dev) {
2083                         kfree_skb(skb);
2084                         return -ENODEV;
2085                 }
2086         }
2087
2088         /* The packet was looped back after forwarding; it should not be
2089          * forwarded a second time, but it can still be delivered locally.
2090          */
2091         if (IPCB(skb)->flags & IPSKB_FORWARDED)
2092                 goto dont_forward;
2093
2094         mrt = ipmr_rt_fib_lookup(net, skb);
2095         if (IS_ERR(mrt)) {
2096                 kfree_skb(skb);
2097                 return PTR_ERR(mrt);
2098         }
2099         if (!local) {
2100                 if (IPCB(skb)->opt.router_alert) {
2101                         if (ip_call_ra_chain(skb))
2102                                 return 0;
2103                 } else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
2104                         /* IGMPv1 (and broken IGMPv2 implementations such as
2105                          * Cisco IOS <= 11.2(8)) do not put the router alert
2106                          * option in IGMP packets destined to routable
2107                          * groups. This is very bad, because it means
2108                          * that we can forward NO IGMP messages.
2109                          */
2110                         struct sock *mroute_sk;
2111
2112                         mroute_sk = rcu_dereference(mrt->mroute_sk);
2113                         if (mroute_sk) {
2114                                 nf_reset_ct(skb);
2115                                 raw_rcv(mroute_sk, skb);
2116                                 return 0;
2117                         }
2118                 }
2119         }
2120
2121         /* already under rcu_read_lock() */
2122         cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
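        /* No exact (S,G) match: fall back to a wildcard (*,G) entry,
         * checked against the arrival VIF, if one has been installed.
         */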
2123         if (!cache) {
2124                 int vif = ipmr_find_vif(mrt, dev);
2125
2126                 if (vif >= 0)
2127                         cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
2128                                                     vif);
2129         }
2130
2131         /* No usable cache entry */
2132         if (!cache) {
2133                 int vif;
2134
2135                 if (local) {
2136                         struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2137                         ip_local_deliver(skb);
2138                         if (!skb2)
2139                                 return -ENOBUFS;
2140                         skb = skb2;
2141                 }
2142
2143                 read_lock(&mrt_lock);
2144                 vif = ipmr_find_vif(mrt, dev);
2145                 if (vif >= 0) {
2146                         int err2 = ipmr_cache_unresolved(mrt, vif, skb, dev);
2147                         read_unlock(&mrt_lock);
2148
2149                         return err2;
2150                 }
2151                 read_unlock(&mrt_lock);
2152                 kfree_skb(skb);
2153                 return -ENODEV;
2154         }
2155
2156         read_lock(&mrt_lock);
2157         ip_mr_forward(net, mrt, dev, skb, cache, local);
2158         read_unlock(&mrt_lock);
2159
2160         if (local)
2161                 return ip_local_deliver(skb);
2162
2163         return 0;
2164
2165 dont_forward:
2166         if (local)
2167                 return ip_local_deliver(skb);
2168         kfree_skb(skb);
2169         return 0;
2170 }
2171
2172 #ifdef CONFIG_IP_PIMSM_V1
2173 /* Handle IGMP messages of PIMv1 */
2174 int pim_rcv_v1(struct sk_buff *skb)
2175 {
2176         struct igmphdr *pim;
2177         struct net *net = dev_net(skb->dev);
2178         struct mr_table *mrt;
2179
2180         if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2181                 goto drop;
2182
2183         pim = igmp_hdr(skb);
2184
2185         mrt = ipmr_rt_fib_lookup(net, skb);
2186         if (IS_ERR(mrt))
2187                 goto drop;
2188         if (!mrt->mroute_do_pim ||
2189             pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
2190                 goto drop;
2191
2192         if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2193 drop:
2194                 kfree_skb(skb);
2195         }
2196         return 0;
2197 }
2198 #endif
2199
2200 #ifdef CONFIG_IP_PIMSM_V2
2201 static int pim_rcv(struct sk_buff *skb)
2202 {
2203         struct pimreghdr *pim;
2204         struct net *net = dev_net(skb->dev);
2205         struct mr_table *mrt;
2206
2207         if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
2208                 goto drop;
2209
2210         pim = (struct pimreghdr *)skb_transport_header(skb);
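        /* Accept the register only if either the checksum over the PIM
         * header alone or the checksum over the whole packet verifies;
         * drop null-register messages and anything that is not a register.
         */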
2211         if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
2212             (pim->flags & PIM_NULL_REGISTER) ||
2213             (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
2214              csum_fold(skb_checksum(skb, 0, skb->len, 0))))
2215                 goto drop;
2216
2217         mrt = ipmr_rt_fib_lookup(net, skb);
2218         if (IS_ERR(mrt))
2219                 goto drop;
2220         if (__pim_rcv(mrt, skb, sizeof(*pim))) {
2221 drop:
2222                 kfree_skb(skb);
2223         }
2224         return 0;
2225 }
2226 #endif
2227
2228 int ipmr_get_route(struct net *net, struct sk_buff *skb,
2229                    __be32 saddr, __be32 daddr,
2230                    struct rtmsg *rtm, u32 portid)
2231 {
2232         struct mfc_cache *cache;
2233         struct mr_table *mrt;
2234         int err;
2235
2236         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2237         if (!mrt)
2238                 return -ENOENT;
2239
2240         rcu_read_lock();
2241         cache = ipmr_cache_find(mrt, saddr, daddr);
2242         if (!cache && skb->dev) {
2243                 int vif = ipmr_find_vif(mrt, skb->dev);
2244
2245                 if (vif >= 0)
2246                         cache = ipmr_cache_find_any(mrt, daddr, vif);
2247         }
2248         if (!cache) {
2249                 struct sk_buff *skb2;
2250                 struct iphdr *iph;
2251                 struct net_device *dev;
2252                 int vif = -1;
2253
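                /* No resolved entry: queue a skeleton IP header as an
                 * unresolved cache entry. iph->version == 0 marks it as a
                 * netlink-originated request rather than a real packet.
                 */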
2254                 dev = skb->dev;
2255                 read_lock(&mrt_lock);
2256                 if (dev)
2257                         vif = ipmr_find_vif(mrt, dev);
2258                 if (vif < 0) {
2259                         read_unlock(&mrt_lock);
2260                         rcu_read_unlock();
2261                         return -ENODEV;
2262                 }
2263
2264                 skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr));
2265                 if (!skb2) {
2266                         read_unlock(&mrt_lock);
2267                         rcu_read_unlock();
2268                         return -ENOMEM;
2269                 }
2270
2271                 NETLINK_CB(skb2).portid = portid;
2272                 skb_push(skb2, sizeof(struct iphdr));
2273                 skb_reset_network_header(skb2);
2274                 iph = ip_hdr(skb2);
2275                 iph->ihl = sizeof(struct iphdr) >> 2;
2276                 iph->saddr = saddr;
2277                 iph->daddr = daddr;
2278                 iph->version = 0;
2279                 err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
2280                 read_unlock(&mrt_lock);
2281                 rcu_read_unlock();
2282                 return err;
2283         }
2284
2285         read_lock(&mrt_lock);
2286         err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
2287         read_unlock(&mrt_lock);
2288         rcu_read_unlock();
2289         return err;
2290 }
2291
2292 static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2293                             u32 portid, u32 seq, struct mfc_cache *c, int cmd,
2294                             int flags)
2295 {
2296         struct nlmsghdr *nlh;
2297         struct rtmsg *rtm;
2298         int err;
2299
2300         nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
2301         if (!nlh)
2302                 return -EMSGSIZE;
2303
2304         rtm = nlmsg_data(nlh);
2305         rtm->rtm_family   = RTNL_FAMILY_IPMR;
2306         rtm->rtm_dst_len  = 32;
2307         rtm->rtm_src_len  = 32;
2308         rtm->rtm_tos      = 0;
2309         rtm->rtm_table    = mrt->id;
2310         if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2311                 goto nla_put_failure;
2312         rtm->rtm_type     = RTN_MULTICAST;
2313         rtm->rtm_scope    = RT_SCOPE_UNIVERSE;
2314         if (c->_c.mfc_flags & MFC_STATIC)
2315                 rtm->rtm_protocol = RTPROT_STATIC;
2316         else
2317                 rtm->rtm_protocol = RTPROT_MROUTED;
2318         rtm->rtm_flags    = 0;
2319
2320         if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
2321             nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
2322                 goto nla_put_failure;
2323         err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
2324         /* do not break the dump if cache is unresolved */
2325         if (err < 0 && err != -ENOENT)
2326                 goto nla_put_failure;
2327
2328         nlmsg_end(skb, nlh);
2329         return 0;
2330
2331 nla_put_failure:
2332         nlmsg_cancel(skb, nlh);
2333         return -EMSGSIZE;
2334 }
2335
2336 static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2337                              u32 portid, u32 seq, struct mr_mfc *c, int cmd,
2338                              int flags)
2339 {
2340         return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c,
2341                                 cmd, flags);
2342 }
2343
2344 static size_t mroute_msgsize(bool unresolved, int maxvif)
2345 {
2346         size_t len =
2347                 NLMSG_ALIGN(sizeof(struct rtmsg))
2348                 + nla_total_size(4)     /* RTA_TABLE */
2349                 + nla_total_size(4)     /* RTA_SRC */
2350                 + nla_total_size(4)     /* RTA_DST */
2351                 ;
2352
2353         if (!unresolved)
2354                 len = len
2355                       + nla_total_size(4)       /* RTA_IIF */
2356                       + nla_total_size(0)       /* RTA_MULTIPATH */
2357                       + maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
2358                                                 /* RTA_MFC_STATS */
2359                       + nla_total_size_64bit(sizeof(struct rta_mfc_stats))
2360                 ;
2361
2362         return len;
2363 }
2364
2365 static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
2366                                  int cmd)
2367 {
2368         struct net *net = read_pnet(&mrt->net);
2369         struct sk_buff *skb;
2370         int err = -ENOBUFS;
2371
2372         skb = nlmsg_new(mroute_msgsize(mfc->_c.mfc_parent >= MAXVIFS,
2373                                        mrt->maxvif),
2374                         GFP_ATOMIC);
2375         if (!skb)
2376                 goto errout;
2377
2378         err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
2379         if (err < 0)
2380                 goto errout;
2381
2382         rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
2383         return;
2384
2385 errout:
2386         kfree_skb(skb);
2387         if (err < 0)
2388                 rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
2389 }
2390
2391 static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
2392 {
2393         size_t len =
2394                 NLMSG_ALIGN(sizeof(struct rtgenmsg))
2395                 + nla_total_size(1)     /* IPMRA_CREPORT_MSGTYPE */
2396                 + nla_total_size(4)     /* IPMRA_CREPORT_VIF_ID */
2397                 + nla_total_size(4)     /* IPMRA_CREPORT_SRC_ADDR */
2398                 + nla_total_size(4)     /* IPMRA_CREPORT_DST_ADDR */
2399                                         /* IPMRA_CREPORT_PKT */
2400                 + nla_total_size(payloadlen)
2401                 ;
2402
2403         return len;
2404 }
2405
2406 static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
2407 {
2408         struct net *net = read_pnet(&mrt->net);
2409         struct nlmsghdr *nlh;
2410         struct rtgenmsg *rtgenm;
2411         struct igmpmsg *msg;
2412         struct sk_buff *skb;
2413         struct nlattr *nla;
2414         int payloadlen;
2415
2416         payloadlen = pkt->len - sizeof(struct igmpmsg);
2417         msg = (struct igmpmsg *)skb_network_header(pkt);
2418
2419         skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
2420         if (!skb)
2421                 goto errout;
2422
2423         nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
2424                         sizeof(struct rtgenmsg), 0);
2425         if (!nlh)
2426                 goto errout;
2427         rtgenm = nlmsg_data(nlh);
2428         rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
2429         if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
2430             nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
2431             nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
2432                             msg->im_src.s_addr) ||
2433             nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
2434                             msg->im_dst.s_addr))
2435                 goto nla_put_failure;
2436
2437         nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
2438         if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
2439                                   nla_data(nla), payloadlen))
2440                 goto nla_put_failure;
2441
2442         nlmsg_end(skb, nlh);
2443
2444         rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
2445         return;
2446
2447 nla_put_failure:
2448         nlmsg_cancel(skb, nlh);
2449 errout:
2450         kfree_skb(skb);
2451         rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
2452 }
2453
2454 static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb,
2455                                        const struct nlmsghdr *nlh,
2456                                        struct nlattr **tb,
2457                                        struct netlink_ext_ack *extack)
2458 {
2459         struct rtmsg *rtm;
2460         int i, err;
2461
2462         if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
2463                 NL_SET_ERR_MSG(extack, "ipv4: Invalid header for multicast route get request");
2464                 return -EINVAL;
2465         }
2466
2467         if (!netlink_strict_get_check(skb))
2468                 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
2469                                               rtm_ipv4_policy, extack);
2470
2471         rtm = nlmsg_data(nlh);
2472         if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
2473             (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
2474             rtm->rtm_tos || rtm->rtm_table || rtm->rtm_protocol ||
2475             rtm->rtm_scope || rtm->rtm_type || rtm->rtm_flags) {
2476                 NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for multicast route get request");
2477                 return -EINVAL;
2478         }
2479
2480         err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
2481                                             rtm_ipv4_policy, extack);
2482         if (err)
2483                 return err;
2484
2485         if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
2486             (tb[RTA_DST] && !rtm->rtm_dst_len)) {
2487                 NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
2488                 return -EINVAL;
2489         }
2490
2491         for (i = 0; i <= RTA_MAX; i++) {
2492                 if (!tb[i])
2493                         continue;
2494
2495                 switch (i) {
2496                 case RTA_SRC:
2497                 case RTA_DST:
2498                 case RTA_TABLE:
2499                         break;
2500                 default:
2501                         NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in multicast route get request");
2502                         return -EINVAL;
2503                 }
2504         }
2505
2506         return 0;
2507 }
2508
2509 static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2510                              struct netlink_ext_ack *extack)
2511 {
2512         struct net *net = sock_net(in_skb->sk);
2513         struct nlattr *tb[RTA_MAX + 1];
2514         struct sk_buff *skb = NULL;
2515         struct mfc_cache *cache;
2516         struct mr_table *mrt;
2517         __be32 src, grp;
2518         u32 tableid;
2519         int err;
2520
2521         err = ipmr_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
2522         if (err < 0)
2523                 goto errout;
2524
2525         src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
2526         grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
2527         tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
2528
2529         mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
2530         if (!mrt) {
2531                 err = -ENOENT;
2532                 goto errout_free;
2533         }
2534
2535         /* entries are added/deleted only under RTNL */
2536         rcu_read_lock();
2537         cache = ipmr_cache_find(mrt, src, grp);
2538         rcu_read_unlock();
2539         if (!cache) {
2540                 err = -ENOENT;
2541                 goto errout_free;
2542         }
2543
2544         skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);
2545         if (!skb) {
2546                 err = -ENOBUFS;
2547                 goto errout_free;
2548         }
2549
2550         err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
2551                                nlh->nlmsg_seq, cache,
2552                                RTM_NEWROUTE, 0);
2553         if (err < 0)
2554                 goto errout_free;
2555
2556         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2557
2558 errout:
2559         return err;
2560
2561 errout_free:
2562         kfree_skb(skb);
2563         goto errout;
2564 }
2565
2566 static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
2567 {
2568         struct fib_dump_filter filter = {};
2569         int err;
2570
2571         if (cb->strict_check) {
2572                 err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh,
2573                                             &filter, cb);
2574                 if (err < 0)
2575                         return err;
2576         }
2577
2578         if (filter.table_id) {
2579                 struct mr_table *mrt;
2580
2581                 mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
2582                 if (!mrt) {
2583                         if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR)
2584                                 return skb->len;
2585
2586                         NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
2587                         return -ENOENT;
2588                 }
2589                 err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
2590                                     &mfc_unres_lock, &filter);
2591                 return skb->len ? : err;
2592         }
2593
2594         return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
2595                                 _ipmr_fill_mroute, &mfc_unres_lock, &filter);
2596 }
2597
2598 static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
2599         [RTA_SRC]       = { .type = NLA_U32 },
2600         [RTA_DST]       = { .type = NLA_U32 },
2601         [RTA_IIF]       = { .type = NLA_U32 },
2602         [RTA_TABLE]     = { .type = NLA_U32 },
2603         [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
2604 };
2605
2606 static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
2607 {
2608         switch (rtm_protocol) {
2609         case RTPROT_STATIC:
2610         case RTPROT_MROUTED:
2611                 return true;
2612         }
2613         return false;
2614 }
2615
2616 static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
2617 {
2618         struct rtnexthop *rtnh = nla_data(nla);
2619         int remaining = nla_len(nla), vifi = 0;
2620
2621         while (rtnh_ok(rtnh, remaining)) {
2622                 mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
2623                 if (++vifi == MAXVIFS)
2624                         break;
2625                 rtnh = rtnh_next(rtnh, &remaining);
2626         }
2627
2628         return remaining > 0 ? -EINVAL : vifi;
2629 }
2630
2631 /* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
2632 static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
2633                             struct mfcctl *mfcc, int *mrtsock,
2634                             struct mr_table **mrtret,
2635                             struct netlink_ext_ack *extack)
2636 {
2637         struct net_device *dev = NULL;
2638         u32 tblid = RT_TABLE_DEFAULT;
2639         struct mr_table *mrt;
2640         struct nlattr *attr;
2641         struct rtmsg *rtm;
2642         int ret, rem;
2643
2644         ret = nlmsg_validate_deprecated(nlh, sizeof(*rtm), RTA_MAX,
2645                                         rtm_ipmr_policy, extack);
2646         if (ret < 0)
2647                 goto out;
2648         rtm = nlmsg_data(nlh);
2649
2650         ret = -EINVAL;
2651         if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
2652             rtm->rtm_type != RTN_MULTICAST ||
2653             rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
2654             !ipmr_rtm_validate_proto(rtm->rtm_protocol))
2655                 goto out;
2656
2657         memset(mfcc, 0, sizeof(*mfcc));
2658         mfcc->mfcc_parent = -1;
2659         ret = 0;
2660         nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
2661                 switch (nla_type(attr)) {
2662                 case RTA_SRC:
2663                         mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
2664                         break;
2665                 case RTA_DST:
2666                         mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
2667                         break;
2668                 case RTA_IIF:
2669                         dev = __dev_get_by_index(net, nla_get_u32(attr));
2670                         if (!dev) {
2671                                 ret = -ENODEV;
2672                                 goto out;
2673                         }
2674                         break;
2675                 case RTA_MULTIPATH:
2676                         if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
2677                                 ret = -EINVAL;
2678                                 goto out;
2679                         }
2680                         break;
2681                 case RTA_PREFSRC:
2682                         ret = 1;
2683                         break;
2684                 case RTA_TABLE:
2685                         tblid = nla_get_u32(attr);
2686                         break;
2687                 }
2688         }
2689         mrt = ipmr_get_table(net, tblid);
2690         if (!mrt) {
2691                 ret = -ENOENT;
2692                 goto out;
2693         }
2694         *mrtret = mrt;
2695         *mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
2696         if (dev)
2697                 mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
2698
2699 out:
2700         return ret;
2701 }
2702
2703 /* takes care of both newroute and delroute */
2704 static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
2705                           struct netlink_ext_ack *extack)
2706 {
2707         struct net *net = sock_net(skb->sk);
2708         int ret, mrtsock, parent;
2709         struct mr_table *tbl;
2710         struct mfcctl mfcc;
2711
2712         mrtsock = 0;
2713         tbl = NULL;
2714         ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
2715         if (ret < 0)
2716                 return ret;
2717
2718         parent = ret ? mfcc.mfcc_parent : -1;
2719         if (nlh->nlmsg_type == RTM_NEWROUTE)
2720                 return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
2721         else
2722                 return ipmr_mfc_delete(tbl, &mfcc, parent);
2723 }
2724
2725 static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
2726 {
2727         u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);
2728
2729         if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
2730             nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
2731             nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
2732                         mrt->mroute_reg_vif_num) ||
2733             nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
2734                        mrt->mroute_do_assert) ||
2735             nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) ||
2736             nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
2737                        mrt->mroute_do_wrvifwhole))
2738                 return false;
2739
2740         return true;
2741 }
2742
2743 static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
2744 {
2745         struct nlattr *vif_nest;
2746         struct vif_device *vif;
2747
2748         /* if the VIF doesn't exist just continue */
2749         if (!VIF_EXISTS(mrt, vifid))
2750                 return true;
2751
2752         vif = &mrt->vif_table[vifid];
2753         vif_nest = nla_nest_start_noflag(skb, IPMRA_VIF);
2754         if (!vif_nest)
2755                 return false;
2756         if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif->dev->ifindex) ||
2757             nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
2758             nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
2759             nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
2760                               IPMRA_VIFA_PAD) ||
2761             nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
2762                               IPMRA_VIFA_PAD) ||
2763             nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
2764                               IPMRA_VIFA_PAD) ||
2765             nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
2766                               IPMRA_VIFA_PAD) ||
2767             nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
2768             nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
2769                 nla_nest_cancel(skb, vif_nest);
2770                 return false;
2771         }
2772         nla_nest_end(skb, vif_nest);
2773
2774         return true;
2775 }
2776
2777 static int ipmr_valid_dumplink(const struct nlmsghdr *nlh,
2778                                struct netlink_ext_ack *extack)
2779 {
2780         struct ifinfomsg *ifm;
2781
2782         if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
2783                 NL_SET_ERR_MSG(extack, "ipv4: Invalid header for ipmr link dump");
2784                 return -EINVAL;
2785         }
2786
2787         if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
2788                 NL_SET_ERR_MSG(extack, "Invalid data after header in ipmr link dump");
2789                 return -EINVAL;
2790         }
2791
2792         ifm = nlmsg_data(nlh);
2793         if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
2794             ifm->ifi_change || ifm->ifi_index) {
2795                 NL_SET_ERR_MSG(extack, "Invalid values in header for ipmr link dump request");
2796                 return -EINVAL;
2797         }
2798
2799         return 0;
2800 }
2801
2802 static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
2803 {
2804         struct net *net = sock_net(skb->sk);
2805         struct nlmsghdr *nlh = NULL;
2806         unsigned int t = 0, s_t;
2807         unsigned int e = 0, s_e;
2808         struct mr_table *mrt;
2809
2810         if (cb->strict_check) {
2811                 int err = ipmr_valid_dumplink(cb->nlh, cb->extack);
2812
2813                 if (err < 0)
2814                         return err;
2815         }
2816
2817         s_t = cb->args[0];
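        /* Resume state for a multi-part dump: args[0] is the table index,
         * args[1] the VIF index within that table.
         */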
2818         s_e = cb->args[1];
2819
2820         ipmr_for_each_table(mrt, net) {
2821                 struct nlattr *vifs, *af;
2822                 struct ifinfomsg *hdr;
2823                 u32 i;
2824
2825                 if (t < s_t)
2826                         goto skip_table;
2827                 nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
2828                                 cb->nlh->nlmsg_seq, RTM_NEWLINK,
2829                                 sizeof(*hdr), NLM_F_MULTI);
2830                 if (!nlh)
2831                         break;
2832
2833                 hdr = nlmsg_data(nlh);
2834                 memset(hdr, 0, sizeof(*hdr));
2835                 hdr->ifi_family = RTNL_FAMILY_IPMR;
2836
2837                 af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
2838                 if (!af) {
2839                         nlmsg_cancel(skb, nlh);
2840                         goto out;
2841                 }
2842
2843                 if (!ipmr_fill_table(mrt, skb)) {
2844                         nlmsg_cancel(skb, nlh);
2845                         goto out;
2846                 }
2847
2848                 vifs = nla_nest_start_noflag(skb, IPMRA_TABLE_VIFS);
2849                 if (!vifs) {
2850                         nla_nest_end(skb, af);
2851                         nlmsg_end(skb, nlh);
2852                         goto out;
2853                 }
2854                 for (i = 0; i < mrt->maxvif; i++) {
2855                         if (e < s_e)
2856                                 goto skip_entry;
2857                         if (!ipmr_fill_vif(mrt, i, skb)) {
2858                                 nla_nest_end(skb, vifs);
2859                                 nla_nest_end(skb, af);
2860                                 nlmsg_end(skb, nlh);
2861                                 goto out;
2862                         }
2863 skip_entry:
2864                         e++;
2865                 }
2866                 s_e = 0;
2867                 e = 0;
2868                 nla_nest_end(skb, vifs);
2869                 nla_nest_end(skb, af);
2870                 nlmsg_end(skb, nlh);
2871 skip_table:
2872                 t++;
2873         }
2874
2875 out:
2876         cb->args[1] = e;
2877         cb->args[0] = t;
2878
2879         return skb->len;
2880 }
2881
2882 #ifdef CONFIG_PROC_FS
2883 /* The /proc interfaces to multicast routing:
2884  * /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
2885  */
2886
2887 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
2888         __acquires(mrt_lock)
2889 {
2890         struct mr_vif_iter *iter = seq->private;
2891         struct net *net = seq_file_net(seq);
2892         struct mr_table *mrt;
2893
2894         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2895         if (!mrt)
2896                 return ERR_PTR(-ENOENT);
2897
2898         iter->mrt = mrt;
2899
2900         read_lock(&mrt_lock);
2901         return mr_vif_seq_start(seq, pos);
2902 }
2903
2904 static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
2905         __releases(mrt_lock)
2906 {
2907         read_unlock(&mrt_lock);
2908 }
2909
2910 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
2911 {
2912         struct mr_vif_iter *iter = seq->private;
2913         struct mr_table *mrt = iter->mrt;
2914
2915         if (v == SEQ_START_TOKEN) {
2916                 seq_puts(seq,
2917                          "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
2918         } else {
2919                 const struct vif_device *vif = v;
2920                 const char *name =  vif->dev ?
2921                                     vif->dev->name : "none";
2922
2923                 seq_printf(seq,
2924                            "%2td %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
2925                            vif - mrt->vif_table,
2926                            name, vif->bytes_in, vif->pkt_in,
2927                            vif->bytes_out, vif->pkt_out,
2928                            vif->flags, vif->local, vif->remote);
2929         }
2930         return 0;
2931 }
2932
2933 static const struct seq_operations ipmr_vif_seq_ops = {
2934         .start = ipmr_vif_seq_start,
2935         .next  = mr_vif_seq_next,
2936         .stop  = ipmr_vif_seq_stop,
2937         .show  = ipmr_vif_seq_show,
2938 };
2939
2940 static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
2941 {
2942         struct net *net = seq_file_net(seq);
2943         struct mr_table *mrt;
2944
2945         mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
2946         if (!mrt)
2947                 return ERR_PTR(-ENOENT);
2948
2949         return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
2950 }
2951
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct mr_mfc_iter *it = seq->private;
		const struct mr_table *mrt = it->mrt;

		seq_printf(seq, "%08X %08X %-3hd",
			   (__force u32) mfc->mfc_mcastgrp,
			   (__force u32) mfc->mfc_origin,
			   mfc->_c.mfc_parent);

		if (it->cache != &mrt->mfc_unres_queue) {
			seq_printf(seq, " %8lu %8lu %8lu",
				   mfc->_c.mfc_un.res.pkt,
				   mfc->_c.mfc_un.res.bytes,
				   mfc->_c.mfc_un.res.wrong_if);
			for (n = mfc->_c.mfc_un.res.minvif;
			     n < mfc->_c.mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(mrt, n) &&
				    mfc->_c.mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->_c.mfc_un.res.ttls[n]);
			}
		} else {
			/* unresolved mfc_caches don't contain
			 * pkt, bytes and wrong_if values
			 */
			seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = mr_mfc_seq_next,
	.stop  = mr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};
#endif

#ifdef CONFIG_IP_PIMSM_V2
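/* PIMv2 register messages arrive as IP protocol IPPROTO_PIM and are
 * decapsulated by pim_rcv().
 */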
static const struct net_protocol pim_protocol = {
	.handler	=	pim_rcv,
	.netns_ok	=	1,
};
#endif

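/* Sequence counter used by the FIB notifier infrastructure: it combines
 * the ipmr table changes with the multicast policy-rule changes so a
 * listener can detect that it missed events while dumping.
 */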
static unsigned int ipmr_seq_read(struct net *net)
{
	ASSERT_RTNL();

	return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net);
}

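/* Replay the current multicast policy rules and table entries to a newly
 * registered notifier block, via the common mr_dump() helper.
 */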
static int ipmr_dump(struct net *net, struct notifier_block *nb,
		     struct netlink_ext_ack *extack)
{
	return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump,
		       ipmr_mr_table_iter, &mrt_lock, extack);
}

static const struct fib_notifier_ops ipmr_notifier_ops_template = {
	.family		= RTNL_FAMILY_IPMR,
	.fib_seq_read	= ipmr_seq_read,
	.fib_dump	= ipmr_dump,
	.owner		= THIS_MODULE,
};

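/* Register the per-netns FIB notifier ops and reset the sequence counter
 * for this namespace.
 */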
static int __net_init ipmr_notifier_init(struct net *net)
{
	struct fib_notifier_ops *ops;

	net->ipv4.ipmr_seq = 0;

	ops = fib_notifier_ops_register(&ipmr_notifier_ops_template, net);
	if (IS_ERR(ops))
		return PTR_ERR(ops);
	net->ipv4.ipmr_notifier_ops = ops;

	return 0;
}

static void __net_exit ipmr_notifier_exit(struct net *net)
{
	fib_notifier_ops_unregister(net->ipv4.ipmr_notifier_ops);
	net->ipv4.ipmr_notifier_ops = NULL;
}

/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
	int err;

	err = ipmr_notifier_init(net);
	if (err)
		goto ipmr_notifier_fail;

	err = ipmr_rules_init(net);
	if (err < 0)
		goto ipmr_rules_fail;

#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_create_net("ip_mr_vif", 0, net->proc_net, &ipmr_vif_seq_ops,
			     sizeof(struct mr_vif_iter)))
		goto proc_vif_fail;
	if (!proc_create_net("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
			     sizeof(struct mr_mfc_iter)))
		goto proc_cache_fail;
#endif
	return 0;

#ifdef CONFIG_PROC_FS
proc_cache_fail:
	remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
	ipmr_rules_exit(net);
#endif
ipmr_rules_fail:
	ipmr_notifier_exit(net);
ipmr_notifier_fail:
	return err;
}

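/* Per-netns teardown: remove the /proc entries and release the notifier
 * ops, tables and rules created in ipmr_net_init().
 */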
static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ip_mr_cache", net->proc_net);
	remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
	ipmr_notifier_exit(net);
	ipmr_rules_exit(net);
}

static struct pernet_operations ipmr_net_ops = {
	.init = ipmr_net_init,
	.exit = ipmr_net_exit,
};

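/* Boot-time initialisation: create the mfc cache slab, register the pernet
 * operations, the netdevice notifier, the PIM protocol handler (if PIM-SM v2
 * is configured) and the rtnetlink handlers for multicast routes and link
 * dumps.  Failures unwind in reverse order of setup.
 */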
int __init ip_mr_init(void)
{
	int err;

	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       NULL);

	err = register_pernet_subsys(&ipmr_net_ops);
	if (err)
		goto reg_pernet_fail;

	err = register_netdevice_notifier(&ip_mr_notifier);
	if (err)
		goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
	if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
		pr_err("%s: can't add PIM protocol\n", __func__);
		err = -EAGAIN;
		goto add_proto_fail;
	}
#endif
	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
		      ipmr_rtm_getroute, ipmr_rtm_dumproute, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
		      ipmr_rtm_route, NULL, 0);
	rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
		      ipmr_rtm_route, NULL, 0);

	rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
		      NULL, ipmr_rtm_dumplink, 0);
	return 0;

#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
	unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
	unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
	kmem_cache_destroy(mrt_cachep);
	return err;
}