Merge branch 'for-5.15-verbose-console' into for-linus
[linux-2.6-microblaze.git] / net / bridge / br_mdb.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/err.h>
3 #include <linux/igmp.h>
4 #include <linux/kernel.h>
5 #include <linux/netdevice.h>
6 #include <linux/rculist.h>
7 #include <linux/skbuff.h>
8 #include <linux/if_ether.h>
9 #include <net/ip.h>
10 #include <net/netlink.h>
11 #include <net/switchdev.h>
12 #if IS_ENABLED(CONFIG_IPV6)
13 #include <net/ipv6.h>
14 #include <net/addrconf.h>
15 #endif
16
17 #include "br_private.h"
18
19 static bool br_rports_have_mc_router(struct net_bridge *br)
20 {
21 #if IS_ENABLED(CONFIG_IPV6)
22         return !hlist_empty(&br->ip4_mc_router_list) ||
23                !hlist_empty(&br->ip6_mc_router_list);
24 #else
25         return !hlist_empty(&br->ip4_mc_router_list);
26 #endif
27 }
28
29 static bool
30 br_ip4_rports_get_timer(struct net_bridge_port *port, unsigned long *timer)
31 {
32         *timer = br_timer_value(&port->ip4_mc_router_timer);
33         return !hlist_unhashed(&port->ip4_rlist);
34 }
35
36 static bool
37 br_ip6_rports_get_timer(struct net_bridge_port *port, unsigned long *timer)
38 {
39 #if IS_ENABLED(CONFIG_IPV6)
40         *timer = br_timer_value(&port->ip6_mc_router_timer);
41         return !hlist_unhashed(&port->ip6_rlist);
42 #else
43         *timer = 0;
44         return false;
45 #endif
46 }
47
/* Dump the multicast router ports of bridge @dev into an MDBA_ROUTER nested
 * attribute of @skb. Returns 0 on success (including "nothing to dump") or
 * -EMSGSIZE when the message buffer is exhausted, in which case the whole
 * nest is cancelled so the caller can retry in a new message.
 * Called from the RTM_GETMDB dump path under rcu_read_lock().
 */
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	bool have_ip4_mc_rtr, have_ip6_mc_rtr;
	unsigned long ip4_timer, ip6_timer;
	struct nlattr *nest, *port_nest;
	struct net_bridge_port *p;

	/* Router port tracking disabled on this bridge. */
	if (!br->multicast_router)
		return 0;

	/* No router seen on any port for either address family. */
	if (!br_rports_have_mc_router(br))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		have_ip4_mc_rtr = br_ip4_rports_get_timer(p, &ip4_timer);
		have_ip6_mc_rtr = br_ip6_rports_get_timer(p, &ip6_timer);

		/* Skip ports that are not on either router list. */
		if (!have_ip4_mc_rtr && !have_ip6_mc_rtr)
			continue;

		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;

		/* The legacy MDBA_ROUTER_PATTR_TIMER attribute carries the
		 * larger of the two per-family timers; the per-family timer
		 * attributes are only emitted for families that actually
		 * have a router on this port.
		 */
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				max(ip4_timer, ip6_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router) ||
		    (have_ip4_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET_TIMER,
				 ip4_timer)) ||
		    (have_ip6_mc_rtr &&
		     nla_put_u32(skb, MDBA_ROUTER_PATTR_INET6_TIMER,
				 ip6_timer))) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
101
102 static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
103 {
104         e->state = flags & MDB_PG_FLAGS_PERMANENT;
105         e->flags = 0;
106         if (flags & MDB_PG_FLAGS_OFFLOAD)
107                 e->flags |= MDB_FLAGS_OFFLOAD;
108         if (flags & MDB_PG_FLAGS_FAST_LEAVE)
109                 e->flags |= MDB_FLAGS_FAST_LEAVE;
110         if (flags & MDB_PG_FLAGS_STAR_EXCL)
111                 e->flags |= MDB_FLAGS_STAR_EXCL;
112         if (flags & MDB_PG_FLAGS_BLOCKED)
113                 e->flags |= MDB_FLAGS_BLOCKED;
114 }
115
116 static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
117                                  struct nlattr **mdb_attrs)
118 {
119         memset(ip, 0, sizeof(struct br_ip));
120         ip->vid = entry->vid;
121         ip->proto = entry->addr.proto;
122         switch (ip->proto) {
123         case htons(ETH_P_IP):
124                 ip->dst.ip4 = entry->addr.u.ip4;
125                 if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
126                         ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
127                 break;
128 #if IS_ENABLED(CONFIG_IPV6)
129         case htons(ETH_P_IPV6):
130                 ip->dst.ip6 = entry->addr.u.ip6;
131                 if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
132                         ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
133                 break;
134 #endif
135         default:
136                 ether_addr_copy(ip->dst.mac_addr, entry->addr.u.mac_addr);
137         }
138
139 }
140
/* Dump the source list of port group @p as an MDBA_MDB_EATTR_SRC_LIST
 * nested attribute (one MDBA_MDB_SRCLIST_ENTRY per source, carrying the
 * address and remaining timer). Returns 0 on success or -EMSGSIZE, in
 * which case the whole nest is cancelled.
 * Called either under rcu_read_lock() (dump path) or with the bridge's
 * multicast_lock held (notification path) — hence the lockdep condition.
 */
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	/* No sources: omit the attribute entirely. */
	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			/* Unknown source protocol: drop this entry from the
			 * dump but keep going with the remaining sources.
			 */
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
196
197 static int __mdb_fill_info(struct sk_buff *skb,
198                            struct net_bridge_mdb_entry *mp,
199                            struct net_bridge_port_group *p)
200 {
201         bool dump_srcs_mode = false;
202         struct timer_list *mtimer;
203         struct nlattr *nest_ent;
204         struct br_mdb_entry e;
205         u8 flags = 0;
206         int ifindex;
207
208         memset(&e, 0, sizeof(e));
209         if (p) {
210                 ifindex = p->key.port->dev->ifindex;
211                 mtimer = &p->timer;
212                 flags = p->flags;
213         } else {
214                 ifindex = mp->br->dev->ifindex;
215                 mtimer = &mp->timer;
216         }
217
218         __mdb_entry_fill_flags(&e, flags);
219         e.ifindex = ifindex;
220         e.vid = mp->addr.vid;
221         if (mp->addr.proto == htons(ETH_P_IP))
222                 e.addr.u.ip4 = mp->addr.dst.ip4;
223 #if IS_ENABLED(CONFIG_IPV6)
224         else if (mp->addr.proto == htons(ETH_P_IPV6))
225                 e.addr.u.ip6 = mp->addr.dst.ip6;
226 #endif
227         else
228                 ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
229         e.addr.proto = mp->addr.proto;
230         nest_ent = nla_nest_start_noflag(skb,
231                                          MDBA_MDB_ENTRY_INFO);
232         if (!nest_ent)
233                 return -EMSGSIZE;
234
235         if (nla_put_nohdr(skb, sizeof(e), &e) ||
236             nla_put_u32(skb,
237                         MDBA_MDB_EATTR_TIMER,
238                         br_timer_value(mtimer)))
239                 goto nest_err;
240
241         switch (mp->addr.proto) {
242         case htons(ETH_P_IP):
243                 dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
244                 if (mp->addr.src.ip4) {
245                         if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
246                                             mp->addr.src.ip4))
247                                 goto nest_err;
248                         break;
249                 }
250                 break;
251 #if IS_ENABLED(CONFIG_IPV6)
252         case htons(ETH_P_IPV6):
253                 dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
254                 if (!ipv6_addr_any(&mp->addr.src.ip6)) {
255                         if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
256                                              &mp->addr.src.ip6))
257                                 goto nest_err;
258                         break;
259                 }
260                 break;
261 #endif
262         default:
263                 ether_addr_copy(e.addr.u.mac_addr, mp->addr.dst.mac_addr);
264         }
265         if (p) {
266                 if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
267                         goto nest_err;
268                 if (dump_srcs_mode &&
269                     (__mdb_fill_srcs(skb, p) ||
270                      nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
271                                 p->filter_mode)))
272                         goto nest_err;
273         }
274         nla_nest_end(skb, nest_ent);
275
276         return 0;
277
278 nest_err:
279         nla_nest_cancel(skb, nest_ent);
280         return -EMSGSIZE;
281 }
282
/* Dump all MDB entries of bridge @dev into an MDBA_MDB nested attribute.
 * Dump resume state lives in cb->args[]: args[1] is the index of the next
 * mdb entry, args[2] the index of the next port group within that entry.
 * Returns 0 when done or -EMSGSIZE when @skb filled up (resume later).
 * Called from br_mdb_dump() under rcu_read_lock().
 */
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		/* Skip entries already dumped in a previous pass. */
		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		/* The host-joined pseudo entry is dumped first, but only if
		 * we are not resuming in the middle of this entry's port
		 * group list (s_pidx != 0 means it was dumped already).
		 */
		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		      pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				/* Keep what was already dumped for this
				 * entry and resume from (idx, pidx) in the
				 * next dump pass.
				 */
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}
347
348 static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
349                                  struct netlink_ext_ack *extack)
350 {
351         struct br_port_msg *bpm;
352
353         if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
354                 NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
355                 return -EINVAL;
356         }
357
358         bpm = nlmsg_data(nlh);
359         if (bpm->ifindex) {
360                 NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
361                 return -EINVAL;
362         }
363         if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
364                 NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
365                 return -EINVAL;
366         }
367
368         return 0;
369 }
370
/* RTM_GETMDB dump handler: walk every bridge device in the namespace and
 * emit one NLM_F_MULTI message per bridge carrying its MDB entries and
 * multicast router ports. cb->args[0] holds the index of the next device
 * to dump; args[1]/args[2] are per-bridge resume state (see
 * br_mdb_fill_info()).
 */
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	/* Lets the dump infrastructure detect concurrent changes to the
	 * device list between passes.
	 */
	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			/* On -EMSGSIZE keep the partial message; the dump
			 * resumes from the recorded cb->args state.
			 */
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
		skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}
426
/* Build a complete RTM_NEWMDB/RTM_DELMDB notification message for a single
 * (group @mp, port group @pg) pair into @skb. Returns 0 on success or
 * -EMSGSIZE, in which case the whole partially built message is cancelled.
 */
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family  = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	/* __mdb_fill_info() cancels nest2 itself on failure. */
	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	/* Close the open nest; nlmsg_cancel() below removes the whole
	 * message anyway.
	 */
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
466
/* Upper bound on the notification size produced by nlmsg_populate_mdb_fill()
 * for port group @pg (NULL for a host-joined entry): header and base entry
 * attributes, plus the per-protocol source attributes when the bridge runs
 * IGMPv3/MLDv2. Must be kept in sync with the fill functions above.
 */
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	/* br_port_msg header + MDB entry + MDBA_MDB_EATTR_TIMER */
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	/* Host-joined entries carry no rtprot/source/filter attributes. */
	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		/* IGMPv2 has no source lists or filter mode. */
		if (pg->key.port->br->multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		/* MLDv1 has no source lists or filter mode. */
		if (pg->key.port->br->multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}
520
/* Context handed to the deferred switchdev completion callback
 * (br_mdb_complete()) for MDB additions: identifies the bridge port and
 * group address whose port group should be marked as offloaded.
 * Allocated in br_mdb_notify(), freed by br_mdb_complete().
 */
struct br_mdb_complete_info {
	struct net_bridge_port *port;	/* port the group was added on */
	struct br_ip ip;		/* group address (key into the mdb) */
};
525
/* Switchdev completion callback for deferred MDB add operations. Once the
 * driver has processed the add (err == 0), mark the matching port group(s)
 * of the bridge/port identified by @priv as offloaded. @priv is the
 * br_mdb_complete_info allocated in br_mdb_notify() and is freed here on
 * every path.
 */
static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	/* multicast_lock protects the mdb and the port group lists. */
	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}
553
554 static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
555                                       const struct net_bridge_mdb_entry *mp)
556 {
557         if (mp->addr.proto == htons(ETH_P_IP))
558                 ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
559 #if IS_ENABLED(CONFIG_IPV6)
560         else if (mp->addr.proto == htons(ETH_P_IPV6))
561                 ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
562 #endif
563         else
564                 ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);
565
566         mdb->vid = mp->addr.vid;
567 }
568
569 static int br_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
570                              const struct switchdev_obj_port_mdb *mdb,
571                              unsigned long action, const void *ctx,
572                              struct netlink_ext_ack *extack)
573 {
574         struct switchdev_notifier_port_obj_info obj_info = {
575                 .info = {
576                         .dev = dev,
577                         .extack = extack,
578                         .ctx = ctx,
579                 },
580                 .obj = &mdb->obj,
581         };
582         int err;
583
584         err = nb->notifier_call(nb, action, &obj_info);
585         return notifier_to_errno(err);
586 }
587
588 static int br_mdb_queue_one(struct list_head *mdb_list,
589                             enum switchdev_obj_id id,
590                             const struct net_bridge_mdb_entry *mp,
591                             struct net_device *orig_dev)
592 {
593         struct switchdev_obj_port_mdb *mdb;
594
595         mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
596         if (!mdb)
597                 return -ENOMEM;
598
599         mdb->obj.id = id;
600         mdb->obj.orig_dev = orig_dev;
601         br_switchdev_mdb_populate(mdb, mp);
602         list_add_tail(&mdb->obj.list, mdb_list);
603
604         return 0;
605 }
606
/* Replay the bridge's current MDB state (host-joined entries and port
 * groups of @dev) towards switchdev notifier @nb as add or del events,
 * e.g. when a port joins or leaves an offloaded bridge. Runs under RTNL.
 * Returns 0 or the first error; queued objects are always freed.
 */
int br_mdb_replay(struct net_device *br_dev, struct net_device *dev,
		  const void *ctx, bool adding, struct notifier_block *nb,
		  struct netlink_ext_ack *extack)
{
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		/* Host-joined entries are replayed against the bridge
		 * device itself as HOST_MDB objects.
		 */
		if (mp->host_joined) {
			err = br_mdb_queue_one(&mdb_list,
					       SWITCHDEV_OBJ_ID_HOST_MDB,
					       mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			/* Only replay groups joined on @dev. */
			if (p->key.port->dev != dev)
				continue;

			err = br_mdb_queue_one(&mdb_list,
					       SWITCHDEV_OBJ_ID_PORT_MDB,
					       mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	/* Now in blocking context: deliver the queued objects. */
	list_for_each_entry(obj, &mdb_list, list) {
		err = br_mdb_replay_one(nb, dev, SWITCHDEV_OBJ_PORT_MDB(obj),
					action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	return err;
}
EXPORT_SYMBOL_GPL(br_mdb_replay);
690
691 static void br_mdb_switchdev_host_port(struct net_device *dev,
692                                        struct net_device *lower_dev,
693                                        struct net_bridge_mdb_entry *mp,
694                                        int type)
695 {
696         struct switchdev_obj_port_mdb mdb = {
697                 .obj = {
698                         .id = SWITCHDEV_OBJ_ID_HOST_MDB,
699                         .flags = SWITCHDEV_F_DEFER,
700                         .orig_dev = dev,
701                 },
702         };
703
704         br_switchdev_mdb_populate(&mdb, mp);
705
706         switch (type) {
707         case RTM_NEWMDB:
708                 switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
709                 break;
710         case RTM_DELMDB:
711                 switchdev_port_obj_del(lower_dev, &mdb.obj);
712                 break;
713         }
714 }
715
/* Propagate a host-joined MDB entry to every device stacked directly under
 * the bridge; host entries have no specific bridge port, so all lower
 * devices are notified.
 */
static void br_mdb_switchdev_host(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
}
725
/* Notify userspace (RTNLGRP_MDB) and switchdev about an MDB change of
 * @type (RTM_NEWMDB/RTM_DELMDB) for group @mp. With a port group @pg the
 * event targets that port; with @pg == NULL it is a host-joined entry and
 * is propagated to all lower devices instead.
 */
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (pg) {
		br_switchdev_mdb_populate(&mdb, mp);

		mdb.obj.orig_dev = pg->key.port->dev;
		switch (type) {
		case RTM_NEWMDB:
			/* complete_info ownership passes to br_mdb_complete()
			 * via the deferred completion; free it ourselves only
			 * if the add could not be queued.
			 */
			complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
			if (!complete_info)
				break;
			complete_info->port = pg->key.port;
			complete_info->ip = mp->addr;
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
				kfree(complete_info);
			break;
		case RTM_DELMDB:
			switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
			break;
		}
	} else {
		br_mdb_switchdev_host(dev, mp, type);
	}

	/* Netlink notification is sent regardless of switchdev outcome. */
	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
781
/* Build a router-port notification message of @type into @skb: a
 * br_port_msg header plus an MDBA_ROUTER nest carrying the router port's
 * ifindex. Returns 0 on success or -EMSGSIZE (message cancelled).
 * Note: @flags is currently unused by the body.
 */
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	/* Close the nest; nlmsg_cancel() removes the whole message. */
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
816
817 static inline size_t rtnl_rtr_nlmsg_size(void)
818 {
819         return NLMSG_ALIGN(sizeof(struct br_port_msg))
820                 + nla_total_size(sizeof(__u32));
821 }
822
823 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
824                    int type)
825 {
826         struct net *net = dev_net(dev);
827         struct sk_buff *skb;
828         int err = -ENOBUFS;
829         int ifindex;
830
831         ifindex = port ? port->dev->ifindex : 0;
832         skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
833         if (!skb)
834                 goto errout;
835
836         err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
837         if (err < 0) {
838                 kfree_skb(skb);
839                 goto errout;
840         }
841
842         rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
843         return;
844
845 errout:
846         rtnl_set_sk_err(net, RTNLGRP_MDB, err);
847 }
848
/* Validate a user-supplied br_mdb_entry for RTM_NEWMDB/RTM_DELMDB.
 * Returns true when valid; otherwise sets an extack message and returns
 * false. addr.proto == 0 denotes an L2 (MAC address) entry.
 * NOTE(review): without CONFIG_IPV6, an ETH_P_IPV6 proto falls through to
 * "Unknown entry protocol" — appears intentional, confirm if touched.
 */
static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
{
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return false;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return false;
		}
		/* 224.0.0.0/24 is always flooded; entries are meaningless. */
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return false;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return false;
		}
#endif
	} else if (entry->addr.proto == 0) {
		/* L2 mdb */
		if (!is_multicast_ether_addr(entry->addr.u.mac_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "L2 entry group is not multicast");
			return false;
		}
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return false;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return false;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return false;
	}

	return true;
}
895
896 static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
897                                 struct netlink_ext_ack *extack)
898 {
899         switch (proto) {
900         case htons(ETH_P_IP):
901                 if (nla_len(attr) != sizeof(struct in_addr)) {
902                         NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
903                         return false;
904                 }
905                 if (ipv4_is_multicast(nla_get_in_addr(attr))) {
906                         NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
907                         return false;
908                 }
909                 break;
910 #if IS_ENABLED(CONFIG_IPV6)
911         case htons(ETH_P_IPV6): {
912                 struct in6_addr src;
913
914                 if (nla_len(attr) != sizeof(struct in6_addr)) {
915                         NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
916                         return false;
917                 }
918                 src = nla_get_in6_addr(attr);
919                 if (ipv6_addr_is_multicast(&src)) {
920                         NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
921                         return false;
922                 }
923                 break;
924         }
925 #endif
926         default:
927                 NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
928                 return false;
929         }
930
931         return true;
932 }
933
/* Netlink policy for the nested MDBA_SET_ENTRY_ATTRS attributes.  The
 * source address is only length-bounded here (between an IPv4 in_addr and
 * an IPv6 in6_addr); semantic validation is done by is_valid_mdb_source().
 */
static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};
939
/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request.  On success *pdev
 * points to the bridge device, *pentry to the validated entry inside the
 * message, and mdb_attrs[] holds the parsed MDBA_SET_ENTRY_ATTRS nested
 * attributes (zeroed when the attribute is absent).  Uses
 * __dev_get_by_index(), so the caller must hold RTNL.
 * Returns 0 or a negative errno with extack set.
 */
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	/* bpm->ifindex identifies the bridge device itself */
	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	/* *pentry points into the request message, not a copy */
	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))
		return -EINVAL;
	*pentry = entry;

	if (tb[MDBA_SET_ENTRY_ATTRS]) {
		err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
				       tb[MDBA_SET_ENTRY_ATTRS],
				       br_mdbe_attrs_pol, extack);
		if (err)
			return err;
		if (mdb_attrs[MDBE_ATTR_SOURCE] &&
		    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
					 entry->addr.proto, extack))
			return -EINVAL;
	} else {
		/* callers test mdb_attrs[] for NULL, so clear it */
		memset(mdb_attrs, 0,
		       sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
	}

	return 0;
}
1006
/* Add a multicast group entry for @port (or as a host-joined entry when
 * @port is NULL), creating the mdb entry if needed.  Called via
 * __br_mdb_add() with br->multicast_lock held.  Returns 0, -EEXIST if the
 * port/host already joined, or another negative errno with extack set.
 */
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_mdb_entry *entry,
			    struct nlattr **mdb_attrs,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *mp, *star_mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip group, star_group;
	unsigned long now = jiffies;
	unsigned char flags = 0;
	u8 filter_mode;
	int err;

	__mdb_entry_to_br_ip(entry, &group, mdb_attrs);

	/* host join errors which can happen before creating the group */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			return -EINVAL;
		}
	}

	if (br_group_is_l2(&group) && entry->state != MDB_PERMANENT) {
		NL_SET_ERR_MSG_MOD(extack, "Only permanent L2 entries allowed");
		return -EINVAL;
	}

	/* create the mdb entry on first join of this group */
	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		mp = br_multicast_new_group(br, &group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	/* walk the port group list; it is kept ordered by port pointer
	 * value, so stop at the insertion point
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == port) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
			return -EEXIST;
		}
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	/* (*,G) entries default to EXCLUDE, (S,G) entries to INCLUDE */
	filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
						       MCAST_INCLUDE;

	if (entry->state == MDB_PERMANENT)
		flags |= MDB_PG_FLAGS_PERMANENT;

	p = br_multicast_new_port_group(port, &group, *pp, flags, NULL,
					filter_mode, RTPROT_STATIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		return -ENOMEM;
	}
	rcu_assign_pointer(*pp, p);
	/* only temporary entries age out */
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
	/* if we are adding a new EXCLUDE port group (*,G) it needs to be also
	 * added to all S,G entries for proper replication, if we are adding
	 * a new INCLUDE port (S,G) then all of *,G EXCLUDE ports need to be
	 * added to it for proper replication
	 */
	if (br_multicast_should_handle_mode(br, group.proto)) {
		switch (filter_mode) {
		case MCAST_EXCLUDE:
			br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
			break;
		case MCAST_INCLUDE:
			/* derive the (*,G) key by clearing the source */
			star_group = p->key.addr;
			memset(&star_group.src, 0, sizeof(star_group.src));
			star_mp = br_mdb_ip_get(br, &star_group);
			if (star_mp)
				br_multicast_sg_add_exclude_ports(star_mp, p);
			break;
		}
	}

	return 0;
}
1111
1112 static int __br_mdb_add(struct net *net, struct net_bridge *br,
1113                         struct net_bridge_port *p,
1114                         struct br_mdb_entry *entry,
1115                         struct nlattr **mdb_attrs,
1116                         struct netlink_ext_ack *extack)
1117 {
1118         int ret;
1119
1120         spin_lock_bh(&br->multicast_lock);
1121         ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
1122         spin_unlock_bh(&br->multicast_lock);
1123
1124         return ret;
1125 }
1126
/* RTM_NEWMDB handler.  Resolves the bridge and (optionally) the target
 * port, validates their state, and adds the entry — either for the given
 * VLAN, or for every configured VLAN when vlan filtering is enabled and
 * entry->vid is 0.  Runs under RTNL.  Returns 0 or a negative errno with
 * extack set.
 */
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	/* entry->ifindex equal to the bridge's own ifindex means a
	 * host-joined entry; otherwise it must name a port of this bridge
	 */
	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		if (p->state == BR_STATE_DISABLED) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * install mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			/* NOTE(review): a mid-loop failure leaves entries
			 * added for earlier vlans in place — no rollback
			 */
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
	}

	return err;
}
1198
/* Delete one mdb entry: either the host-joined state (when entry->ifindex
 * is the bridge's own) or the port group matching entry->ifindex.  Takes
 * br->multicast_lock itself.  Returns 0 on success, -EINVAL if the bridge
 * is down, multicast is disabled, or no matching entry exists.
 */
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		/* no members left: fire the timer now to expire the entry */
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		/* NOTE(review): a disabled port aborts with -EINVAL here */
		if (p->key.port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
1246
/* RTM_DELMDB handler.  Resolves the bridge and (optionally) the target
 * port, then deletes the entry — for the given VLAN, or for every
 * configured VLAN when vlan filtering is enabled and entry->vid is 0.
 * Runs under RTNL.
 */
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	/* ifindex other than the bridge's own must name one of its ports */
	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified
	 * delete mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			/* NOTE(review): only the last vlan's result is
			 * returned; earlier per-vlan errors are discarded
			 */
			err = __br_mdb_del(br, entry, mdb_attrs);
		}
	} else {
		err = __br_mdb_del(br, entry, mdb_attrs);
	}

	return err;
}
1293
/* Register the bridge MDB rtnetlink handlers: RTM_GETMDB dump plus
 * RTM_NEWMDB/RTM_DELMDB doit callbacks.
 */
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}
1300
/* Unregister the rtnetlink handlers installed by br_mdb_init(). */
void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}