// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/switchdev.h>

#include "br_private.h"

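/* Enabled while at least one bridge port offloads TX forwarding, so that
 * the checks in the RX/TX hot paths stay cheap when no port uses it.
 */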
static struct static_key_false br_switchdev_tx_fwd_offload;

static bool nbp_switchdev_can_offload_tx_fwd(const struct net_bridge_port *p,
					     const struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return (p->flags & BR_TX_FWD_OFFLOAD) &&
	       (p->hwdom != BR_INPUT_SKB_CB(skb)->src_hwdom);
}

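/* True if the bridge has decided that hardware will replicate this frame
 * towards the egress ports on behalf of software forwarding.
 */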
bool br_switchdev_frame_uses_tx_fwd_offload(struct sk_buff *skb)
{
	if (!static_branch_unlikely(&br_switchdev_tx_fwd_offload))
		return false;

	return BR_INPUT_SKB_CB(skb)->tx_fwd_offload;
}

void br_switchdev_frame_set_offload_fwd_mark(struct sk_buff *skb)
{
	skb->offload_fwd_mark = br_switchdev_frame_uses_tx_fwd_offload(skb);
}

/* Mark the frame for TX forwarding offload if this egress port supports it */
void nbp_switchdev_frame_mark_tx_fwd_offload(const struct net_bridge_port *p,
					     struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		BR_INPUT_SKB_CB(skb)->tx_fwd_offload = true;
}

/* Lazily adds the hwdom of the egress bridge port to the bit mask of hwdoms
 * that the skb has already been forwarded to, to avoid further cloning to
 * other ports in the same hwdom by making nbp_switchdev_allowed_egress()
 * return false.
 */
void nbp_switchdev_frame_mark_tx_fwd_to_hwdom(const struct net_bridge_port *p,
					      struct sk_buff *skb)
{
	if (nbp_switchdev_can_offload_tx_fwd(p, skb))
		set_bit(p->hwdom, &BR_INPUT_SKB_CB(skb)->fwd_hwdoms);
}

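/* On ingress, record which hardware domain the frame came from, so that
 * nbp_switchdev_allowed_egress() can suppress software forwarding towards
 * ports of that same domain when the hardware already forwarded the frame
 * (skb->offload_fwd_mark).
 */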
void nbp_switchdev_frame_mark(const struct net_bridge_port *p,
			      struct sk_buff *skb)
{
	if (p->hwdom)
		BR_INPUT_SKB_CB(skb)->src_hwdom = p->hwdom;
}

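/* A port may be used for software egress unless the frame was already
 * forwarded to its hardware domain, either by the offloading ASIC on
 * ingress (offload_fwd_mark + src_hwdom) or via TX forwarding offload
 * (fwd_hwdoms).
 */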
bool nbp_switchdev_allowed_egress(const struct net_bridge_port *p,
				  const struct sk_buff *skb)
{
	struct br_input_skb_cb *cb = BR_INPUT_SKB_CB(skb);

	return !test_bit(p->hwdom, &cb->fwd_hwdoms) &&
		(!skb->offload_fwd_mark || cb->src_hwdom != p->hwdom);
}

/* Flags that can be offloaded to hardware */
#define BR_PORT_FLAGS_HW_OFFLOAD (BR_LEARNING | BR_FLOOD | \
				  BR_MCAST_FLOOD | BR_BCAST_FLOOD | BR_PORT_LOCKED)

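/* Offload bridge port flags in two phases: PRE_BRIDGE_FLAGS is notified
 * atomically and lets drivers veto unsupported flag combinations, then
 * BRIDGE_FLAGS commits the change from deferred (sleepable) context.
 */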
int br_switchdev_set_port_flag(struct net_bridge_port *p,
			       unsigned long flags,
			       unsigned long mask,
			       struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
	};
	struct switchdev_notifier_port_attr_info info = {
		.attr = &attr,
	};
	int err;

	mask &= BR_PORT_FLAGS_HW_OFFLOAD;
	if (!mask)
		return 0;

	attr.id = SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS;
	attr.u.brport_flags.val = flags;
	attr.u.brport_flags.mask = mask;

	/* We run from atomic context here */
	err = call_switchdev_notifiers(SWITCHDEV_PORT_ATTR_SET, p->dev,
				       &info.info, extack);
	err = notifier_to_errno(err);
	if (err == -EOPNOTSUPP)
		return 0;

	if (err) {
		if (extack && !extack->_msg)
			NL_SET_ERR_MSG_MOD(extack,
					   "bridge flag offload is not supported");
		return -EOPNOTSUPP;
	}

	attr.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS;
	attr.flags = SWITCHDEV_F_DEFER;

	err = switchdev_port_attr_set(p->dev, &attr, extack);
	if (err) {
		if (extack && !extack->_msg)
			NL_SET_ERR_MSG_MOD(extack,
					   "error setting offload flag on port");
		return err;
	}

	return 0;
}

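/* Fill in a switchdev FDB notification from a bridge FDB entry. Local
 * entries and entries without a destination port are reported against the
 * bridge device itself.
 */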
static void br_switchdev_fdb_populate(struct net_bridge *br,
				      struct switchdev_notifier_fdb_info *item,
				      const struct net_bridge_fdb_entry *fdb,
				      const void *ctx)
{
	const struct net_bridge_port *p = READ_ONCE(fdb->dst);

	item->addr = fdb->key.addr.addr;
	item->vid = fdb->key.vlan_id;
	item->added_by_user = test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
	item->offloaded = test_bit(BR_FDB_OFFLOADED, &fdb->flags);
	item->is_local = test_bit(BR_FDB_LOCAL, &fdb->flags);
	item->info.dev = (!p || item->is_local) ? br->dev : p->dev;
	item->info.ctx = ctx;
}

void
br_switchdev_fdb_notify(struct net_bridge *br,
			const struct net_bridge_fdb_entry *fdb, int type)
{
	struct switchdev_notifier_fdb_info item;

	br_switchdev_fdb_populate(br, &item, fdb, NULL);

	switch (type) {
	case RTM_DELNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	case RTM_NEWNEIGH:
		call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_DEVICE,
					 item.info.dev, &item.info, NULL);
		break;
	}
}

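/* Thin wrappers translating bridge VLAN (vid, flags) pairs into switchdev
 * port VLAN objects.
 */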
int br_switchdev_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
			       bool changed, struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid = vid,
		.changed = changed,
	};

	return switchdev_port_obj_add(dev, &v.obj, extack);
}

int br_switchdev_port_vlan_del(struct net_device *dev, u16 vid)
{
	struct switchdev_obj_port_vlan v = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.vid = vid,
	};

	return switchdev_port_obj_del(dev, &v.obj);
}

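/* Assign a hardware domain to a joining port: reuse the domain of another
 * port with the same parent switch ID, otherwise claim the next free one.
 * Domain 0 is reserved for non-offloaded ports, hence the search from
 * bit 1.
 */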
static int nbp_switchdev_hwdom_set(struct net_bridge_port *joining)
{
	struct net_bridge *br = joining->br;
	struct net_bridge_port *p;
	int hwdom;

	/* joining is yet to be added to the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (netdev_phys_item_id_same(&joining->ppid, &p->ppid)) {
			joining->hwdom = p->hwdom;
			return 0;
		}
	}

	hwdom = find_next_zero_bit(&br->busy_hwdoms, BR_HWDOM_MAX, 1);
	if (hwdom >= BR_HWDOM_MAX)
		return -EBUSY;

	set_bit(hwdom, &br->busy_hwdoms);
	joining->hwdom = hwdom;
	return 0;
}

static void nbp_switchdev_hwdom_put(struct net_bridge_port *leaving)
{
	struct net_bridge *br = leaving->br;
	struct net_bridge_port *p;

	/* leaving is no longer in the port list. */
	list_for_each_entry(p, &br->port_list, list) {
		if (p->hwdom == leaving->hwdom)
			return;
	}

	clear_bit(leaving->hwdom, &br->busy_hwdoms);
}

static int nbp_switchdev_add(struct net_bridge_port *p,
			     struct netdev_phys_item_id ppid,
			     bool tx_fwd_offload,
			     struct netlink_ext_ack *extack)
{
	int err;

	if (p->offload_count) {
		/* Prevent unsupported configurations such as a bridge port
		 * which is a bonding interface whose member ports come from
		 * different hardware switches.
		 */
		if (!netdev_phys_item_id_same(&p->ppid, &ppid)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Same bridge port cannot be offloaded by two physical switches");
			return -EBUSY;
		}

		/* Tolerate drivers that call switchdev_bridge_port_offload()
		 * more than once for the same bridge port, such as when the
		 * bridge port is an offloaded bonding/team interface.
		 */
		p->offload_count++;

		return 0;
	}

	p->ppid = ppid;
	p->offload_count = 1;

	err = nbp_switchdev_hwdom_set(p);
	if (err)
		return err;

	if (tx_fwd_offload) {
		p->flags |= BR_TX_FWD_OFFLOAD;
		static_branch_inc(&br_switchdev_tx_fwd_offload);
	}

	return 0;
}

static void nbp_switchdev_del(struct net_bridge_port *p)
{
	if (WARN_ON(!p->offload_count))
		return;

	p->offload_count--;

	if (p->offload_count)
		return;

	if (p->hwdom)
		nbp_switchdev_hwdom_put(p);

	if (p->flags & BR_TX_FWD_OFFLOAD) {
		p->flags &= ~BR_TX_FWD_OFFLOAD;
		static_branch_dec(&br_switchdev_tx_fwd_offload);
	}
}

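/* Replay helpers: deliver the bridge's current state to the notifier
 * blocks of a newly attached driver, so it learns about objects that were
 * added before it started listening.
 */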
static int
br_switchdev_fdb_replay_one(struct net_bridge *br, struct notifier_block *nb,
			    const struct net_bridge_fdb_entry *fdb,
			    unsigned long action, const void *ctx)
{
	struct switchdev_notifier_fdb_info item;
	int err;

	br_switchdev_fdb_populate(br, &item, fdb, ctx);

	err = nb->notifier_call(nb, action, &item);
	return notifier_to_errno(err);
}

static int
br_switchdev_fdb_replay(const struct net_device *br_dev, const void *ctx,
			bool adding, struct notifier_block *nb)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge *br;
	unsigned long action;
	int err = 0;

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (adding)
		action = SWITCHDEV_FDB_ADD_TO_DEVICE;
	else
		action = SWITCHDEV_FDB_DEL_TO_DEVICE;

	rcu_read_lock();

	hlist_for_each_entry_rcu(fdb, &br->fdb_list, fdb_node) {
		err = br_switchdev_fdb_replay_one(br, nb, fdb, action, ctx);
		if (err)
			break;
	}

	rcu_read_unlock();

	return err;
}

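/* Replay the bridge's VLAN-to-MSTI mappings as port attributes. */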
static int br_switchdev_vlan_attr_replay(struct net_device *br_dev,
					 const void *ctx,
					 struct notifier_block *nb,
					 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_attr_info attr_info = {
		.info = {
			.dev = br_dev,
			.extack = extack,
			.ctx = ctx,
		},
	};
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_vlan_group *vg;
	struct switchdev_attr attr;
	struct net_bridge_vlan *v;
	int err;

	attr_info.attr = &attr;
	attr.orig_dev = br_dev;

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (v->msti) {
			attr.id = SWITCHDEV_ATTR_ID_VLAN_MSTI;
			attr.u.vlan_msti.vid = v->vid;
			attr.u.vlan_msti.msti = v->msti;

			err = nb->notifier_call(nb, SWITCHDEV_PORT_ATTR_SET,
						&attr_info);
			err = notifier_to_errno(err);
			if (err)
				return err;
		}
	}

	return 0;
}

static int
br_switchdev_vlan_replay_one(struct notifier_block *nb,
			     struct net_device *dev,
			     struct switchdev_obj_port_vlan *vlan,
			     const void *ctx, unsigned long action,
			     struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &vlan->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_vlan_replay_group(struct notifier_block *nb,
					  struct net_device *dev,
					  struct net_bridge_vlan_group *vg,
					  const void *ctx, unsigned long action,
					  struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v;
	int err = 0;
	u16 pvid;

	if (!vg)
		return 0;

	pvid = br_get_pvid(vg);

	list_for_each_entry(v, &vg->vlan_list, vlist) {
		struct switchdev_obj_port_vlan vlan = {
			.obj.orig_dev = dev,
			.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
			.flags = br_vlan_flags(v, pvid),
			.vid = v->vid,
		};

		if (!br_vlan_should_use(v))
			continue;

		err = br_switchdev_vlan_replay_one(nb, dev, &vlan, ctx,
						   action, extack);
		if (err)
			return err;
	}

	return 0;
}

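/* Replay the VLANs of the bridge device and of every bridge port, and,
 * when adding, the VLAN attributes on top of them.
 */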
static int br_switchdev_vlan_replay(struct net_device *br_dev,
				    const void *ctx, bool adding,
				    struct notifier_block *nb,
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(br_dev);
	struct net_bridge_port *p;
	unsigned long action;
	int err;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev))
		return -EINVAL;

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	err = br_switchdev_vlan_replay_group(nb, br_dev, br_vlan_group(br),
					     ctx, action, extack);
	if (err)
		return err;

	list_for_each_entry(p, &br->port_list, list) {
		struct net_device *dev = p->dev;

		err = br_switchdev_vlan_replay_group(nb, dev,
						     nbp_vlan_group(p),
						     ctx, action, extack);
		if (err)
			return err;
	}

	if (adding) {
		err = br_switchdev_vlan_attr_replay(br_dev, ctx, nb, extack);
		if (err)
			return err;
	}

	return 0;
}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
struct br_switchdev_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

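/* Deferred completion for a port MDB addition: once the hardware has
 * acked the object, mark the matching port group as offloaded.
 */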
static void br_switchdev_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_switchdev_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

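/* Translate the group address of an MDB entry into the multicast MAC
 * address that the hardware is expected to match on.
 */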
static void br_switchdev_mdb_populate(struct switchdev_obj_port_mdb *mdb,
				      const struct net_bridge_mdb_entry *mp)
{
	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb->addr);
#if IS_ENABLED(CONFIG_IPV6)
	else if (mp->addr.proto == htons(ETH_P_IPV6))
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb->addr);
#endif
	else
		ether_addr_copy(mdb->addr, mp->addr.dst.mac_addr);

	mdb->vid = mp->addr.vid;
}

static void br_switchdev_host_mdb_one(struct net_device *dev,
				      struct net_device *lower_dev,
				      struct net_bridge_mdb_entry *mp,
				      int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
			.orig_dev = dev,
		},
	};

	br_switchdev_mdb_populate(&mdb, mp);

	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

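/* Host-joined groups are programmed against the bridge device itself, so
 * notify every lower device of the bridge.
 */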
static void br_switchdev_host_mdb(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_switchdev_host_mdb_one(dev, lower_dev, mp, type);
}

static int
br_switchdev_mdb_replay_one(struct notifier_block *nb, struct net_device *dev,
			    const struct switchdev_obj_port_mdb *mdb,
			    unsigned long action, const void *ctx,
			    struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_port_obj_info obj_info = {
		.info = {
			.dev = dev,
			.extack = extack,
			.ctx = ctx,
		},
		.obj = &mdb->obj,
	};
	int err;

	err = nb->notifier_call(nb, action, &obj_info);
	return notifier_to_errno(err);
}

static int br_switchdev_mdb_queue_one(struct list_head *mdb_list,
				      enum switchdev_obj_id id,
				      const struct net_bridge_mdb_entry *mp,
				      struct net_device *orig_dev)
{
	struct switchdev_obj_port_mdb *mdb;

	mdb = kzalloc(sizeof(*mdb), GFP_ATOMIC);
	if (!mdb)
		return -ENOMEM;

	mdb->obj.id = id;
	mdb->obj.orig_dev = orig_dev;
	br_switchdev_mdb_populate(mdb, mp);
	list_add_tail(&mdb->obj.list, mdb_list);

	return 0;
}

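/* Notify switchdev of an MDB entry being added to or deleted from a port
 * (or from the host, when no port group is given).
 */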
void br_switchdev_mdb_notify(struct net_device *dev,
			     struct net_bridge_mdb_entry *mp,
			     struct net_bridge_port_group *pg,
			     int type)
{
	struct br_switchdev_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
	};

	if (!pg)
		return br_switchdev_host_mdb(dev, mp, type);

	br_switchdev_mdb_populate(&mdb, mp);

	mdb.obj.orig_dev = pg->key.port->dev;
	switch (type) {
	case RTM_NEWMDB:
		complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
		if (!complete_info)
			break;
		complete_info->port = pg->key.port;
		complete_info->ip = mp->addr;
		mdb.obj.complete_priv = complete_info;
		mdb.obj.complete = br_switchdev_mdb_complete;
		if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
			kfree(complete_info);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
		break;
	}
}
#endif

static int
br_switchdev_mdb_replay(struct net_device *br_dev, struct net_device *dev,
			const void *ctx, bool adding, struct notifier_block *nb,
			struct netlink_ext_ack *extack)
{
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	const struct net_bridge_mdb_entry *mp;
	struct switchdev_obj *obj, *tmp;
	struct net_bridge *br;
	unsigned long action;
	LIST_HEAD(mdb_list);
	int err = 0;

	ASSERT_RTNL();

	if (!nb)
		return 0;

	if (!netif_is_bridge_master(br_dev) || !netif_is_bridge_port(dev))
		return -EINVAL;

	br = netdev_priv(br_dev);

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	/* We cannot walk over br->mdb_list protected just by the rtnl_mutex,
	 * because the write-side protection is br->multicast_lock. But we
	 * need to emulate the [ blocking ] calling context of a regular
	 * switchdev event, so since both br->multicast_lock and RCU read side
	 * critical sections are atomic, we have no choice but to pick the RCU
	 * read side lock, queue up all our events, leave the critical section
	 * and notify switchdev from blocking context.
	 */
	rcu_read_lock();

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group __rcu * const *pp;
		const struct net_bridge_port_group *p;

		if (mp->host_joined) {
			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_HOST_MDB,
							 mp, br_dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (p->key.port->dev != dev)
				continue;

			err = br_switchdev_mdb_queue_one(&mdb_list,
							 SWITCHDEV_OBJ_ID_PORT_MDB,
							 mp, dev);
			if (err) {
				rcu_read_unlock();
				goto out_free_mdb;
			}
		}
	}

	rcu_read_unlock();

	if (adding)
		action = SWITCHDEV_PORT_OBJ_ADD;
	else
		action = SWITCHDEV_PORT_OBJ_DEL;

	list_for_each_entry(obj, &mdb_list, list) {
		err = br_switchdev_mdb_replay_one(nb, dev,
						  SWITCHDEV_OBJ_PORT_MDB(obj),
						  action, ctx, extack);
		if (err)
			goto out_free_mdb;
	}

out_free_mdb:
	list_for_each_entry_safe(obj, tmp, &mdb_list, list) {
		list_del(&obj->list);
		kfree(SWITCHDEV_OBJ_PORT_MDB(obj));
	}

	if (err)
		return err;
#endif

	return 0;
}

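/* Replay VLANs, MDB entries and FDB entries towards the driver of a newly
 * offloaded port; -EOPNOTSUPP from a replay is not fatal.
 */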
static int nbp_switchdev_sync_objs(struct net_bridge_port *p, const void *ctx,
				   struct notifier_block *atomic_nb,
				   struct notifier_block *blocking_nb,
				   struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;
	int err;

	err = br_switchdev_vlan_replay(br_dev, ctx, true, blocking_nb, extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_mdb_replay(br_dev, dev, ctx, true, blocking_nb,
				      extack);
	if (err && err != -EOPNOTSUPP)
		return err;

	err = br_switchdev_fdb_replay(br_dev, ctx, true, atomic_nb);
	if (err && err != -EOPNOTSUPP)
		return err;

	return 0;
}

static void nbp_switchdev_unsync_objs(struct net_bridge_port *p,
				      const void *ctx,
				      struct notifier_block *atomic_nb,
				      struct notifier_block *blocking_nb)
{
	struct net_device *br_dev = p->br->dev;
	struct net_device *dev = p->dev;

	br_switchdev_fdb_replay(br_dev, ctx, false, atomic_nb);

	br_switchdev_mdb_replay(br_dev, dev, ctx, false, blocking_nb, NULL);

	br_switchdev_vlan_replay(br_dev, ctx, false, blocking_nb, NULL);
}

/* Let the bridge know that this port is offloaded, so that it can assign a
 * switchdev hardware domain to it.
 */
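/* Typically reached via switchdev_bridge_port_offload(), which drivers
 * call when one of their netdevs becomes a bridge port. A minimal sketch
 * of such a call, assuming a hypothetical driver private struct "priv"
 * whose notifier blocks are already registered:
 *
 *	err = switchdev_bridge_port_offload(brport_dev, priv->dev, priv,
 *					    &priv->atomic_nb,
 *					    &priv->blocking_nb,
 *					    true, extack);
 */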
int br_switchdev_port_offload(struct net_bridge_port *p,
			      struct net_device *dev, const void *ctx,
			      struct notifier_block *atomic_nb,
			      struct notifier_block *blocking_nb,
			      bool tx_fwd_offload,
			      struct netlink_ext_ack *extack)
{
	struct netdev_phys_item_id ppid;
	int err;

	err = dev_get_port_parent_id(dev, &ppid, false);
	if (err)
		return err;

	err = nbp_switchdev_add(p, ppid, tx_fwd_offload, extack);
	if (err)
		return err;

	err = nbp_switchdev_sync_objs(p, ctx, atomic_nb, blocking_nb, extack);
	if (err)
		goto out_switchdev_del;

	return 0;

out_switchdev_del:
	nbp_switchdev_del(p);

	return err;
}

void br_switchdev_port_unoffload(struct net_bridge_port *p, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb)
{
	nbp_switchdev_unsync_objs(p, ctx, atomic_nb, blocking_nb);

	nbp_switchdev_del(p);
}