// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static bool switchdev_obj_eq(const struct switchdev_obj *a,
			     const struct switchdev_obj *b)
{
	const struct switchdev_obj_port_vlan *va, *vb;
	const struct switchdev_obj_port_mdb *ma, *mb;

	if (a->id != b->id || a->orig_dev != b->orig_dev)
		return false;

	switch (a->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		va = SWITCHDEV_OBJ_PORT_VLAN(a);
		vb = SWITCHDEV_OBJ_PORT_VLAN(b);
		return va->flags == vb->flags &&
			va->vid == vb->vid &&
			va->changed == vb->changed;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		ma = SWITCHDEV_OBJ_PORT_MDB(a);
		mb = SWITCHDEV_OBJ_PORT_MDB(b);
		return ma->vid == mb->vid &&
			ether_addr_equal(ma->addr, mb->addr);
	default:
		break;
	}

	BUG();
}

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		netdev_put(dfitem->dev, &dfitem->dev_tracker);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	If SWITCHDEV_F_DEFER is not set in @attr->flags, rtnl_lock must be
 *	held and the caller must not be in an atomic section.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
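
/* Illustrative sketch (not part of the original file): a caller in atomic
 * context, e.g. bridge STP code, can defer the operation with
 * SWITCHDEV_F_DEFER. "port_dev" is a hypothetical switchdev port
 * netdevice.
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = port_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(port_dev, &attr, NULL);
 *
 * Here err only reflects the enqueue; the attribute is applied later
 * from switchdev_deferred_process().
 */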

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	If SWITCHDEV_F_DEFER is not set in @obj->flags, rtnl_lock must be
 *	held and the caller must not be in an atomic section.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
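
/* Illustrative sketch (not part of the original file): adding a VLAN
 * object to a port, the way bridge-driven callers do it. "port_dev",
 * the VID and the flags are hypothetical; a caller holding rtnl_lock
 * can pass an extack for error reporting instead of deferring.
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = port_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.flags = BRIDGE_VLAN_INFO_UNTAGGED | BRIDGE_VLAN_INFO_PVID,
 *		.vid = 100,
 *	};
 *
 *	err = switchdev_port_obj_add(port_dev, &vlan.obj, extack);
 */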

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	If SWITCHDEV_F_DEFER is not set in @obj->flags, rtnl_lock must be
 *	held and the caller must not be in an atomic section.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

/**
 *	switchdev_port_obj_act_is_deferred - Is object action pending?
 *
 *	@dev: port device
 *	@nt: type of action; add or delete
 *	@obj: object to test
 *
 *	Returns true if a deferred item equivalent to performing action
 *	@nt on object @obj is pending on @dev.
 *
 *	rtnl_lock must be held.
 */
bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
					enum switchdev_notifier_type nt,
					const struct switchdev_obj *obj)
{
	struct switchdev_deferred_item *dfitem;
	bool found = false;

	ASSERT_RTNL();

	spin_lock_bh(&deferred_lock);

	list_for_each_entry(dfitem, &deferred, list) {
		if (dfitem->dev != dev)
			continue;

		if ((dfitem->func == switchdev_port_obj_add_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_ADD) ||
		    (dfitem->func == switchdev_port_obj_del_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_DEL)) {
			if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
				found = true;
				break;
			}
		}
	}

	spin_unlock_bh(&deferred_lock);

	return found;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
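
/* Illustrative sketch (not part of the original file): with rtnl_lock
 * held, a caller can test whether, say, a deferred VLAN addition is
 * still queued before acting on that VLAN. "port_dev" and "vlan" are
 * hypothetical.
 *
 *	if (switchdev_port_obj_act_is_deferred(port_dev,
 *					       SWITCHDEV_PORT_OBJ_ADD,
 *					       &vlan.obj))
 *		(the addition has not been processed yet)
 */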

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *
 *	Call all switchdev notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
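
/* Illustrative sketch (not part of the original file): drivers listen on
 * the atomic chain for FDB events and defer the actual hardware
 * programming to process context. Names are hypothetical.
 *
 *	static int my_switchdev_event(struct notifier_block *nb,
 *				      unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *		case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *			(schedule a work item for @dev here; this chain
 *			 runs in atomic context)
 *			return NOTIFY_OK;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	err = register_switchdev_notifier(&my_switchdev_nb);
 */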

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
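
/* Illustrative sketch (not part of the original file): a driver's
 * blocking notifier typically fans events out to the
 * switchdev_handle_*() helpers defined below.
 * my_switchdev_blocking_event(), my_dev_check(), my_obj_add() and
 * my_attr_set() are hypothetical.
 *
 *	static int my_switchdev_blocking_event(struct notifier_block *nb,
 *					       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *		int err;
 *
 *		switch (event) {
 *		case SWITCHDEV_PORT_OBJ_ADD:
 *			err = switchdev_handle_port_obj_add(dev, ptr,
 *							    my_dev_check,
 *							    my_obj_add);
 *			return notifier_from_errno(err);
 *		case SWITCHDEV_PORT_ATTR_SET:
 *			err = switchdev_handle_port_attr_set(dev, ptr,
 *							     my_dev_check,
 *							     my_attr_set);
 *			return notifier_from_errno(err);
 *		}
 *		return NOTIFY_DONE;
 *	}
 */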

struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}

static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
			     bool (*check_cb)(const struct net_device *dev),
			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
							  const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge or a LAG device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* Bridge ports might be either us, or LAG interfaces
		 * that we offload.
		 */
		if (!check_cb(lower_dev) &&
		    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
						  foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
							     event, fdb_info, check_cb,
							     foreign_dev_check_cb,
							     mod_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return 0;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb);
}

int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
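
/* Illustrative sketch (not part of the original file): from the atomic
 * switchdev notifier, a driver could route FDB events through this
 * helper. my_dev_check(), my_foreign_dev_check() and my_fdb_mod() are
 * hypothetical driver callbacks.
 *
 *	err = switchdev_handle_fdb_event_to_device(dev, event, fdb_info,
 *						   my_dev_check,
 *						   my_foreign_dev_check,
 *						   my_fdb_mod);
 */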

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct netlink_ext_ack *extack;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them.
	 * But propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, add_cb);
}

/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
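
/* Illustrative sketch (not part of the original file): an add_cb suitable
 * for the helper above, continuing the hypothetical blocking notifier
 * sketched earlier. my_vlan_add() is a hypothetical driver function.
 *
 *	static int my_obj_add(struct net_device *dev, const void *ctx,
 *			      const struct switchdev_obj *obj,
 *			      struct netlink_ext_ack *extack)
 *	{
 *		switch (obj->id) {
 *		case SWITCHDEV_OBJ_ID_PORT_VLAN:
 *			return my_vlan_add(dev, SWITCHDEV_OBJ_PORT_VLAN(obj),
 *					   extack);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */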

/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them.
	 * But propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, del_cb);
}

/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      NULL, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices; another driver might be able to handle them.
	 * But propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);

int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);
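
/* Illustrative sketch (not part of the original file): a driver usually
 * calls this when its port becomes a bridge port, e.g. from a
 * NETDEV_CHANGEUPPER handler, so the bridge learns which notifier blocks
 * to replay state against. Names are hypothetical.
 *
 *	err = switchdev_bridge_port_offload(brport_dev, port_dev, priv,
 *					    &my_switchdev_nb,
 *					    &my_switchdev_blocking_nb,
 *					    false, extack);
 *	if (err)
 *		return err;
 */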

void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);

int switchdev_bridge_port_replay(struct net_device *brport_dev,
				 struct net_device *dev, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_REPLAY,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_replay);