Merge tag 'pci-v5.14-fixes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaa...
[linux-2.6-microblaze.git] / net / bridge / br_vlan.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/kernel.h>
3 #include <linux/netdevice.h>
4 #include <linux/rtnetlink.h>
5 #include <linux/slab.h>
6 #include <net/switchdev.h>
7
8 #include "br_private.h"
9 #include "br_private_tunnel.h"
10
11 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
12
13 static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
14                               const void *ptr)
15 {
16         const struct net_bridge_vlan *vle = ptr;
17         u16 vid = *(u16 *)arg->key;
18
19         return vle->vid != vid;
20 }
21
22 static const struct rhashtable_params br_vlan_rht_params = {
23         .head_offset = offsetof(struct net_bridge_vlan, vnode),
24         .key_offset = offsetof(struct net_bridge_vlan, vid),
25         .key_len = sizeof(u16),
26         .nelem_hint = 3,
27         .max_size = VLAN_N_VID,
28         .obj_cmpfn = br_vlan_cmp,
29         .automatic_shrinking = true,
30 };
31
32 static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
33 {
34         return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
35 }
36
37 static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg,
38                             const struct net_bridge_vlan *v)
39 {
40         if (vg->pvid == v->vid)
41                 return false;
42
43         smp_wmb();
44         br_vlan_set_pvid_state(vg, v->state);
45         vg->pvid = v->vid;
46
47         return true;
48 }
49
50 static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
51 {
52         if (vg->pvid != vid)
53                 return false;
54
55         smp_wmb();
56         vg->pvid = 0;
57
58         return true;
59 }
60
61 /* return true if anything changed, false otherwise */
62 static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
63 {
64         struct net_bridge_vlan_group *vg;
65         u16 old_flags = v->flags;
66         bool ret;
67
68         if (br_vlan_is_master(v))
69                 vg = br_vlan_group(v->br);
70         else
71                 vg = nbp_vlan_group(v->port);
72
73         if (flags & BRIDGE_VLAN_INFO_PVID)
74                 ret = __vlan_add_pvid(vg, v);
75         else
76                 ret = __vlan_delete_pvid(vg, v->vid);
77
78         if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
79                 v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
80         else
81                 v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
82
83         return ret || !!(old_flags ^ v->flags);
84 }
85
86 static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
87                           struct net_bridge_vlan *v, u16 flags,
88                           struct netlink_ext_ack *extack)
89 {
90         int err;
91
92         /* Try switchdev op first. In case it is not supported, fallback to
93          * 8021q add.
94          */
95         err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
96         if (err == -EOPNOTSUPP)
97                 return vlan_vid_add(dev, br->vlan_proto, v->vid);
98         v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
99         return err;
100 }
101
102 static void __vlan_add_list(struct net_bridge_vlan *v)
103 {
104         struct net_bridge_vlan_group *vg;
105         struct list_head *headp, *hpos;
106         struct net_bridge_vlan *vent;
107
108         if (br_vlan_is_master(v))
109                 vg = br_vlan_group(v->br);
110         else
111                 vg = nbp_vlan_group(v->port);
112
113         headp = &vg->vlan_list;
114         list_for_each_prev(hpos, headp) {
115                 vent = list_entry(hpos, struct net_bridge_vlan, vlist);
116                 if (v->vid >= vent->vid)
117                         break;
118         }
119         list_add_rcu(&v->vlist, hpos);
120 }
121
122 static void __vlan_del_list(struct net_bridge_vlan *v)
123 {
124         list_del_rcu(&v->vlist);
125 }
126
127 static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
128                           const struct net_bridge_vlan *v)
129 {
130         int err;
131
132         /* Try switchdev op first. In case it is not supported, fallback to
133          * 8021q del.
134          */
135         err = br_switchdev_port_vlan_del(dev, v->vid);
136         if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
137                 vlan_vid_del(dev, br->vlan_proto, v->vid);
138         return err == -EOPNOTSUPP ? 0 : err;
139 }
140
141 /* Returns a master vlan, if it didn't exist it gets created. In all cases
142  * a reference is taken to the master vlan before returning.
143  */
144 static struct net_bridge_vlan *
145 br_vlan_get_master(struct net_bridge *br, u16 vid,
146                    struct netlink_ext_ack *extack)
147 {
148         struct net_bridge_vlan_group *vg;
149         struct net_bridge_vlan *masterv;
150
151         vg = br_vlan_group(br);
152         masterv = br_vlan_find(vg, vid);
153         if (!masterv) {
154                 bool changed;
155
156                 /* missing global ctx, create it now */
157                 if (br_vlan_add(br, vid, 0, &changed, extack))
158                         return NULL;
159                 masterv = br_vlan_find(vg, vid);
160                 if (WARN_ON(!masterv))
161                         return NULL;
162                 refcount_set(&masterv->refcnt, 1);
163                 return masterv;
164         }
165         refcount_inc(&masterv->refcnt);
166
167         return masterv;
168 }
169
170 static void br_master_vlan_rcu_free(struct rcu_head *rcu)
171 {
172         struct net_bridge_vlan *v;
173
174         v = container_of(rcu, struct net_bridge_vlan, rcu);
175         WARN_ON(!br_vlan_is_master(v));
176         free_percpu(v->stats);
177         v->stats = NULL;
178         kfree(v);
179 }
180
181 static void br_vlan_put_master(struct net_bridge_vlan *masterv)
182 {
183         struct net_bridge_vlan_group *vg;
184
185         if (!br_vlan_is_master(masterv))
186                 return;
187
188         vg = br_vlan_group(masterv->br);
189         if (refcount_dec_and_test(&masterv->refcnt)) {
190                 rhashtable_remove_fast(&vg->vlan_hash,
191                                        &masterv->vnode, br_vlan_rht_params);
192                 __vlan_del_list(masterv);
193                 call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
194         }
195 }
196
197 static void nbp_vlan_rcu_free(struct rcu_head *rcu)
198 {
199         struct net_bridge_vlan *v;
200
201         v = container_of(rcu, struct net_bridge_vlan, rcu);
202         WARN_ON(br_vlan_is_master(v));
203         /* if we had per-port stats configured then free them here */
204         if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
205                 free_percpu(v->stats);
206         v->stats = NULL;
207         kfree(v);
208 }
209
210 /* This is the shared VLAN add function which works for both ports and bridge
211  * devices. There are four possible calls to this function in terms of the
212  * vlan entry type:
213  * 1. vlan is being added on a port (no master flags, global entry exists)
214  * 2. vlan is being added on a bridge (both master and brentry flags)
215  * 3. vlan is being added on a port, but a global entry didn't exist which
216  *    is being created right now (master flag set, brentry flag unset), the
217  *    global entry is used for global per-vlan features, but not for filtering
218  * 4. same as 3 but with both master and brentry flags set so the entry
219  *    will be used for filtering in both the port and the bridge
220  */
221 static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
222                       struct netlink_ext_ack *extack)
223 {
224         struct net_bridge_vlan *masterv = NULL;
225         struct net_bridge_port *p = NULL;
226         struct net_bridge_vlan_group *vg;
227         struct net_device *dev;
228         struct net_bridge *br;
229         int err;
230
231         if (br_vlan_is_master(v)) {
232                 br = v->br;
233                 dev = br->dev;
234                 vg = br_vlan_group(br);
235         } else {
236                 p = v->port;
237                 br = p->br;
238                 dev = p->dev;
239                 vg = nbp_vlan_group(p);
240         }
241
242         if (p) {
243                 /* Add VLAN to the device filter if it is supported.
244                  * This ensures tagged traffic enters the bridge when
245                  * promiscuous mode is disabled by br_manage_promisc().
246                  */
247                 err = __vlan_vid_add(dev, br, v, flags, extack);
248                 if (err)
249                         goto out;
250
251                 /* need to work on the master vlan too */
252                 if (flags & BRIDGE_VLAN_INFO_MASTER) {
253                         bool changed;
254
255                         err = br_vlan_add(br, v->vid,
256                                           flags | BRIDGE_VLAN_INFO_BRENTRY,
257                                           &changed, extack);
258                         if (err)
259                                 goto out_filt;
260
261                         if (changed)
262                                 br_vlan_notify(br, NULL, v->vid, 0,
263                                                RTM_NEWVLAN);
264                 }
265
266                 masterv = br_vlan_get_master(br, v->vid, extack);
267                 if (!masterv) {
268                         err = -ENOMEM;
269                         goto out_filt;
270                 }
271                 v->brvlan = masterv;
272                 if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
273                         v->stats =
274                              netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
275                         if (!v->stats) {
276                                 err = -ENOMEM;
277                                 goto out_filt;
278                         }
279                         v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
280                 } else {
281                         v->stats = masterv->stats;
282                 }
283         } else {
284                 err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
285                 if (err && err != -EOPNOTSUPP)
286                         goto out;
287         }
288
289         /* Add the dev mac and count the vlan only if it's usable */
290         if (br_vlan_should_use(v)) {
291                 err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
292                 if (err) {
293                         br_err(br, "failed insert local address into bridge forwarding table\n");
294                         goto out_filt;
295                 }
296                 vg->num_vlans++;
297         }
298
299         /* set the state before publishing */
300         v->state = BR_STATE_FORWARDING;
301
302         err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
303                                             br_vlan_rht_params);
304         if (err)
305                 goto out_fdb_insert;
306
307         __vlan_add_list(v);
308         __vlan_add_flags(v, flags);
309
310         if (p)
311                 nbp_vlan_set_vlan_dev_state(p, v->vid);
312 out:
313         return err;
314
315 out_fdb_insert:
316         if (br_vlan_should_use(v)) {
317                 br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
318                 vg->num_vlans--;
319         }
320
321 out_filt:
322         if (p) {
323                 __vlan_vid_del(dev, br, v);
324                 if (masterv) {
325                         if (v->stats && masterv->stats != v->stats)
326                                 free_percpu(v->stats);
327                         v->stats = NULL;
328
329                         br_vlan_put_master(masterv);
330                         v->brvlan = NULL;
331                 }
332         } else {
333                 br_switchdev_port_vlan_del(dev, v->vid);
334         }
335
336         goto out;
337 }
338
339 static int __vlan_del(struct net_bridge_vlan *v)
340 {
341         struct net_bridge_vlan *masterv = v;
342         struct net_bridge_vlan_group *vg;
343         struct net_bridge_port *p = NULL;
344         int err = 0;
345
346         if (br_vlan_is_master(v)) {
347                 vg = br_vlan_group(v->br);
348         } else {
349                 p = v->port;
350                 vg = nbp_vlan_group(v->port);
351                 masterv = v->brvlan;
352         }
353
354         __vlan_delete_pvid(vg, v->vid);
355         if (p) {
356                 err = __vlan_vid_del(p->dev, p->br, v);
357                 if (err)
358                         goto out;
359         } else {
360                 err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
361                 if (err && err != -EOPNOTSUPP)
362                         goto out;
363                 err = 0;
364         }
365
366         if (br_vlan_should_use(v)) {
367                 v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
368                 vg->num_vlans--;
369         }
370
371         if (masterv != v) {
372                 vlan_tunnel_info_del(vg, v);
373                 rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
374                                        br_vlan_rht_params);
375                 __vlan_del_list(v);
376                 nbp_vlan_set_vlan_dev_state(p, v->vid);
377                 call_rcu(&v->rcu, nbp_vlan_rcu_free);
378         }
379
380         br_vlan_put_master(masterv);
381 out:
382         return err;
383 }
384
385 static void __vlan_group_free(struct net_bridge_vlan_group *vg)
386 {
387         WARN_ON(!list_empty(&vg->vlan_list));
388         rhashtable_destroy(&vg->vlan_hash);
389         vlan_tunnel_deinit(vg);
390         kfree(vg);
391 }
392
393 static void __vlan_flush(const struct net_bridge *br,
394                          const struct net_bridge_port *p,
395                          struct net_bridge_vlan_group *vg)
396 {
397         struct net_bridge_vlan *vlan, *tmp;
398         u16 v_start = 0, v_end = 0;
399
400         __vlan_delete_pvid(vg, vg->pvid);
401         list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist) {
402                 /* take care of disjoint ranges */
403                 if (!v_start) {
404                         v_start = vlan->vid;
405                 } else if (vlan->vid - v_end != 1) {
406                         /* found range end, notify and start next one */
407                         br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
408                         v_start = vlan->vid;
409                 }
410                 v_end = vlan->vid;
411
412                 __vlan_del(vlan);
413         }
414
415         /* notify about the last/whole vlan range */
416         if (v_start)
417                 br_vlan_notify(br, p, v_start, v_end, RTM_DELVLAN);
418 }
419
420 struct sk_buff *br_handle_vlan(struct net_bridge *br,
421                                const struct net_bridge_port *p,
422                                struct net_bridge_vlan_group *vg,
423                                struct sk_buff *skb)
424 {
425         struct pcpu_sw_netstats *stats;
426         struct net_bridge_vlan *v;
427         u16 vid;
428
429         /* If this packet was not filtered at input, let it pass */
430         if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
431                 goto out;
432
433         /* At this point, we know that the frame was filtered and contains
434          * a valid vlan id.  If the vlan id has untagged flag set,
435          * send untagged; otherwise, send tagged.
436          */
437         br_vlan_get_tag(skb, &vid);
438         v = br_vlan_find(vg, vid);
439         /* Vlan entry must be configured at this point.  The
440          * only exception is the bridge is set in promisc mode and the
441          * packet is destined for the bridge device.  In this case
442          * pass the packet as is.
443          */
444         if (!v || !br_vlan_should_use(v)) {
445                 if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
446                         goto out;
447                 } else {
448                         kfree_skb(skb);
449                         return NULL;
450                 }
451         }
452         if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
453                 stats = this_cpu_ptr(v->stats);
454                 u64_stats_update_begin(&stats->syncp);
455                 stats->tx_bytes += skb->len;
456                 stats->tx_packets++;
457                 u64_stats_update_end(&stats->syncp);
458         }
459
460         if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
461                 __vlan_hwaccel_clear_tag(skb);
462
463         if (p && (p->flags & BR_VLAN_TUNNEL) &&
464             br_handle_egress_vlan_tunnel(skb, v)) {
465                 kfree_skb(skb);
466                 return NULL;
467         }
468 out:
469         return skb;
470 }
471
472 /* Called under RCU */
473 static bool __allowed_ingress(const struct net_bridge *br,
474                               struct net_bridge_vlan_group *vg,
475                               struct sk_buff *skb, u16 *vid,
476                               u8 *state)
477 {
478         struct pcpu_sw_netstats *stats;
479         struct net_bridge_vlan *v;
480         bool tagged;
481
482         BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
483         /* If vlan tx offload is disabled on bridge device and frame was
484          * sent from vlan device on the bridge device, it does not have
485          * HW accelerated vlan tag.
486          */
487         if (unlikely(!skb_vlan_tag_present(skb) &&
488                      skb->protocol == br->vlan_proto)) {
489                 skb = skb_vlan_untag(skb);
490                 if (unlikely(!skb))
491                         return false;
492         }
493
494         if (!br_vlan_get_tag(skb, vid)) {
495                 /* Tagged frame */
496                 if (skb->vlan_proto != br->vlan_proto) {
497                         /* Protocol-mismatch, empty out vlan_tci for new tag */
498                         skb_push(skb, ETH_HLEN);
499                         skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
500                                                         skb_vlan_tag_get(skb));
501                         if (unlikely(!skb))
502                                 return false;
503
504                         skb_pull(skb, ETH_HLEN);
505                         skb_reset_mac_len(skb);
506                         *vid = 0;
507                         tagged = false;
508                 } else {
509                         tagged = true;
510                 }
511         } else {
512                 /* Untagged frame */
513                 tagged = false;
514         }
515
516         if (!*vid) {
517                 u16 pvid = br_get_pvid(vg);
518
519                 /* Frame had a tag with VID 0 or did not have a tag.
520                  * See if pvid is set on this port.  That tells us which
521                  * vlan untagged or priority-tagged traffic belongs to.
522                  */
523                 if (!pvid)
524                         goto drop;
525
526                 /* PVID is set on this port.  Any untagged or priority-tagged
527                  * ingress frame is considered to belong to this vlan.
528                  */
529                 *vid = pvid;
530                 if (likely(!tagged))
531                         /* Untagged Frame. */
532                         __vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
533                 else
534                         /* Priority-tagged Frame.
535                          * At this point, we know that skb->vlan_tci VID
536                          * field was 0.
537                          * We update only VID field and preserve PCP field.
538                          */
539                         skb->vlan_tci |= pvid;
540
541                 /* if stats are disabled we can avoid the lookup */
542                 if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
543                         if (*state == BR_STATE_FORWARDING) {
544                                 *state = br_vlan_get_pvid_state(vg);
545                                 return br_vlan_state_allowed(*state, true);
546                         } else {
547                                 return true;
548                         }
549                 }
550         }
551         v = br_vlan_find(vg, *vid);
552         if (!v || !br_vlan_should_use(v))
553                 goto drop;
554
555         if (*state == BR_STATE_FORWARDING) {
556                 *state = br_vlan_get_state(v);
557                 if (!br_vlan_state_allowed(*state, true))
558                         goto drop;
559         }
560
561         if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
562                 stats = this_cpu_ptr(v->stats);
563                 u64_stats_update_begin(&stats->syncp);
564                 stats->rx_bytes += skb->len;
565                 stats->rx_packets++;
566                 u64_stats_update_end(&stats->syncp);
567         }
568
569         return true;
570
571 drop:
572         kfree_skb(skb);
573         return false;
574 }
575
576 bool br_allowed_ingress(const struct net_bridge *br,
577                         struct net_bridge_vlan_group *vg, struct sk_buff *skb,
578                         u16 *vid, u8 *state)
579 {
580         /* If VLAN filtering is disabled on the bridge, all packets are
581          * permitted.
582          */
583         if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
584                 BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
585                 return true;
586         }
587
588         return __allowed_ingress(br, vg, skb, vid, state);
589 }
590
591 /* Called under RCU. */
592 bool br_allowed_egress(struct net_bridge_vlan_group *vg,
593                        const struct sk_buff *skb)
594 {
595         const struct net_bridge_vlan *v;
596         u16 vid;
597
598         /* If this packet was not filtered at input, let it pass */
599         if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
600                 return true;
601
602         br_vlan_get_tag(skb, &vid);
603         v = br_vlan_find(vg, vid);
604         if (v && br_vlan_should_use(v) &&
605             br_vlan_state_allowed(br_vlan_get_state(v), false))
606                 return true;
607
608         return false;
609 }
610
611 /* Called under RCU */
612 bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
613 {
614         struct net_bridge_vlan_group *vg;
615         struct net_bridge *br = p->br;
616         struct net_bridge_vlan *v;
617
618         /* If filtering was disabled at input, let it pass. */
619         if (!br_opt_get(br, BROPT_VLAN_ENABLED))
620                 return true;
621
622         vg = nbp_vlan_group_rcu(p);
623         if (!vg || !vg->num_vlans)
624                 return false;
625
626         if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
627                 *vid = 0;
628
629         if (!*vid) {
630                 *vid = br_get_pvid(vg);
631                 if (!*vid ||
632                     !br_vlan_state_allowed(br_vlan_get_pvid_state(vg), true))
633                         return false;
634
635                 return true;
636         }
637
638         v = br_vlan_find(vg, *vid);
639         if (v && br_vlan_state_allowed(br_vlan_get_state(v), true))
640                 return true;
641
642         return false;
643 }
644
645 static int br_vlan_add_existing(struct net_bridge *br,
646                                 struct net_bridge_vlan_group *vg,
647                                 struct net_bridge_vlan *vlan,
648                                 u16 flags, bool *changed,
649                                 struct netlink_ext_ack *extack)
650 {
651         int err;
652
653         err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
654         if (err && err != -EOPNOTSUPP)
655                 return err;
656
657         if (!br_vlan_is_brentry(vlan)) {
658                 /* Trying to change flags of non-existent bridge vlan */
659                 if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
660                         err = -EINVAL;
661                         goto err_flags;
662                 }
663                 /* It was only kept for port vlans, now make it real */
664                 err = br_fdb_insert(br, NULL, br->dev->dev_addr,
665                                     vlan->vid);
666                 if (err) {
667                         br_err(br, "failed to insert local address into bridge forwarding table\n");
668                         goto err_fdb_insert;
669                 }
670
671                 refcount_inc(&vlan->refcnt);
672                 vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
673                 vg->num_vlans++;
674                 *changed = true;
675         }
676
677         if (__vlan_add_flags(vlan, flags))
678                 *changed = true;
679
680         return 0;
681
682 err_fdb_insert:
683 err_flags:
684         br_switchdev_port_vlan_del(br->dev, vlan->vid);
685         return err;
686 }
687
688 /* Must be protected by RTNL.
689  * Must be called with vid in range from 1 to 4094 inclusive.
690  * changed must be true only if the vlan was created or updated
691  */
692 int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
693                 struct netlink_ext_ack *extack)
694 {
695         struct net_bridge_vlan_group *vg;
696         struct net_bridge_vlan *vlan;
697         int ret;
698
699         ASSERT_RTNL();
700
701         *changed = false;
702         vg = br_vlan_group(br);
703         vlan = br_vlan_find(vg, vid);
704         if (vlan)
705                 return br_vlan_add_existing(br, vg, vlan, flags, changed,
706                                             extack);
707
708         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
709         if (!vlan)
710                 return -ENOMEM;
711
712         vlan->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
713         if (!vlan->stats) {
714                 kfree(vlan);
715                 return -ENOMEM;
716         }
717         vlan->vid = vid;
718         vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
719         vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
720         vlan->br = br;
721         if (flags & BRIDGE_VLAN_INFO_BRENTRY)
722                 refcount_set(&vlan->refcnt, 1);
723         ret = __vlan_add(vlan, flags, extack);
724         if (ret) {
725                 free_percpu(vlan->stats);
726                 kfree(vlan);
727         } else {
728                 *changed = true;
729         }
730
731         return ret;
732 }
733
734 /* Must be protected by RTNL.
735  * Must be called with vid in range from 1 to 4094 inclusive.
736  */
737 int br_vlan_delete(struct net_bridge *br, u16 vid)
738 {
739         struct net_bridge_vlan_group *vg;
740         struct net_bridge_vlan *v;
741
742         ASSERT_RTNL();
743
744         vg = br_vlan_group(br);
745         v = br_vlan_find(vg, vid);
746         if (!v || !br_vlan_is_brentry(v))
747                 return -ENOENT;
748
749         br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
750         br_fdb_delete_by_port(br, NULL, vid, 0);
751
752         vlan_tunnel_info_del(vg, v);
753
754         return __vlan_del(v);
755 }
756
757 void br_vlan_flush(struct net_bridge *br)
758 {
759         struct net_bridge_vlan_group *vg;
760
761         ASSERT_RTNL();
762
763         vg = br_vlan_group(br);
764         __vlan_flush(br, NULL, vg);
765         RCU_INIT_POINTER(br->vlgrp, NULL);
766         synchronize_rcu();
767         __vlan_group_free(vg);
768 }
769
770 struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
771 {
772         if (!vg)
773                 return NULL;
774
775         return br_vlan_lookup(&vg->vlan_hash, vid);
776 }
777
778 /* Must be protected by RTNL. */
779 static void recalculate_group_addr(struct net_bridge *br)
780 {
781         if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
782                 return;
783
784         spin_lock_bh(&br->lock);
785         if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
786             br->vlan_proto == htons(ETH_P_8021Q)) {
787                 /* Bridge Group Address */
788                 br->group_addr[5] = 0x00;
789         } else { /* vlan_enabled && ETH_P_8021AD */
790                 /* Provider Bridge Group Address */
791                 br->group_addr[5] = 0x08;
792         }
793         spin_unlock_bh(&br->lock);
794 }
795
796 /* Must be protected by RTNL. */
797 void br_recalculate_fwd_mask(struct net_bridge *br)
798 {
799         if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
800             br->vlan_proto == htons(ETH_P_8021Q))
801                 br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
802         else /* vlan_enabled && ETH_P_8021AD */
803                 br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
804                                               ~(1u << br->group_addr[5]);
805 }
806
807 int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val,
808                           struct netlink_ext_ack *extack)
809 {
810         struct switchdev_attr attr = {
811                 .orig_dev = br->dev,
812                 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
813                 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
814                 .u.vlan_filtering = val,
815         };
816         int err;
817
818         if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
819                 return 0;
820
821         err = switchdev_port_attr_set(br->dev, &attr, extack);
822         if (err && err != -EOPNOTSUPP)
823                 return err;
824
825         br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
826         br_manage_promisc(br);
827         recalculate_group_addr(br);
828         br_recalculate_fwd_mask(br);
829
830         return 0;
831 }
832
833 bool br_vlan_enabled(const struct net_device *dev)
834 {
835         struct net_bridge *br = netdev_priv(dev);
836
837         return br_opt_get(br, BROPT_VLAN_ENABLED);
838 }
839 EXPORT_SYMBOL_GPL(br_vlan_enabled);
840
841 int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
842 {
843         struct net_bridge *br = netdev_priv(dev);
844
845         *p_proto = ntohs(br->vlan_proto);
846
847         return 0;
848 }
849 EXPORT_SYMBOL_GPL(br_vlan_get_proto);
850
851 int __br_vlan_set_proto(struct net_bridge *br, __be16 proto,
852                         struct netlink_ext_ack *extack)
853 {
854         struct switchdev_attr attr = {
855                 .orig_dev = br->dev,
856                 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL,
857                 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
858                 .u.vlan_protocol = ntohs(proto),
859         };
860         int err = 0;
861         struct net_bridge_port *p;
862         struct net_bridge_vlan *vlan;
863         struct net_bridge_vlan_group *vg;
864         __be16 oldproto = br->vlan_proto;
865
866         if (br->vlan_proto == proto)
867                 return 0;
868
869         err = switchdev_port_attr_set(br->dev, &attr, extack);
870         if (err && err != -EOPNOTSUPP)
871                 return err;
872
873         /* Add VLANs for the new proto to the device filter. */
874         list_for_each_entry(p, &br->port_list, list) {
875                 vg = nbp_vlan_group(p);
876                 list_for_each_entry(vlan, &vg->vlan_list, vlist) {
877                         err = vlan_vid_add(p->dev, proto, vlan->vid);
878                         if (err)
879                                 goto err_filt;
880                 }
881         }
882
883         br->vlan_proto = proto;
884
885         recalculate_group_addr(br);
886         br_recalculate_fwd_mask(br);
887
888         /* Delete VLANs for the old proto from the device filter. */
889         list_for_each_entry(p, &br->port_list, list) {
890                 vg = nbp_vlan_group(p);
891                 list_for_each_entry(vlan, &vg->vlan_list, vlist)
892                         vlan_vid_del(p->dev, oldproto, vlan->vid);
893         }
894
895         return 0;
896
897 err_filt:
898         attr.u.vlan_protocol = ntohs(oldproto);
899         switchdev_port_attr_set(br->dev, &attr, NULL);
900
901         list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
902                 vlan_vid_del(p->dev, proto, vlan->vid);
903
904         list_for_each_entry_continue_reverse(p, &br->port_list, list) {
905                 vg = nbp_vlan_group(p);
906                 list_for_each_entry(vlan, &vg->vlan_list, vlist)
907                         vlan_vid_del(p->dev, proto, vlan->vid);
908         }
909
910         return err;
911 }
912
913 int br_vlan_set_proto(struct net_bridge *br, unsigned long val,
914                       struct netlink_ext_ack *extack)
915 {
916         if (!eth_type_vlan(htons(val)))
917                 return -EPROTONOSUPPORT;
918
919         return __br_vlan_set_proto(br, htons(val), extack);
920 }
921
922 int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
923 {
924         switch (val) {
925         case 0:
926         case 1:
927                 br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
928                 break;
929         default:
930                 return -EINVAL;
931         }
932
933         return 0;
934 }
935
936 int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
937 {
938         struct net_bridge_port *p;
939
940         /* allow to change the option if there are no port vlans configured */
941         list_for_each_entry(p, &br->port_list, list) {
942                 struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
943
944                 if (vg->num_vlans)
945                         return -EBUSY;
946         }
947
948         switch (val) {
949         case 0:
950         case 1:
951                 br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
952                 break;
953         default:
954                 return -EINVAL;
955         }
956
957         return 0;
958 }
959
960 static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
961 {
962         struct net_bridge_vlan *v;
963
964         if (vid != vg->pvid)
965                 return false;
966
967         v = br_vlan_lookup(&vg->vlan_hash, vid);
968         if (v && br_vlan_should_use(v) &&
969             (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
970                 return true;
971
972         return false;
973 }
974
975 static void br_vlan_disable_default_pvid(struct net_bridge *br)
976 {
977         struct net_bridge_port *p;
978         u16 pvid = br->default_pvid;
979
980         /* Disable default_pvid on all ports where it is still
981          * configured.
982          */
983         if (vlan_default_pvid(br_vlan_group(br), pvid)) {
984                 if (!br_vlan_delete(br, pvid))
985                         br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
986         }
987
988         list_for_each_entry(p, &br->port_list, list) {
989                 if (vlan_default_pvid(nbp_vlan_group(p), pvid) &&
990                     !nbp_vlan_delete(p, pvid))
991                         br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
992         }
993
994         br->default_pvid = 0;
995 }
996
997 int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
998                                struct netlink_ext_ack *extack)
999 {
1000         const struct net_bridge_vlan *pvent;
1001         struct net_bridge_vlan_group *vg;
1002         struct net_bridge_port *p;
1003         unsigned long *changed;
1004         bool vlchange;
1005         u16 old_pvid;
1006         int err = 0;
1007
1008         if (!pvid) {
1009                 br_vlan_disable_default_pvid(br);
1010                 return 0;
1011         }
1012
1013         changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
1014         if (!changed)
1015                 return -ENOMEM;
1016
1017         old_pvid = br->default_pvid;
1018
1019         /* Update default_pvid config only if we do not conflict with
1020          * user configuration.
1021          */
1022         vg = br_vlan_group(br);
1023         pvent = br_vlan_find(vg, pvid);
1024         if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
1025             (!pvent || !br_vlan_should_use(pvent))) {
1026                 err = br_vlan_add(br, pvid,
1027                                   BRIDGE_VLAN_INFO_PVID |
1028                                   BRIDGE_VLAN_INFO_UNTAGGED |
1029                                   BRIDGE_VLAN_INFO_BRENTRY,
1030                                   &vlchange, extack);
1031                 if (err)
1032                         goto out;
1033
1034                 if (br_vlan_delete(br, old_pvid))
1035                         br_vlan_notify(br, NULL, old_pvid, 0, RTM_DELVLAN);
1036                 br_vlan_notify(br, NULL, pvid, 0, RTM_NEWVLAN);
1037                 set_bit(0, changed);
1038         }
1039
1040         list_for_each_entry(p, &br->port_list, list) {
1041                 /* Update default_pvid config only if we do not conflict with
1042                  * user configuration.
1043                  */
1044                 vg = nbp_vlan_group(p);
1045                 if ((old_pvid &&
1046                      !vlan_default_pvid(vg, old_pvid)) ||
1047                     br_vlan_find(vg, pvid))
1048                         continue;
1049
1050                 err = nbp_vlan_add(p, pvid,
1051                                    BRIDGE_VLAN_INFO_PVID |
1052                                    BRIDGE_VLAN_INFO_UNTAGGED,
1053                                    &vlchange, extack);
1054                 if (err)
1055                         goto err_port;
1056                 if (nbp_vlan_delete(p, old_pvid))
1057                         br_vlan_notify(br, p, old_pvid, 0, RTM_DELVLAN);
1058                 br_vlan_notify(p->br, p, pvid, 0, RTM_NEWVLAN);
1059                 set_bit(p->port_no, changed);
1060         }
1061
1062         br->default_pvid = pvid;
1063
1064 out:
1065         bitmap_free(changed);
1066         return err;
1067
1068 err_port:
1069         list_for_each_entry_continue_reverse(p, &br->port_list, list) {
1070                 if (!test_bit(p->port_no, changed))
1071                         continue;
1072
1073                 if (old_pvid) {
1074                         nbp_vlan_add(p, old_pvid,
1075                                      BRIDGE_VLAN_INFO_PVID |
1076                                      BRIDGE_VLAN_INFO_UNTAGGED,
1077                                      &vlchange, NULL);
1078                         br_vlan_notify(p->br, p, old_pvid, 0, RTM_NEWVLAN);
1079                 }
1080                 nbp_vlan_delete(p, pvid);
1081                 br_vlan_notify(br, p, pvid, 0, RTM_DELVLAN);
1082         }
1083
1084         if (test_bit(0, changed)) {
1085                 if (old_pvid) {
1086                         br_vlan_add(br, old_pvid,
1087                                     BRIDGE_VLAN_INFO_PVID |
1088                                     BRIDGE_VLAN_INFO_UNTAGGED |
1089                                     BRIDGE_VLAN_INFO_BRENTRY,
1090                                     &vlchange, NULL);
1091                         br_vlan_notify(br, NULL, old_pvid, 0, RTM_NEWVLAN);
1092                 }
1093                 br_vlan_delete(br, pvid);
1094                 br_vlan_notify(br, NULL, pvid, 0, RTM_DELVLAN);
1095         }
1096         goto out;
1097 }
1098
1099 int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val,
1100                              struct netlink_ext_ack *extack)
1101 {
1102         u16 pvid = val;
1103         int err = 0;
1104
1105         if (val >= VLAN_VID_MASK)
1106                 return -EINVAL;
1107
1108         if (pvid == br->default_pvid)
1109                 goto out;
1110
1111         /* Only allow default pvid change when filtering is disabled */
1112         if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1113                 pr_info_once("Please disable vlan filtering to change default_pvid\n");
1114                 err = -EPERM;
1115                 goto out;
1116         }
1117         err = __br_vlan_set_default_pvid(br, pvid, extack);
1118 out:
1119         return err;
1120 }
1121
1122 int br_vlan_init(struct net_bridge *br)
1123 {
1124         struct net_bridge_vlan_group *vg;
1125         int ret = -ENOMEM;
1126
1127         vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1128         if (!vg)
1129                 goto out;
1130         ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1131         if (ret)
1132                 goto err_rhtbl;
1133         ret = vlan_tunnel_init(vg);
1134         if (ret)
1135                 goto err_tunnel_init;
1136         INIT_LIST_HEAD(&vg->vlan_list);
1137         br->vlan_proto = htons(ETH_P_8021Q);
1138         br->default_pvid = 1;
1139         rcu_assign_pointer(br->vlgrp, vg);
1140
1141 out:
1142         return ret;
1143
1144 err_tunnel_init:
1145         rhashtable_destroy(&vg->vlan_hash);
1146 err_rhtbl:
1147         kfree(vg);
1148
1149         goto out;
1150 }
1151
1152 int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1153 {
1154         struct switchdev_attr attr = {
1155                 .orig_dev = p->br->dev,
1156                 .id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1157                 .flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1158                 .u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1159         };
1160         struct net_bridge_vlan_group *vg;
1161         int ret = -ENOMEM;
1162
1163         vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1164         if (!vg)
1165                 goto out;
1166
1167         ret = switchdev_port_attr_set(p->dev, &attr, extack);
1168         if (ret && ret != -EOPNOTSUPP)
1169                 goto err_vlan_enabled;
1170
1171         ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1172         if (ret)
1173                 goto err_rhtbl;
1174         ret = vlan_tunnel_init(vg);
1175         if (ret)
1176                 goto err_tunnel_init;
1177         INIT_LIST_HEAD(&vg->vlan_list);
1178         rcu_assign_pointer(p->vlgrp, vg);
1179         if (p->br->default_pvid) {
1180                 bool changed;
1181
1182                 ret = nbp_vlan_add(p, p->br->default_pvid,
1183                                    BRIDGE_VLAN_INFO_PVID |
1184                                    BRIDGE_VLAN_INFO_UNTAGGED,
1185                                    &changed, extack);
1186                 if (ret)
1187                         goto err_vlan_add;
1188                 br_vlan_notify(p->br, p, p->br->default_pvid, 0, RTM_NEWVLAN);
1189         }
1190 out:
1191         return ret;
1192
1193 err_vlan_add:
1194         RCU_INIT_POINTER(p->vlgrp, NULL);
1195         synchronize_rcu();
1196         vlan_tunnel_deinit(vg);
1197 err_tunnel_init:
1198         rhashtable_destroy(&vg->vlan_hash);
1199 err_rhtbl:
1200 err_vlan_enabled:
1201         kfree(vg);
1202
1203         goto out;
1204 }
1205
1206 /* Must be protected by RTNL.
1207  * Must be called with vid in range from 1 to 4094 inclusive.
1208  * changed must be true only if the vlan was created or updated
1209  */
1210 int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1211                  bool *changed, struct netlink_ext_ack *extack)
1212 {
1213         struct net_bridge_vlan *vlan;
1214         int ret;
1215
1216         ASSERT_RTNL();
1217
1218         *changed = false;
1219         vlan = br_vlan_find(nbp_vlan_group(port), vid);
1220         if (vlan) {
1221                 /* Pass the flags to the hardware bridge */
1222                 ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
1223                 if (ret && ret != -EOPNOTSUPP)
1224                         return ret;
1225                 *changed = __vlan_add_flags(vlan, flags);
1226
1227                 return 0;
1228         }
1229
1230         vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1231         if (!vlan)
1232                 return -ENOMEM;
1233
1234         vlan->vid = vid;
1235         vlan->port = port;
1236         ret = __vlan_add(vlan, flags, extack);
1237         if (ret)
1238                 kfree(vlan);
1239         else
1240                 *changed = true;
1241
1242         return ret;
1243 }
1244
1245 /* Must be protected by RTNL.
1246  * Must be called with vid in range from 1 to 4094 inclusive.
1247  */
1248 int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1249 {
1250         struct net_bridge_vlan *v;
1251
1252         ASSERT_RTNL();
1253
1254         v = br_vlan_find(nbp_vlan_group(port), vid);
1255         if (!v)
1256                 return -ENOENT;
1257         br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1258         br_fdb_delete_by_port(port->br, port, vid, 0);
1259
1260         return __vlan_del(v);
1261 }
1262
1263 void nbp_vlan_flush(struct net_bridge_port *port)
1264 {
1265         struct net_bridge_vlan_group *vg;
1266
1267         ASSERT_RTNL();
1268
1269         vg = nbp_vlan_group(port);
1270         __vlan_flush(port->br, port, vg);
1271         RCU_INIT_POINTER(port->vlgrp, NULL);
1272         synchronize_rcu();
1273         __vlan_group_free(vg);
1274 }
1275
1276 void br_vlan_get_stats(const struct net_bridge_vlan *v,
1277                        struct pcpu_sw_netstats *stats)
1278 {
1279         int i;
1280
1281         memset(stats, 0, sizeof(*stats));
1282         for_each_possible_cpu(i) {
1283                 u64 rxpackets, rxbytes, txpackets, txbytes;
1284                 struct pcpu_sw_netstats *cpu_stats;
1285                 unsigned int start;
1286
1287                 cpu_stats = per_cpu_ptr(v->stats, i);
1288                 do {
1289                         start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1290                         rxpackets = cpu_stats->rx_packets;
1291                         rxbytes = cpu_stats->rx_bytes;
1292                         txbytes = cpu_stats->tx_bytes;
1293                         txpackets = cpu_stats->tx_packets;
1294                 } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1295
1296                 stats->rx_packets += rxpackets;
1297                 stats->rx_bytes += rxbytes;
1298                 stats->tx_bytes += txbytes;
1299                 stats->tx_packets += txpackets;
1300         }
1301 }
1302
1303 int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1304 {
1305         struct net_bridge_vlan_group *vg;
1306         struct net_bridge_port *p;
1307
1308         ASSERT_RTNL();
1309         p = br_port_get_check_rtnl(dev);
1310         if (p)
1311                 vg = nbp_vlan_group(p);
1312         else if (netif_is_bridge_master(dev))
1313                 vg = br_vlan_group(netdev_priv(dev));
1314         else
1315                 return -EINVAL;
1316
1317         *p_pvid = br_get_pvid(vg);
1318         return 0;
1319 }
1320 EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1321
1322 int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1323 {
1324         struct net_bridge_vlan_group *vg;
1325         struct net_bridge_port *p;
1326
1327         p = br_port_get_check_rcu(dev);
1328         if (p)
1329                 vg = nbp_vlan_group_rcu(p);
1330         else if (netif_is_bridge_master(dev))
1331                 vg = br_vlan_group_rcu(netdev_priv(dev));
1332         else
1333                 return -EINVAL;
1334
1335         *p_pvid = br_get_pvid(vg);
1336         return 0;
1337 }
1338 EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
1339
1340 void br_vlan_fill_forward_path_pvid(struct net_bridge *br,
1341                                     struct net_device_path_ctx *ctx,
1342                                     struct net_device_path *path)
1343 {
1344         struct net_bridge_vlan_group *vg;
1345         int idx = ctx->num_vlans - 1;
1346         u16 vid;
1347
1348         path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
1349
1350         if (!br_opt_get(br, BROPT_VLAN_ENABLED))
1351                 return;
1352
1353         vg = br_vlan_group(br);
1354
1355         if (idx >= 0 &&
1356             ctx->vlan[idx].proto == br->vlan_proto) {
1357                 vid = ctx->vlan[idx].id;
1358         } else {
1359                 path->bridge.vlan_mode = DEV_PATH_BR_VLAN_TAG;
1360                 vid = br_get_pvid(vg);
1361         }
1362
1363         path->bridge.vlan_id = vid;
1364         path->bridge.vlan_proto = br->vlan_proto;
1365 }
1366
1367 int br_vlan_fill_forward_path_mode(struct net_bridge *br,
1368                                    struct net_bridge_port *dst,
1369                                    struct net_device_path *path)
1370 {
1371         struct net_bridge_vlan_group *vg;
1372         struct net_bridge_vlan *v;
1373
1374         if (!br_opt_get(br, BROPT_VLAN_ENABLED))
1375                 return 0;
1376
1377         vg = nbp_vlan_group_rcu(dst);
1378         v = br_vlan_find(vg, path->bridge.vlan_id);
1379         if (!v || !br_vlan_should_use(v))
1380                 return -EINVAL;
1381
1382         if (!(v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
1383                 return 0;
1384
1385         if (path->bridge.vlan_mode == DEV_PATH_BR_VLAN_TAG)
1386                 path->bridge.vlan_mode = DEV_PATH_BR_VLAN_KEEP;
1387         else if (v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV)
1388                 path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG_HW;
1389         else
1390                 path->bridge.vlan_mode = DEV_PATH_BR_VLAN_UNTAG;
1391
1392         return 0;
1393 }
1394
1395 int br_vlan_get_info(const struct net_device *dev, u16 vid,
1396                      struct bridge_vlan_info *p_vinfo)
1397 {
1398         struct net_bridge_vlan_group *vg;
1399         struct net_bridge_vlan *v;
1400         struct net_bridge_port *p;
1401
1402         ASSERT_RTNL();
1403         p = br_port_get_check_rtnl(dev);
1404         if (p)
1405                 vg = nbp_vlan_group(p);
1406         else if (netif_is_bridge_master(dev))
1407                 vg = br_vlan_group(netdev_priv(dev));
1408         else
1409                 return -EINVAL;
1410
1411         v = br_vlan_find(vg, vid);
1412         if (!v)
1413                 return -ENOENT;
1414
1415         p_vinfo->vid = vid;
1416         p_vinfo->flags = v->flags;
1417         if (vid == br_get_pvid(vg))
1418                 p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1419         return 0;
1420 }
1421 EXPORT_SYMBOL_GPL(br_vlan_get_info);
1422
1423 static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1424 {
1425         return is_vlan_dev(dev) &&
1426                 !!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1427 }
1428
1429 static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1430                                __always_unused struct netdev_nested_priv *priv)
1431 {
1432         return br_vlan_is_bind_vlan_dev(dev);
1433 }
1434
1435 static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1436 {
1437         int found;
1438
1439         rcu_read_lock();
1440         found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1441                                               NULL);
1442         rcu_read_unlock();
1443
1444         return !!found;
1445 }
1446
1447 struct br_vlan_bind_walk_data {
1448         u16 vid;
1449         struct net_device *result;
1450 };
1451
1452 static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1453                                           struct netdev_nested_priv *priv)
1454 {
1455         struct br_vlan_bind_walk_data *data = priv->data;
1456         int found = 0;
1457
1458         if (br_vlan_is_bind_vlan_dev(dev) &&
1459             vlan_dev_priv(dev)->vlan_id == data->vid) {
1460                 data->result = dev;
1461                 found = 1;
1462         }
1463
1464         return found;
1465 }
1466
1467 static struct net_device *
1468 br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1469 {
1470         struct br_vlan_bind_walk_data data = {
1471                 .vid = vid,
1472         };
1473         struct netdev_nested_priv priv = {
1474                 .data = (void *)&data,
1475         };
1476
1477         rcu_read_lock();
1478         netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1479                                       &priv);
1480         rcu_read_unlock();
1481
1482         return data.result;
1483 }
1484
1485 static bool br_vlan_is_dev_up(const struct net_device *dev)
1486 {
1487         return  !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1488 }
1489
1490 static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1491                                        struct net_device *vlan_dev)
1492 {
1493         u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1494         struct net_bridge_vlan_group *vg;
1495         struct net_bridge_port *p;
1496         bool has_carrier = false;
1497
1498         if (!netif_carrier_ok(br->dev)) {
1499                 netif_carrier_off(vlan_dev);
1500                 return;
1501         }
1502
1503         list_for_each_entry(p, &br->port_list, list) {
1504                 vg = nbp_vlan_group(p);
1505                 if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1506                         has_carrier = true;
1507                         break;
1508                 }
1509         }
1510
1511         if (has_carrier)
1512                 netif_carrier_on(vlan_dev);
1513         else
1514                 netif_carrier_off(vlan_dev);
1515 }
1516
1517 static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1518 {
1519         struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1520         struct net_bridge_vlan *vlan;
1521         struct net_device *vlan_dev;
1522
1523         list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1524                 vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1525                                                            vlan->vid);
1526                 if (vlan_dev) {
1527                         if (br_vlan_is_dev_up(p->dev)) {
1528                                 if (netif_carrier_ok(p->br->dev))
1529                                         netif_carrier_on(vlan_dev);
1530                         } else {
1531                                 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1532                         }
1533                 }
1534         }
1535 }
1536
1537 static void br_vlan_upper_change(struct net_device *dev,
1538                                  struct net_device *upper_dev,
1539                                  bool linking)
1540 {
1541         struct net_bridge *br = netdev_priv(dev);
1542
1543         if (!br_vlan_is_bind_vlan_dev(upper_dev))
1544                 return;
1545
1546         if (linking) {
1547                 br_vlan_set_vlan_dev_state(br, upper_dev);
1548                 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1549         } else {
1550                 br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1551                               br_vlan_has_upper_bind_vlan_dev(dev));
1552         }
1553 }
1554
1555 struct br_vlan_link_state_walk_data {
1556         struct net_bridge *br;
1557 };
1558
1559 static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1560                                         struct netdev_nested_priv *priv)
1561 {
1562         struct br_vlan_link_state_walk_data *data = priv->data;
1563
1564         if (br_vlan_is_bind_vlan_dev(vlan_dev))
1565                 br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1566
1567         return 0;
1568 }
1569
1570 static void br_vlan_link_state_change(struct net_device *dev,
1571                                       struct net_bridge *br)
1572 {
1573         struct br_vlan_link_state_walk_data data = {
1574                 .br = br
1575         };
1576         struct netdev_nested_priv priv = {
1577                 .data = (void *)&data,
1578         };
1579
1580         rcu_read_lock();
1581         netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1582                                       &priv);
1583         rcu_read_unlock();
1584 }
1585
1586 /* Must be protected by RTNL. */
1587 static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1588 {
1589         struct net_device *vlan_dev;
1590
1591         if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1592                 return;
1593
1594         vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1595         if (vlan_dev)
1596                 br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1597 }
1598
1599 /* Must be protected by RTNL. */
1600 int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1601 {
1602         struct netdev_notifier_changeupper_info *info;
1603         struct net_bridge *br = netdev_priv(dev);
1604         int vlcmd = 0, ret = 0;
1605         bool changed = false;
1606
1607         switch (event) {
1608         case NETDEV_REGISTER:
1609                 ret = br_vlan_add(br, br->default_pvid,
1610                                   BRIDGE_VLAN_INFO_PVID |
1611                                   BRIDGE_VLAN_INFO_UNTAGGED |
1612                                   BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1613                 vlcmd = RTM_NEWVLAN;
1614                 break;
1615         case NETDEV_UNREGISTER:
1616                 changed = !br_vlan_delete(br, br->default_pvid);
1617                 vlcmd = RTM_DELVLAN;
1618                 break;
1619         case NETDEV_CHANGEUPPER:
1620                 info = ptr;
1621                 br_vlan_upper_change(dev, info->upper_dev, info->linking);
1622                 break;
1623
1624         case NETDEV_CHANGE:
1625         case NETDEV_UP:
1626                 if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1627                         break;
1628                 br_vlan_link_state_change(dev, br);
1629                 break;
1630         }
1631         if (changed)
1632                 br_vlan_notify(br, NULL, br->default_pvid, 0, vlcmd);
1633
1634         return ret;
1635 }
1636
1637 /* Must be protected by RTNL. */
1638 void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1639 {
1640         if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1641                 return;
1642
1643         switch (event) {
1644         case NETDEV_CHANGE:
1645         case NETDEV_DOWN:
1646         case NETDEV_UP:
1647                 br_vlan_set_all_vlan_dev_state(p);
1648                 break;
1649         }
1650 }
1651
1652 static bool br_vlan_stats_fill(struct sk_buff *skb,
1653                                const struct net_bridge_vlan *v)
1654 {
1655         struct pcpu_sw_netstats stats;
1656         struct nlattr *nest;
1657
1658         nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_STATS);
1659         if (!nest)
1660                 return false;
1661
1662         br_vlan_get_stats(v, &stats);
1663         if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_BYTES, stats.rx_bytes,
1664                               BRIDGE_VLANDB_STATS_PAD) ||
1665             nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_RX_PACKETS,
1666                               stats.rx_packets, BRIDGE_VLANDB_STATS_PAD) ||
1667             nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_BYTES, stats.tx_bytes,
1668                               BRIDGE_VLANDB_STATS_PAD) ||
1669             nla_put_u64_64bit(skb, BRIDGE_VLANDB_STATS_TX_PACKETS,
1670                               stats.tx_packets, BRIDGE_VLANDB_STATS_PAD))
1671                 goto out_err;
1672
1673         nla_nest_end(skb, nest);
1674
1675         return true;
1676
1677 out_err:
1678         nla_nest_cancel(skb, nest);
1679         return false;
1680 }
1681
1682 /* v_opts is used to dump the options which must be equal in the whole range */
1683 static bool br_vlan_fill_vids(struct sk_buff *skb, u16 vid, u16 vid_range,
1684                               const struct net_bridge_vlan *v_opts,
1685                               u16 flags,
1686                               bool dump_stats)
1687 {
1688         struct bridge_vlan_info info;
1689         struct nlattr *nest;
1690
1691         nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY);
1692         if (!nest)
1693                 return false;
1694
1695         memset(&info, 0, sizeof(info));
1696         info.vid = vid;
1697         if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
1698                 info.flags |= BRIDGE_VLAN_INFO_UNTAGGED;
1699         if (flags & BRIDGE_VLAN_INFO_PVID)
1700                 info.flags |= BRIDGE_VLAN_INFO_PVID;
1701
1702         if (nla_put(skb, BRIDGE_VLANDB_ENTRY_INFO, sizeof(info), &info))
1703                 goto out_err;
1704
1705         if (vid_range && vid < vid_range &&
1706             !(flags & BRIDGE_VLAN_INFO_PVID) &&
1707             nla_put_u16(skb, BRIDGE_VLANDB_ENTRY_RANGE, vid_range))
1708                 goto out_err;
1709
1710         if (v_opts) {
1711                 if (!br_vlan_opts_fill(skb, v_opts))
1712                         goto out_err;
1713
1714                 if (dump_stats && !br_vlan_stats_fill(skb, v_opts))
1715                         goto out_err;
1716         }
1717
1718         nla_nest_end(skb, nest);
1719
1720         return true;
1721
1722 out_err:
1723         nla_nest_cancel(skb, nest);
1724         return false;
1725 }
1726
1727 static size_t rtnl_vlan_nlmsg_size(void)
1728 {
1729         return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
1730                 + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY */
1731                 + nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_ENTRY_RANGE */
1732                 + nla_total_size(sizeof(struct bridge_vlan_info)) /* BRIDGE_VLANDB_ENTRY_INFO */
1733                 + br_vlan_opts_nl_size(); /* bridge vlan options */
1734 }
1735
1736 void br_vlan_notify(const struct net_bridge *br,
1737                     const struct net_bridge_port *p,
1738                     u16 vid, u16 vid_range,
1739                     int cmd)
1740 {
1741         struct net_bridge_vlan_group *vg;
1742         struct net_bridge_vlan *v = NULL;
1743         struct br_vlan_msg *bvm;
1744         struct nlmsghdr *nlh;
1745         struct sk_buff *skb;
1746         int err = -ENOBUFS;
1747         struct net *net;
1748         u16 flags = 0;
1749         int ifindex;
1750
1751         /* right now notifications are done only with rtnl held */
1752         ASSERT_RTNL();
1753
1754         if (p) {
1755                 ifindex = p->dev->ifindex;
1756                 vg = nbp_vlan_group(p);
1757                 net = dev_net(p->dev);
1758         } else {
1759                 ifindex = br->dev->ifindex;
1760                 vg = br_vlan_group(br);
1761                 net = dev_net(br->dev);
1762         }
1763
1764         skb = nlmsg_new(rtnl_vlan_nlmsg_size(), GFP_KERNEL);
1765         if (!skb)
1766                 goto out_err;
1767
1768         err = -EMSGSIZE;
1769         nlh = nlmsg_put(skb, 0, 0, cmd, sizeof(*bvm), 0);
1770         if (!nlh)
1771                 goto out_err;
1772         bvm = nlmsg_data(nlh);
1773         memset(bvm, 0, sizeof(*bvm));
1774         bvm->family = AF_BRIDGE;
1775         bvm->ifindex = ifindex;
1776
1777         switch (cmd) {
1778         case RTM_NEWVLAN:
1779                 /* need to find the vlan due to flags/options */
1780                 v = br_vlan_find(vg, vid);
1781                 if (!v || !br_vlan_should_use(v))
1782                         goto out_kfree;
1783
1784                 flags = v->flags;
1785                 if (br_get_pvid(vg) == v->vid)
1786                         flags |= BRIDGE_VLAN_INFO_PVID;
1787                 break;
1788         case RTM_DELVLAN:
1789                 break;
1790         default:
1791                 goto out_kfree;
1792         }
1793
1794         if (!br_vlan_fill_vids(skb, vid, vid_range, v, flags, false))
1795                 goto out_err;
1796
1797         nlmsg_end(skb, nlh);
1798         rtnl_notify(skb, net, 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
1799         return;
1800
1801 out_err:
1802         rtnl_set_sk_err(net, RTNLGRP_BRVLAN, err);
1803 out_kfree:
1804         kfree_skb(skb);
1805 }
1806
1807 static int br_vlan_replay_one(struct notifier_block *nb,
1808                               struct net_device *dev,
1809                               struct switchdev_obj_port_vlan *vlan,
1810                               const void *ctx, unsigned long action,
1811                               struct netlink_ext_ack *extack)
1812 {
1813         struct switchdev_notifier_port_obj_info obj_info = {
1814                 .info = {
1815                         .dev = dev,
1816                         .extack = extack,
1817                         .ctx = ctx,
1818                 },
1819                 .obj = &vlan->obj,
1820         };
1821         int err;
1822
1823         err = nb->notifier_call(nb, action, &obj_info);
1824         return notifier_to_errno(err);
1825 }
1826
1827 int br_vlan_replay(struct net_device *br_dev, struct net_device *dev,
1828                    const void *ctx, bool adding, struct notifier_block *nb,
1829                    struct netlink_ext_ack *extack)
1830 {
1831         struct net_bridge_vlan_group *vg;
1832         struct net_bridge_vlan *v;
1833         struct net_bridge_port *p;
1834         struct net_bridge *br;
1835         unsigned long action;
1836         int err = 0;
1837         u16 pvid;
1838
1839         ASSERT_RTNL();
1840
1841         if (!netif_is_bridge_master(br_dev))
1842                 return -EINVAL;
1843
1844         if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
1845                 return -EINVAL;
1846
1847         if (netif_is_bridge_master(dev)) {
1848                 br = netdev_priv(dev);
1849                 vg = br_vlan_group(br);
1850                 p = NULL;
1851         } else {
1852                 p = br_port_get_rtnl(dev);
1853                 if (WARN_ON(!p))
1854                         return -EINVAL;
1855                 vg = nbp_vlan_group(p);
1856                 br = p->br;
1857         }
1858
1859         if (!vg)
1860                 return 0;
1861
1862         if (adding)
1863                 action = SWITCHDEV_PORT_OBJ_ADD;
1864         else
1865                 action = SWITCHDEV_PORT_OBJ_DEL;
1866
1867         pvid = br_get_pvid(vg);
1868
1869         list_for_each_entry(v, &vg->vlan_list, vlist) {
1870                 struct switchdev_obj_port_vlan vlan = {
1871                         .obj.orig_dev = dev,
1872                         .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
1873                         .flags = br_vlan_flags(v, pvid),
1874                         .vid = v->vid,
1875                 };
1876
1877                 if (!br_vlan_should_use(v))
1878                         continue;
1879
1880                 err = br_vlan_replay_one(nb, dev, &vlan, ctx, action, extack);
1881                 if (err)
1882                         return err;
1883         }
1884
1885         return err;
1886 }
1887 EXPORT_SYMBOL_GPL(br_vlan_replay);
1888
1889 /* check if v_curr can enter a range ending in range_end */
1890 bool br_vlan_can_enter_range(const struct net_bridge_vlan *v_curr,
1891                              const struct net_bridge_vlan *range_end)
1892 {
1893         return v_curr->vid - range_end->vid == 1 &&
1894                range_end->flags == v_curr->flags &&
1895                br_vlan_opts_eq_range(v_curr, range_end);
1896 }
1897
1898 static int br_vlan_dump_dev(const struct net_device *dev,
1899                             struct sk_buff *skb,
1900                             struct netlink_callback *cb,
1901                             u32 dump_flags)
1902 {
1903         struct net_bridge_vlan *v, *range_start = NULL, *range_end = NULL;
1904         bool dump_stats = !!(dump_flags & BRIDGE_VLANDB_DUMPF_STATS);
1905         struct net_bridge_vlan_group *vg;
1906         int idx = 0, s_idx = cb->args[1];
1907         struct nlmsghdr *nlh = NULL;
1908         struct net_bridge_port *p;
1909         struct br_vlan_msg *bvm;
1910         struct net_bridge *br;
1911         int err = 0;
1912         u16 pvid;
1913
1914         if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev))
1915                 return -EINVAL;
1916
1917         if (netif_is_bridge_master(dev)) {
1918                 br = netdev_priv(dev);
1919                 vg = br_vlan_group_rcu(br);
1920                 p = NULL;
1921         } else {
1922                 p = br_port_get_rcu(dev);
1923                 if (WARN_ON(!p))
1924                         return -EINVAL;
1925                 vg = nbp_vlan_group_rcu(p);
1926                 br = p->br;
1927         }
1928
1929         if (!vg)
1930                 return 0;
1931
1932         nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
1933                         RTM_NEWVLAN, sizeof(*bvm), NLM_F_MULTI);
1934         if (!nlh)
1935                 return -EMSGSIZE;
1936         bvm = nlmsg_data(nlh);
1937         memset(bvm, 0, sizeof(*bvm));
1938         bvm->family = PF_BRIDGE;
1939         bvm->ifindex = dev->ifindex;
1940         pvid = br_get_pvid(vg);
1941
1942         /* idx must stay at range's beginning until it is filled in */
1943         list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
1944                 if (!br_vlan_should_use(v))
1945                         continue;
1946                 if (idx < s_idx) {
1947                         idx++;
1948                         continue;
1949                 }
1950
1951                 if (!range_start) {
1952                         range_start = v;
1953                         range_end = v;
1954                         continue;
1955                 }
1956
1957                 if (dump_stats || v->vid == pvid ||
1958                     !br_vlan_can_enter_range(v, range_end)) {
1959                         u16 vlan_flags = br_vlan_flags(range_start, pvid);
1960
1961                         if (!br_vlan_fill_vids(skb, range_start->vid,
1962                                                range_end->vid, range_start,
1963                                                vlan_flags, dump_stats)) {
1964                                 err = -EMSGSIZE;
1965                                 break;
1966                         }
1967                         /* advance number of filled vlans */
1968                         idx += range_end->vid - range_start->vid + 1;
1969
1970                         range_start = v;
1971                 }
1972                 range_end = v;
1973         }
1974
1975         /* err will be 0 and range_start will be set in 3 cases here:
1976          * - first vlan (range_start == range_end)
1977          * - last vlan (range_start == range_end, not in range)
1978          * - last vlan range (range_start != range_end, in range)
1979          */
1980         if (!err && range_start &&
1981             !br_vlan_fill_vids(skb, range_start->vid, range_end->vid,
1982                                range_start, br_vlan_flags(range_start, pvid),
1983                                dump_stats))
1984                 err = -EMSGSIZE;
1985
1986         cb->args[1] = err ? idx : 0;
1987
1988         nlmsg_end(skb, nlh);
1989
1990         return err;
1991 }
1992
1993 static const struct nla_policy br_vlan_db_dump_pol[BRIDGE_VLANDB_DUMP_MAX + 1] = {
1994         [BRIDGE_VLANDB_DUMP_FLAGS] = { .type = NLA_U32 },
1995 };
1996
1997 static int br_vlan_rtm_dump(struct sk_buff *skb, struct netlink_callback *cb)
1998 {
1999         struct nlattr *dtb[BRIDGE_VLANDB_DUMP_MAX + 1];
2000         int idx = 0, err = 0, s_idx = cb->args[0];
2001         struct net *net = sock_net(skb->sk);
2002         struct br_vlan_msg *bvm;
2003         struct net_device *dev;
2004         u32 dump_flags = 0;
2005
2006         err = nlmsg_parse(cb->nlh, sizeof(*bvm), dtb, BRIDGE_VLANDB_DUMP_MAX,
2007                           br_vlan_db_dump_pol, cb->extack);
2008         if (err < 0)
2009                 return err;
2010
2011         bvm = nlmsg_data(cb->nlh);
2012         if (dtb[BRIDGE_VLANDB_DUMP_FLAGS])
2013                 dump_flags = nla_get_u32(dtb[BRIDGE_VLANDB_DUMP_FLAGS]);
2014
2015         rcu_read_lock();
2016         if (bvm->ifindex) {
2017                 dev = dev_get_by_index_rcu(net, bvm->ifindex);
2018                 if (!dev) {
2019                         err = -ENODEV;
2020                         goto out_err;
2021                 }
2022                 err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
2023                 if (err && err != -EMSGSIZE)
2024                         goto out_err;
2025         } else {
2026                 for_each_netdev_rcu(net, dev) {
2027                         if (idx < s_idx)
2028                                 goto skip;
2029
2030                         err = br_vlan_dump_dev(dev, skb, cb, dump_flags);
2031                         if (err == -EMSGSIZE)
2032                                 break;
2033 skip:
2034                         idx++;
2035                 }
2036         }
2037         cb->args[0] = idx;
2038         rcu_read_unlock();
2039
2040         return skb->len;
2041
2042 out_err:
2043         rcu_read_unlock();
2044
2045         return err;
2046 }
2047
2048 static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
2049         [BRIDGE_VLANDB_ENTRY_INFO]      =
2050                 NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
2051         [BRIDGE_VLANDB_ENTRY_RANGE]     = { .type = NLA_U16 },
2052         [BRIDGE_VLANDB_ENTRY_STATE]     = { .type = NLA_U8 },
2053         [BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
2054 };
2055
2056 static int br_vlan_rtm_process_one(struct net_device *dev,
2057                                    const struct nlattr *attr,
2058                                    int cmd, struct netlink_ext_ack *extack)
2059 {
2060         struct bridge_vlan_info *vinfo, vrange_end, *vinfo_last = NULL;
2061         struct nlattr *tb[BRIDGE_VLANDB_ENTRY_MAX + 1];
2062         bool changed = false, skip_processing = false;
2063         struct net_bridge_vlan_group *vg;
2064         struct net_bridge_port *p = NULL;
2065         int err = 0, cmdmap = 0;
2066         struct net_bridge *br;
2067
2068         if (netif_is_bridge_master(dev)) {
2069                 br = netdev_priv(dev);
2070                 vg = br_vlan_group(br);
2071         } else {
2072                 p = br_port_get_rtnl(dev);
2073                 if (WARN_ON(!p))
2074                         return -ENODEV;
2075                 br = p->br;
2076                 vg = nbp_vlan_group(p);
2077         }
2078
2079         if (WARN_ON(!vg))
2080                 return -ENODEV;
2081
2082         err = nla_parse_nested(tb, BRIDGE_VLANDB_ENTRY_MAX, attr,
2083                                br_vlan_db_policy, extack);
2084         if (err)
2085                 return err;
2086
2087         if (!tb[BRIDGE_VLANDB_ENTRY_INFO]) {
2088                 NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry info");
2089                 return -EINVAL;
2090         }
2091         memset(&vrange_end, 0, sizeof(vrange_end));
2092
2093         vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
2094         if (vinfo->flags & (BRIDGE_VLAN_INFO_RANGE_BEGIN |
2095                             BRIDGE_VLAN_INFO_RANGE_END)) {
2096                 NL_SET_ERR_MSG_MOD(extack, "Old-style vlan ranges are not allowed when using RTM vlan calls");
2097                 return -EINVAL;
2098         }
2099         if (!br_vlan_valid_id(vinfo->vid, extack))
2100                 return -EINVAL;
2101
2102         if (tb[BRIDGE_VLANDB_ENTRY_RANGE]) {
2103                 vrange_end.vid = nla_get_u16(tb[BRIDGE_VLANDB_ENTRY_RANGE]);
2104                 /* validate user-provided flags without RANGE_BEGIN */
2105                 vrange_end.flags = BRIDGE_VLAN_INFO_RANGE_END | vinfo->flags;
2106                 vinfo->flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
2107
2108                 /* vinfo_last is the range start, vinfo the range end */
2109                 vinfo_last = vinfo;
2110                 vinfo = &vrange_end;
2111
2112                 if (!br_vlan_valid_id(vinfo->vid, extack) ||
2113                     !br_vlan_valid_range(vinfo, vinfo_last, extack))
2114                         return -EINVAL;
2115         }
2116
2117         switch (cmd) {
2118         case RTM_NEWVLAN:
2119                 cmdmap = RTM_SETLINK;
2120                 skip_processing = !!(vinfo->flags & BRIDGE_VLAN_INFO_ONLY_OPTS);
2121                 break;
2122         case RTM_DELVLAN:
2123                 cmdmap = RTM_DELLINK;
2124                 break;
2125         }
2126
2127         if (!skip_processing) {
2128                 struct bridge_vlan_info *tmp_last = vinfo_last;
2129
2130                 /* br_process_vlan_info may overwrite vinfo_last */
2131                 err = br_process_vlan_info(br, p, cmdmap, vinfo, &tmp_last,
2132                                            &changed, extack);
2133
2134                 /* notify first if anything changed */
2135                 if (changed)
2136                         br_ifinfo_notify(cmdmap, br, p);
2137
2138                 if (err)
2139                         return err;
2140         }
2141
2142         /* deal with options */
2143         if (cmd == RTM_NEWVLAN) {
2144                 struct net_bridge_vlan *range_start, *range_end;
2145
2146                 if (vinfo_last) {
2147                         range_start = br_vlan_find(vg, vinfo_last->vid);
2148                         range_end = br_vlan_find(vg, vinfo->vid);
2149                 } else {
2150                         range_start = br_vlan_find(vg, vinfo->vid);
2151                         range_end = range_start;
2152                 }
2153
2154                 err = br_vlan_process_options(br, p, range_start, range_end,
2155                                               tb, extack);
2156         }
2157
2158         return err;
2159 }
2160
2161 static int br_vlan_rtm_process(struct sk_buff *skb, struct nlmsghdr *nlh,
2162                                struct netlink_ext_ack *extack)
2163 {
2164         struct net *net = sock_net(skb->sk);
2165         struct br_vlan_msg *bvm;
2166         struct net_device *dev;
2167         struct nlattr *attr;
2168         int err, vlans = 0;
2169         int rem;
2170
2171         /* this should validate the header and check for remaining bytes */
2172         err = nlmsg_parse(nlh, sizeof(*bvm), NULL, BRIDGE_VLANDB_MAX, NULL,
2173                           extack);
2174         if (err < 0)
2175                 return err;
2176
2177         bvm = nlmsg_data(nlh);
2178         dev = __dev_get_by_index(net, bvm->ifindex);
2179         if (!dev)
2180                 return -ENODEV;
2181
2182         if (!netif_is_bridge_master(dev) && !netif_is_bridge_port(dev)) {
2183                 NL_SET_ERR_MSG_MOD(extack, "The device is not a valid bridge or bridge port");
2184                 return -EINVAL;
2185         }
2186
2187         nlmsg_for_each_attr(attr, nlh, sizeof(*bvm), rem) {
2188                 if (nla_type(attr) != BRIDGE_VLANDB_ENTRY)
2189                         continue;
2190
2191                 vlans++;
2192                 err = br_vlan_rtm_process_one(dev, attr, nlh->nlmsg_type,
2193                                               extack);
2194                 if (err)
2195                         break;
2196         }
2197         if (!vlans) {
2198                 NL_SET_ERR_MSG_MOD(extack, "No vlans found to process");
2199                 err = -EINVAL;
2200         }
2201
2202         return err;
2203 }
2204
2205 void br_vlan_rtnl_init(void)
2206 {
2207         rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETVLAN, NULL,
2208                              br_vlan_rtm_dump, 0);
2209         rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWVLAN,
2210                              br_vlan_rtm_process, NULL, 0);
2211         rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELVLAN,
2212                              br_vlan_rtm_process, NULL, 0);
2213 }
2214
2215 void br_vlan_rtnl_uninit(void)
2216 {
2217         rtnl_unregister(PF_BRIDGE, RTM_GETVLAN);
2218         rtnl_unregister(PF_BRIDGE, RTM_NEWVLAN);
2219         rtnl_unregister(PF_BRIDGE, RTM_DELVLAN);
2220 }