// SPDX-License-Identifier: GPL-2.0-only
// Copyright (c) 2020, Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/ip_tunnels.h>

#include "br_private.h"
#include "br_private_tunnel.h"
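
/* Dump the vlan's tunnel mapping (if any) as a BRIDGE_VLANDB_ENTRY_TUNNEL_INFO
 * nest; returns false only on netlink size errors.
 */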
static bool __vlan_tun_put(struct sk_buff *skb, const struct net_bridge_vlan *v)
{
	__be32 tid = tunnel_id_to_key32(v->tinfo.tunnel_id);
	struct nlattr *nest;

	if (!v->tinfo.tunnel_dst)
		return true;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_ENTRY_TUNNEL_INFO);
	if (!nest)
		return false;
	if (nla_put_u32(skb, BRIDGE_VLANDB_TINFO_ID, be32_to_cpu(tid))) {
		nla_nest_cancel(skb, nest);
		return false;
	}
	nla_nest_end(skb, nest);

	return true;
}

static bool __vlan_tun_can_enter_range(const struct net_bridge_vlan *v_curr,
				       const struct net_bridge_vlan *range_end)
{
	return (!v_curr->tinfo.tunnel_dst && !range_end->tinfo.tunnel_dst) ||
	       vlan_tunid_inrange(v_curr, range_end);
}

/* check if the options' state of v_curr allow it to enter the range */
bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
			   const struct net_bridge_vlan *range_end)
{
	return v_curr->state == range_end->state &&
	       __vlan_tun_can_enter_range(v_curr, range_end);
}
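
/* Fill the per-vlan options (state and tunnel mapping) into an already
 * started vlan entry nest; returns false on netlink size errors.
 */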
bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v)
{
	return !nla_put_u8(skb, BRIDGE_VLANDB_ENTRY_STATE,
			   br_vlan_get_state(v)) &&
	       __vlan_tun_put(skb, v);
}

size_t br_vlan_opts_nl_size(void)
{
	return nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_ENTRY_STATE */
	       + nla_total_size(0) /* BRIDGE_VLANDB_ENTRY_TUNNEL_INFO */
	       + nla_total_size(sizeof(u32)); /* BRIDGE_VLANDB_TINFO_ID */
}
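
/* Set a vlan's per-vlan STP state. Not allowed while kernel STP is in use;
 * the pvid's state is also cached in the vlan group for the fast path.
 */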
static int br_vlan_modify_state(struct net_bridge_vlan_group *vg,
				struct net_bridge_vlan *v,
				u8 state,
				bool *changed,
				struct netlink_ext_ack *extack)
{
	struct net_bridge *br;

	ASSERT_RTNL();

	if (state > BR_STATE_BLOCKING) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid vlan state");
		return -EINVAL;
	}

	if (br_vlan_is_brentry(v))
		br = v->br;
	else
		br = v->port->br;

	if (br->stp_enabled == BR_KERNEL_STP) {
		NL_SET_ERR_MSG_MOD(extack, "Can't modify vlan state when using kernel STP");
		return -EBUSY;
	}

	if (v->state == state)
		return 0;

	if (v->vid == br_get_pvid(vg))
		br_vlan_set_pvid_state(vg, state);

	br_vlan_set_state(v, state);
	*changed = true;

	return 0;
}

static const struct nla_policy br_vlandb_tinfo_pol[BRIDGE_VLANDB_TINFO_MAX + 1] = {
	[BRIDGE_VLANDB_TINFO_ID]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_TINFO_CMD]	= { .type = NLA_U32 },
};
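
/* Change a port vlan's tunnel mapping. The nested tunnel info carries a
 * command (RTM_SETLINK/RTM_DELLINK) and, for set, the starting tunnel id
 * of the range being configured.
 */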
static int br_vlan_modify_tunnel(const struct net_bridge_port *p,
				 struct net_bridge_vlan *v,
				 struct nlattr **tb,
				 bool *changed,
				 struct netlink_ext_ack *extack)
{
	struct nlattr *tun_tb[BRIDGE_VLANDB_TINFO_MAX + 1], *attr;
	struct bridge_vlan_info *vinfo;
	u32 tun_id = 0;
	int cmd, err;

	if (!p) {
		NL_SET_ERR_MSG_MOD(extack, "Can't modify tunnel mapping of non-port vlans");
		return -EINVAL;
	}
	if (!(p->flags & BR_VLAN_TUNNEL)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't have tunnel flag set");
		return -EINVAL;
	}

	attr = tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO];
	err = nla_parse_nested(tun_tb, BRIDGE_VLANDB_TINFO_MAX, attr,
			       br_vlandb_tinfo_pol, extack);
	if (err)
		return err;

	if (!tun_tb[BRIDGE_VLANDB_TINFO_CMD]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing tunnel command attribute");
		return -ENOENT;
	}
	cmd = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_CMD]);
	switch (cmd) {
	case RTM_SETLINK:
		if (!tun_tb[BRIDGE_VLANDB_TINFO_ID]) {
			NL_SET_ERR_MSG_MOD(extack, "Missing tunnel id attribute");
			return -ENOENT;
		}
		/* when working on vlan ranges this is the starting tunnel id */
		tun_id = nla_get_u32(tun_tb[BRIDGE_VLANDB_TINFO_ID]);
		/* vlan info attr is guaranteed by br_vlan_rtm_process_one */
		vinfo = nla_data(tb[BRIDGE_VLANDB_ENTRY_INFO]);
		/* tunnel ids are mapped to each vlan in increasing order,
		 * the starting vlan is in BRIDGE_VLANDB_ENTRY_INFO and v is the
		 * current vlan, so we compute: tun_id + v - vinfo->vid
		 */
		tun_id += v->vid - vinfo->vid;
		break;
	case RTM_DELLINK:
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Unsupported tunnel command");
		return -EINVAL;
	}

	return br_vlan_tunnel_info(p, cmd, v->vid, tun_id, changed);
}
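
/* Apply the options in tb to a single vlan entry; *changed is set if any
 * option was actually modified.
 */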
static int br_vlan_process_one_opts(const struct net_bridge *br,
				    const struct net_bridge_port *p,
				    struct net_bridge_vlan_group *vg,
				    struct net_bridge_vlan *v,
				    struct nlattr **tb,
				    bool *changed,
				    struct netlink_ext_ack *extack)
{
	int err;

	*changed = false;
	if (tb[BRIDGE_VLANDB_ENTRY_STATE]) {
		u8 state = nla_get_u8(tb[BRIDGE_VLANDB_ENTRY_STATE]);

		err = br_vlan_modify_state(vg, v, state, changed, extack);
		if (err)
			return err;
	}
	if (tb[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO]) {
		err = br_vlan_modify_tunnel(p, v, tb, changed, extack);
		if (err)
			return err;
	}

	return 0;
}
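
/* Apply the options to every vlan in [range_start, range_end] and send
 * RTM_NEWVLAN notifications, coalescing contiguous vlans whose resulting
 * options are equal into a single range notification.
 */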
int br_vlan_process_options(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct net_bridge_vlan *range_start,
			    struct net_bridge_vlan *range_end,
			    struct nlattr **tb,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
	struct net_bridge_vlan_group *vg;
	int vid, err = 0;
	u16 pvid;

	if (p)
		vg = nbp_vlan_group(p);
	else
		vg = br_vlan_group(br);

	if (!range_start || !br_vlan_should_use(range_start)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan range start doesn't exist, can't process options");
		return -ENOENT;
	}
	if (!range_end || !br_vlan_should_use(range_end)) {
		NL_SET_ERR_MSG_MOD(extack, "Vlan range end doesn't exist, can't process options");
		return -ENOENT;
	}

	pvid = br_get_pvid(vg);
	for (vid = range_start->vid; vid <= range_end->vid; vid++) {
		bool changed = false;

		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v)) {
			NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process options");
			err = -ENOENT;
			break;
		}

		err = br_vlan_process_one_opts(br, p, vg, v, tb, &changed,
					       extack);
		if (err)
			break;

		if (changed) {
			/* vlan options changed, check for range */
			if (!curr_start) {
				curr_start = v;
				curr_end = v;
				continue;
			}

			if (v->vid == pvid ||
			    !br_vlan_can_enter_range(v, curr_end)) {
				br_vlan_notify(br, p, curr_start->vid,
					       curr_end->vid, RTM_NEWVLAN);
				curr_start = v;
			}
			curr_end = v;
		} else {
			/* nothing changed and nothing to notify yet */
			if (!curr_start)
				continue;

			br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
				       RTM_NEWVLAN);
			curr_start = NULL;
			curr_end = NULL;
		}
	}
	if (curr_start)
		br_vlan_notify(br, p, curr_start->vid, curr_end->vid,
			       RTM_NEWVLAN);

	return err;
}
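
/* Global options of v_curr can be folded into the range ending at r_end
 * only if the vlans are consecutive and all of their global options match.
 */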
bool br_vlan_global_opts_can_enter_range(const struct net_bridge_vlan *v_curr,
					 const struct net_bridge_vlan *r_end)
{
	return v_curr->vid - r_end->vid == 1 &&
	       ((v_curr->priv_flags ^ r_end->priv_flags) &
		BR_VLFLAG_GLOBAL_MCAST_ENABLED) == 0 &&
	       br_multicast_ctx_options_equal(&v_curr->br_mcast_ctx,
					      &r_end->br_mcast_ctx);
}
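
/* Dump the global options for vid (or the vid..vid_range span) as a
 * BRIDGE_VLANDB_GLOBAL_OPTIONS nest; returns false on netlink size errors.
 */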
bool br_vlan_global_opts_fill(struct sk_buff *skb, u16 vid, u16 vid_range,
			      const struct net_bridge_vlan *v_opts)
{
	u64 clockval __maybe_unused;
	struct nlattr *nest;

	nest = nla_nest_start(skb, BRIDGE_VLANDB_GLOBAL_OPTIONS);
	if (!nest)
		return false;

	if (nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_ID, vid))
		goto out_err;

	if (vid_range && vid < vid_range &&
	    nla_put_u16(skb, BRIDGE_VLANDB_GOPTS_RANGE, vid_range))
		goto out_err;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING,
		       !!(v_opts->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED)) ||
	    nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION,
		       v_opts->br_mcast_ctx.multicast_igmp_version) ||
	    nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT,
			v_opts->br_mcast_ctx.multicast_last_member_count) ||
	    nla_put_u32(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT,
			v_opts->br_mcast_ctx.multicast_startup_query_count) ||
	    nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER,
		       v_opts->br_mcast_ctx.multicast_querier))
		goto out_err;

	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_membership_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_querier_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;
	clockval = jiffies_to_clock_t(v_opts->br_mcast_ctx.multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL,
			      clockval, BRIDGE_VLANDB_GOPTS_PAD))
		goto out_err;

#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION,
		       v_opts->br_mcast_ctx.multicast_mld_version))
		goto out_err;
#endif
#endif

	nla_nest_end(skb, nest);

	return true;

out_err:
	nla_nest_cancel(skb, nest);
	return false;
}

static size_t rtnl_vlan_global_opts_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_vlan_msg))
		+ nla_total_size(0) /* BRIDGE_VLANDB_GLOBAL_OPTIONS */
		+ nla_total_size(sizeof(u16)) /* BRIDGE_VLANDB_GOPTS_ID */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING */
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION */
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION */
		+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT */
		+ nla_total_size(sizeof(u32)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL */
		+ nla_total_size(sizeof(u64)) /* BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL */
		+ nla_total_size(sizeof(u8)) /* BRIDGE_VLANDB_GOPTS_MCAST_QUERIER */
#endif
		+ nla_total_size(sizeof(u16)); /* BRIDGE_VLANDB_GOPTS_RANGE */
}
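
/* Notify userspace on the RTNLGRP_BRVLAN group about a global options
 * change for vid (or the vid..vid_range span); requires rtnl to be held.
 */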
static void br_vlan_global_opts_notify(const struct net_bridge *br,
				       u16 vid, u16 vid_range)
{
	struct net_bridge_vlan *v;
	struct br_vlan_msg *bvm;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	/* right now notifications are done only with rtnl held */
	ASSERT_RTNL();

	skb = nlmsg_new(rtnl_vlan_global_opts_nlmsg_size(), GFP_KERNEL);
	if (!skb)
		goto out_err;

	err = -EMSGSIZE;
	nlh = nlmsg_put(skb, 0, 0, RTM_NEWVLAN, sizeof(*bvm), 0);
	if (!nlh)
		goto out_err;
	bvm = nlmsg_data(nlh);
	memset(bvm, 0, sizeof(*bvm));
	bvm->family = AF_BRIDGE;
	bvm->ifindex = br->dev->ifindex;

	/* need to find the vlan due to flags/options */
	v = br_vlan_find(br_vlan_group(br), vid);
	if (!v)
		goto out_kfree;

	if (!br_vlan_global_opts_fill(skb, vid, vid_range, v))
		goto out_err;

	nlmsg_end(skb, nlh);
	rtnl_notify(skb, dev_net(br->dev), 0, RTNLGRP_BRVLAN, NULL, GFP_KERNEL);
	return;

out_err:
	rtnl_set_sk_err(dev_net(br->dev), RTNLGRP_BRVLAN, err);
out_kfree:
	kfree_skb(skb);
}
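
/* Apply the global (bridge-wide, per-vlan) options in tb to a single vlan;
 * *changed is set if any option was actually modified.
 */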
static int br_vlan_process_global_one_opts(const struct net_bridge *br,
					   struct net_bridge_vlan_group *vg,
					   struct net_bridge_vlan *v,
					   struct nlattr **tb,
					   bool *changed,
					   struct netlink_ext_ack *extack)
{
	int err __maybe_unused;

	*changed = false;
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]) {
		u8 mc_snooping;

		mc_snooping = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]);
		if (br_multicast_toggle_global_vlan(v, !!mc_snooping))
			*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]) {
		u8 ver;

		ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(&v->br_mcast_ctx, ver);
		if (err)
			return err;
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]) {
		u32 cnt;

		cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]);
		v->br_mcast_ctx.multicast_last_member_count = cnt;
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]) {
		u32 cnt;

		cnt = nla_get_u32(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]);
		v->br_mcast_ctx.multicast_startup_query_count = cnt;
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]);
		v->br_mcast_ctx.multicast_last_member_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]);
		v->br_mcast_ctx.multicast_membership_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]);
		v->br_mcast_ctx.multicast_querier_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]);
		v->br_mcast_ctx.multicast_query_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL]);
		v->br_mcast_ctx.multicast_query_response_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val;

		val = nla_get_u64(tb[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]);
		v->br_mcast_ctx.multicast_startup_query_interval = clock_t_to_jiffies(val);
		*changed = true;
	}
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]) {
		u8 val;

		val = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]);
		err = br_multicast_set_querier(&v->br_mcast_ctx, val);
		if (err)
			return err;
		*changed = true;
	}
#if IS_ENABLED(CONFIG_IPV6)
	if (tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]) {
		u8 ver;

		ver = nla_get_u8(tb[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(&v->br_mcast_ctx, ver);
		if (err)
			return err;
		*changed = true;
	}
#endif
#endif

	return 0;
}

static const struct nla_policy br_vlan_db_gpol[BRIDGE_VLANDB_GOPTS_MAX + 1] = {
	[BRIDGE_VLANDB_GOPTS_ID]	= { .type = NLA_U16 },
	[BRIDGE_VLANDB_GOPTS_RANGE]	= { .type = NLA_U16 },
	[BRIDGE_VLANDB_GOPTS_MCAST_SNOOPING]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_MLD_VERSION]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_IGMP_VERSION]	= { .type = NLA_U8 },
	[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_CNT]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_CNT]	= { .type = NLA_U32 },
	[BRIDGE_VLANDB_GOPTS_MCAST_LAST_MEMBER_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_MEMBERSHIP_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERIER_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_STARTUP_QUERY_INTVL]	= { .type = NLA_U64 },
	[BRIDGE_VLANDB_GOPTS_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
};
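
/* Entry point for RTM_NEWVLAN global options requests on a bridge device:
 * parse the BRIDGE_VLANDB_GLOBAL_OPTIONS attribute, apply it to each vlan
 * in the requested range and send notifications in coalesced ranges.
 */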
int br_vlan_rtm_process_global_options(struct net_device *dev,
				       const struct nlattr *attr,
				       int cmd,
				       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan *v, *curr_start = NULL, *curr_end = NULL;
	struct nlattr *tb[BRIDGE_VLANDB_GOPTS_MAX + 1];
	struct net_bridge_vlan_group *vg;
	u16 vid, vid_range = 0;
	struct net_bridge *br;
	int err = 0;

	if (cmd != RTM_NEWVLAN) {
		NL_SET_ERR_MSG_MOD(extack, "Global vlan options support only set operation");
		return -EINVAL;
	}
	if (!netif_is_bridge_master(dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Global vlan options can only be set on bridge device");
		return -EINVAL;
	}
	br = netdev_priv(dev);
	vg = br_vlan_group(br);
	if (WARN_ON(!vg))
		return -ENODEV;

	err = nla_parse_nested(tb, BRIDGE_VLANDB_GOPTS_MAX, attr,
			       br_vlan_db_gpol, extack);
	if (err)
		return err;

	if (!tb[BRIDGE_VLANDB_GOPTS_ID]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing vlan entry id");
		return -EINVAL;
	}
	vid = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_ID]);
	if (!br_vlan_valid_id(vid, extack))
		return -EINVAL;

	if (tb[BRIDGE_VLANDB_GOPTS_RANGE]) {
		vid_range = nla_get_u16(tb[BRIDGE_VLANDB_GOPTS_RANGE]);
		if (!br_vlan_valid_id(vid_range, extack))
			return -EINVAL;
		if (vid >= vid_range) {
			NL_SET_ERR_MSG_MOD(extack, "End vlan id is less than or equal to start vlan id");
			return -EINVAL;
		}
	} else {
		vid_range = vid;
	}

	for (; vid <= vid_range; vid++) {
		bool changed = false;

		v = br_vlan_find(vg, vid);
		if (!v) {
			NL_SET_ERR_MSG_MOD(extack, "Vlan in range doesn't exist, can't process global options");
			err = -ENOENT;
			break;
		}

		err = br_vlan_process_global_one_opts(br, vg, v, tb, &changed,
						      extack);
		if (err)
			break;

		if (changed) {
			/* vlan options changed, check for range */
			if (!curr_start) {
				curr_start = v;
				curr_end = v;
				continue;
			}

			if (!br_vlan_global_opts_can_enter_range(v, curr_end)) {
				br_vlan_global_opts_notify(br, curr_start->vid,
							   curr_end->vid);
				curr_start = v;
			}
			curr_end = v;
		} else {
			/* nothing changed and nothing to notify yet */
			if (!curr_start)
				continue;

			br_vlan_global_opts_notify(br, curr_start->vid,
						   curr_end->vid);
			curr_start = NULL;
			curr_end = NULL;
		}
	}
	if (curr_start)
		br_vlan_global_opts_notify(br, curr_start->vid, curr_end->vid);

	return err;
}