net: bridge: Maintain number of MDB entries in net_bridge_mcast_port
author Petr Machata <petrm@nvidia.com>
Thu, 2 Feb 2023 17:59:25 +0000 (18:59 +0100)
committer David S. Miller <davem@davemloft.net>
Mon, 6 Feb 2023 08:48:26 +0000 (08:48 +0000)
The MDB maintained by the bridge is limited. When the bridge is configured
for IGMP / MLD snooping, a buggy or malicious client can easily exhaust its
capacity. In the SW datapath, the capacity is configurable through the
IFLA_BR_MCAST_HASH_MAX parameter, but it is ultimately finite. Obviously a
similar limit exists in the HW datapath for the purposes of offloading.

In order to prevent unilateral exhaustion of MDB resources, introduce two
parameters in each of two contexts:

- Per-port and per-port-VLAN number of MDB entries that the port
  is a member of.

- Per-port and (when BROPT_MCAST_VLAN_SNOOPING_ENABLED is enabled)
  per-port-VLAN maximum permitted number of MDB entries, or 0 for
  no limit (see the sketch after this list).

The per-port multicast context is used for tracking MDB entries for the
port as a whole. This is available for all bridges.

The per-port-VLAN multicast context is then only available on
VLAN-filtering bridges on VLANs that have multicast snooping on.
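
The increment path charges both contexts and rolls the port-level charge
back if the port-VLAN context is already full. A simplified sketch of that
shape, again only illustrative and reusing the stand-in helpers above:

/* Simplified shape of br_multicast_port_ngroups_inc() below, reusing the
 * stand-in helpers from the previous sketch. The port context is always
 * charged; the port-VLAN context only when the entry carries a VID and
 * per-VLAN snooping resolves a context for it (modelled here as a
 * possibly-NULL pointer). A failure on the second charge releases the
 * first, so the two counters stay consistent.
 */
static int mdb_acct_sketch_charge(struct mdb_acct_sketch *port_acct,
				  struct mdb_acct_sketch *port_vlan_acct)
{
	int err;

	err = mdb_acct_sketch_inc(port_acct);
	if (err)
		return err;

	if (!port_vlan_acct)
		return 0;

	err = mdb_acct_sketch_inc(port_vlan_acct);
	if (err)
		mdb_acct_sketch_dec(port_acct);

	return err;
}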

With these changes in place, it will be possible to configure an MDB limit
for the bridge as a whole, for any one port as a whole, or for any single
port-VLAN.

Note that unlike the global limit, exhaustion of the per-port and
per-port-VLAN maximums does not cause multicast snooping to be disabled.
It is also permitted to configure a local limit larger than hash_max,
even though that is not useful.
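
To make the contrast concrete, a toy illustration (not kernel code) of the
two failure behaviours, matching the br_multicast_new_group() and
br_multicast_port_ngroups_inc_one() hunks below:

#include <errno.h>
#include <stdbool.h>

/* Toy model of the two exhaustion behaviours, not kernel code. Hitting the
 * global hash_max makes the bridge give up on snooping altogether (see the
 * br_multicast_new_group() hunk below); hitting a per-port or per-port-VLAN
 * limit only fails that one group add and leaves snooping running.
 */
enum exhausted_limit { GLOBAL_HASH_MAX, LOCAL_PORT_OR_VLAN_LIMIT };

static int toy_handle_exhaustion(bool *snooping_enabled,
				 enum exhausted_limit which)
{
	if (which == GLOBAL_HASH_MAX)
		*snooping_enabled = false;

	/* Either way, the offending add itself fails with -E2BIG. */
	return -E2BIG;
}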

In this patch, introduce only the accounting for the number of entries and
the max field itself, but not the means to toggle the max. The next patch
introduces the netlink APIs to toggle and read these values.

Signed-off-by: Petr Machata <petrm@nvidia.com>
Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
Reviewed-by: Ido Schimmel <idosch@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/bridge/br_multicast.c
net/bridge/br_private.h

net/bridge/br_multicast.c
index 51b622a..b6aa0ba 100644
@@ -31,6 +31,7 @@
 #include <net/ip6_checksum.h>
 #include <net/addrconf.h>
 #endif
+#include <trace/events/bridge.h>
 
 #include "br_private.h"
 #include "br_private_mcast_eht.h"
@@ -234,6 +235,29 @@ out:
        return pmctx;
 }
 
+static struct net_bridge_mcast_port *
+br_multicast_port_vid_to_port_ctx(struct net_bridge_port *port, u16 vid)
+{
+       struct net_bridge_mcast_port *pmctx = NULL;
+       struct net_bridge_vlan *vlan;
+
+       lockdep_assert_held_once(&port->br->multicast_lock);
+
+       if (!br_opt_get(port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
+               return NULL;
+
+       /* Take RCU to access the vlan. */
+       rcu_read_lock();
+
+       vlan = br_vlan_find(nbp_vlan_group_rcu(port), vid);
+       if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
+               pmctx = &vlan->port_mcast_ctx;
+
+       rcu_read_unlock();
+
+       return pmctx;
+}
+
 /* when snooping we need to check if the contexts should be used
  * in the following order:
  * - if pmctx is non-NULL (port), check if it should be used
@@ -668,6 +692,86 @@ void br_multicast_del_group_src(struct net_bridge_group_src *src,
        __br_multicast_del_group_src(src);
 }
 
+static int
+br_multicast_port_ngroups_inc_one(struct net_bridge_mcast_port *pmctx,
+                                 struct netlink_ext_ack *extack,
+                                 const char *what)
+{
+       u32 max = READ_ONCE(pmctx->mdb_max_entries);
+       u32 n = READ_ONCE(pmctx->mdb_n_entries);
+
+       if (max && n >= max) {
+               NL_SET_ERR_MSG_FMT_MOD(extack, "%s is already in %u groups, and mcast_max_groups=%u",
+                                      what, n, max);
+               return -E2BIG;
+       }
+
+       WRITE_ONCE(pmctx->mdb_n_entries, n + 1);
+       return 0;
+}
+
+static void br_multicast_port_ngroups_dec_one(struct net_bridge_mcast_port *pmctx)
+{
+       u32 n = READ_ONCE(pmctx->mdb_n_entries);
+
+       WARN_ON_ONCE(n == 0);
+       WRITE_ONCE(pmctx->mdb_n_entries, n - 1);
+}
+
+static int br_multicast_port_ngroups_inc(struct net_bridge_port *port,
+                                        const struct br_ip *group,
+                                        struct netlink_ext_ack *extack)
+{
+       struct net_bridge_mcast_port *pmctx;
+       int err;
+
+       lockdep_assert_held_once(&port->br->multicast_lock);
+
+       /* Always count on the port context. */
+       err = br_multicast_port_ngroups_inc_one(&port->multicast_ctx, extack,
+                                               "Port");
+       if (err) {
+               trace_br_mdb_full(port->dev, group);
+               return err;
+       }
+
+       /* Only count on the VLAN context if VID is given, and if snooping on
+        * that VLAN is enabled.
+        */
+       if (!group->vid)
+               return 0;
+
+       pmctx = br_multicast_port_vid_to_port_ctx(port, group->vid);
+       if (!pmctx)
+               return 0;
+
+       err = br_multicast_port_ngroups_inc_one(pmctx, extack, "Port-VLAN");
+       if (err) {
+               trace_br_mdb_full(port->dev, group);
+               goto dec_one_out;
+       }
+
+       return 0;
+
+dec_one_out:
+       br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
+       return err;
+}
+
+static void br_multicast_port_ngroups_dec(struct net_bridge_port *port, u16 vid)
+{
+       struct net_bridge_mcast_port *pmctx;
+
+       lockdep_assert_held_once(&port->br->multicast_lock);
+
+       if (vid) {
+               pmctx = br_multicast_port_vid_to_port_ctx(port, vid);
+               if (pmctx)
+                       br_multicast_port_ngroups_dec_one(pmctx);
+       }
+       br_multicast_port_ngroups_dec_one(&port->multicast_ctx);
+}
+
 static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
 {
        struct net_bridge_port_group *pg;
@@ -702,6 +806,7 @@ void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
        } else {
                br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
        }
+       br_multicast_port_ngroups_dec(pg->key.port, pg->key.addr.vid);
        hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
        queue_work(system_long_wq, &br->mcast_gc_work);
 
@@ -1165,6 +1270,7 @@ struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
                return mp;
 
        if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
+               trace_br_mdb_full(br->dev, group);
                br_mc_disabled_update(br->dev, false, NULL);
                br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
                return ERR_PTR(-E2BIG);
@@ -1288,11 +1394,16 @@ struct net_bridge_port_group *br_multicast_new_port_group(
                        struct netlink_ext_ack *extack)
 {
        struct net_bridge_port_group *p;
+       int err;
+
+       err = br_multicast_port_ngroups_inc(port, group, extack);
+       if (err)
+               return NULL;
 
        p = kzalloc(sizeof(*p), GFP_ATOMIC);
        if (unlikely(!p)) {
                NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
-               return NULL;
+               goto dec_out;
        }
 
        p->key.addr = *group;
@@ -1326,18 +1437,22 @@ struct net_bridge_port_group *br_multicast_new_port_group(
 
 free_out:
        kfree(p);
+dec_out:
+       br_multicast_port_ngroups_dec(port, group->vid);
        return NULL;
 }
 
 void br_multicast_del_port_group(struct net_bridge_port_group *p)
 {
        struct net_bridge_port *port = p->key.port;
+       __u16 vid = p->key.addr.vid;
 
        hlist_del_init(&p->mglist);
        if (!br_multicast_is_star_g(&p->key.addr))
                rhashtable_remove_fast(&port->br->sg_port_tbl, &p->rhnode,
                                       br_sg_port_rht_params);
        kfree(p);
+       br_multicast_port_ngroups_dec(port, vid);
 }
 
 void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
@@ -1951,6 +2066,25 @@ static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
                br_ip4_multicast_add_router(brmctx, pmctx);
                br_ip6_multicast_add_router(brmctx, pmctx);
        }
+
+       if (br_multicast_port_ctx_is_vlan(pmctx)) {
+               struct net_bridge_port_group *pg;
+               u32 n = 0;
+
+               /* The mcast_n_groups counter might be wrong. First,
+                * BR_VLFLAG_MCAST_ENABLED is toggled before temporary entries
+                * are flushed, thus mcast_n_groups after the toggle does not
+                * reflect the true values. And second, permanent entries added
+                * while BR_VLFLAG_MCAST_ENABLED was disabled, are not reflected
+                * either. Thus we have to refresh the counter.
+                */
+
+               hlist_for_each_entry(pg, &pmctx->port->mglist, mglist) {
+                       if (pg->key.addr.vid == pmctx->vlan->vid)
+                               n++;
+               }
+               WRITE_ONCE(pmctx->mdb_n_entries, n);
+       }
 }
 
 void br_multicast_enable_port(struct net_bridge_port *port)
net/bridge/br_private.h
index e4069e2..49f411a 100644
@@ -126,6 +126,8 @@ struct net_bridge_mcast_port {
        struct hlist_node               ip6_rlist;
 #endif /* IS_ENABLED(CONFIG_IPV6) */
        unsigned char                   multicast_router;
+       u32                             mdb_n_entries;
+       u32                             mdb_max_entries;
 #endif /* CONFIG_BRIDGE_IGMP_SNOOPING */
 };