1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
4 #include <linux/if_bridge.h>
5 #include <linux/list.h>
10 #include <net/ip6_tunnel.h>
13 #include "spectrum_ipip.h"
14 #include "spectrum_span.h"
15 #include "spectrum_switchdev.h"
/* devlink resource occupancy callback: counts SPAN entries whose ref_count
 * is non-zero. NOTE(review): source elided here — accumulator and return
 * lines are not visible in this chunk.
 */
17 static u64 mlxsw_sp_span_occ_get(void *priv)
19 const struct mlxsw_sp *mlxsw_sp = priv;
23 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
24 if (mlxsw_sp->span.entries[i].ref_count)
/* Allocate the SPAN entry table (sized from the MAX_SPAN device resource)
 * and register the occupancy getter with devlink. Returns 0 on success,
 * negative errno otherwise (error paths elided in this chunk).
 */
31 int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
33 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
36 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
39 mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
41 mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
42 sizeof(struct mlxsw_sp_span_entry),
44 if (!mlxsw_sp->span.entries)
/* Each entry starts with an empty list of bound (mirroring) ports. */
47 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
48 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
50 INIT_LIST_HEAD(&curr->bound_ports_list);
54 devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
55 mlxsw_sp_span_occ_get, mlxsw_sp);
/* Teardown counterpart of mlxsw_sp_span_init: unregister the devlink
 * occupancy getter, warn if any entry still has bound ports, and free
 * the entry table.
 */
60 void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
62 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
65 devlink_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
67 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
68 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
/* All inspected ports should have been unbound before fini. */
70 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
72 kfree(mlxsw_sp->span.entries);
/* parms callback for mirroring to a physical port: the destination is
 * simply the mlxsw port behind to_dev (netdev_priv of an mlxsw netdev).
 */
76 mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
77 struct mlxsw_sp_span_parms *sparmsp)
79 sparmsp->dest_port = netdev_priv(to_dev);
/* Program an MPAT register entry of type LOCAL_ETH so that mirrored
 * traffic is sent out of the local destination port.
 */
84 mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
85 struct mlxsw_sp_span_parms sparms)
87 struct mlxsw_sp_port *dest_port = sparms.dest_port;
88 struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
89 u8 local_port = dest_port->local_port;
90 char mpat_pl[MLXSW_REG_MPAT_LEN];
91 int pa_id = span_entry->id;
93 /* Create a new port analyzer entry for local_port. */
94 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
95 MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
97 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
/* Shared deconfigure helper: re-pack the MPAT entry with enable=false for
 * the given span_type, removing the analyzer entry from the device.
 */
101 mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
102 enum mlxsw_reg_mpat_span_type span_type)
104 struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
105 struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
106 u8 local_port = dest_port->local_port;
107 char mpat_pl[MLXSW_REG_MPAT_LEN];
108 int pa_id = span_entry->id;
110 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
/* Best-effort: the write's return value is intentionally ignored here. */
111 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
/* Deconfigure a LOCAL_ETH (physical-port) SPAN entry. */
115 mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
117 mlxsw_sp_span_entry_deconfigure_common(span_entry,
118 MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
/* Ops for mirroring directly to an mlxsw physical port netdev. */
122 struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
123 .can_handle = mlxsw_sp_port_dev_check,
124 .parms = mlxsw_sp_span_entry_phys_parms,
125 .configure = mlxsw_sp_span_entry_phys_configure,
126 .deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
/* Resolve the destination MAC for pkey on dev via the given neighbour
 * table. Creates the neighbour and kicks resolution (neigh_event_send)
 * if needed; copies ha into dmac only while the entry is NUD_VALID and
 * not dead, under neigh->lock. Error-path lines are elided in this chunk.
 */
129 static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
131 struct net_device *dev,
132 unsigned char dmac[ETH_ALEN])
134 struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
138 neigh = neigh_create(tbl, pkey, dev);
140 return PTR_ERR(neigh);
/* Trigger resolution in case the entry is not yet valid. */
143 neigh_event_send(neigh, NULL);
145 read_lock_bh(&neigh->lock);
146 if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
147 memcpy(dmac, neigh->ha, ETH_ALEN);
150 read_unlock_bh(&neigh->lock);
152 neigh_release(neigh);
/* Mark the parms as "cannot be offloaded" by clearing dest_port; callers
 * return this to signal a graceful fallback rather than a hard error.
 */
157 mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
159 sparmsp->dest_port = NULL;
/* Walk through a VLAN-aware (802.1Q) bridge: find the egress port for
 * dmac in the (possibly PVID-derived) VLAN, validating that the VLAN is
 * a bridge entry and reporting whether egress is tagged via *p_vid
 * (elided lines presumably clear *p_vid for untagged egress — TODO
 * confirm against the full source).
 */
163 static struct net_device *
164 mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
168 struct bridge_vlan_info vinfo;
169 struct net_device *edev;
/* No VLAN given: fall back to the bridge PVID. */
172 if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
175 br_vlan_get_info(br_dev, vid, &vinfo) ||
176 !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
179 edev = br_fdb_find_port(br_dev, dmac, vid);
183 if (br_vlan_get_info(edev, vid, &vinfo))
185 if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
/* VLAN-unaware (802.1D) bridge: a plain FDB lookup with VID 0 yields the
 * egress port for dmac.
 */
192 static struct net_device *
193 mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
196 return br_fdb_find_port(br_dev, dmac, 0);
/* Resolve the bridge egress device for dmac, dispatching on whether the
 * bridge is VLAN-aware. The chosen port must belong to this mlxsw
 * instance and be in STP forwarding state, otherwise mirroring through
 * the bridge is refused (failure returns elided in this chunk).
 */
199 static struct net_device *
200 mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
201 unsigned char dmac[ETH_ALEN],
204 struct mlxsw_sp_bridge_port *bridge_port;
205 enum mlxsw_reg_spms_state spms_state;
206 struct net_device *dev = NULL;
207 struct mlxsw_sp_port *port;
210 if (br_vlan_enabled(br_dev))
211 dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
213 dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
217 port = mlxsw_sp_port_dev_lower_find(dev);
221 bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
/* Mirroring into a blocked/learning port would be dropped by STP. */
225 stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
226 spms_state = mlxsw_sp_stp_spms_state(stp_state);
227 if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
/* Peel one VLAN layer: report the VLAN ID through *p_vid and return the
 * underlying real device.
 */
233 static struct net_device *
234 mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
237 *p_vid = vlan_dev_vlan_id(vlan_dev);
238 return vlan_dev_real_dev(vlan_dev);
/* Pick a usable slave of a LAG device for mirroring: the first lower
 * device that has carrier, is tx-able per the LAG policy, and is an
 * mlxsw port (return statements elided in this chunk).
 */
241 static struct net_device *
242 mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
244 struct net_device *dev;
245 struct list_head *iter;
247 netdev_for_each_lower_dev(lag_dev, dev, iter)
248 if (netif_carrier_ok(dev) &&
249 net_lag_port_dev_txable(dev) &&
250 mlxsw_sp_port_dev_check(dev))
/* Common parms resolution for tunnel (gretap/ip6gretap) destinations:
 * resolve the gateway's MAC, then peel VLAN / bridge / VLAN-again / LAG
 * layers off the egress device until an mlxsw physical port is reached,
 * and fill sparmsp (dest_port, MACs, addresses). Any unresolvable step
 * falls through to mlxsw_sp_span_entry_unoffloadable.
 */
256 static __maybe_unused int
257 mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
258 union mlxsw_sp_l3addr saddr,
259 union mlxsw_sp_l3addr daddr,
260 union mlxsw_sp_l3addr gw,
262 struct neigh_table *tbl,
263 struct mlxsw_sp_span_parms *sparmsp)
265 unsigned char dmac[ETH_ALEN];
/* A zero gateway means the route lookup found no usable next hop. */
268 if (mlxsw_sp_l3addr_is_zero(gw))
271 if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
274 if (is_vlan_dev(edev))
275 edev = mlxsw_sp_span_entry_vlan(edev, &vid);
277 if (netif_is_bridge_master(edev)) {
278 edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
/* A second VLAN layer under the bridge: only one tag is supported. */
283 if (is_vlan_dev(edev)) {
284 if (vid || !(edev->flags & IFF_UP))
286 edev = mlxsw_sp_span_entry_vlan(edev, &vid);
289 if (netif_is_lag_master(edev)) {
290 if (!(edev->flags & IFF_UP))
292 edev = mlxsw_sp_span_entry_lag(edev);
297 if (!mlxsw_sp_port_dev_check(edev))
300 sparmsp->dest_port = netdev_priv(edev);
302 memcpy(sparmsp->dmac, dmac, ETH_ALEN);
303 memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
304 sparmsp->saddr = saddr;
305 sparmsp->daddr = daddr;
310 return mlxsw_sp_span_entry_unoffloadable(sparmsp);
313 #if IS_ENABLED(CONFIG_NET_IPGRE)
/* IPv4 GRE: route the tunnel flow to find the egress device and, when
 * the route has an IPv4 gateway, rewrite *daddrp to the gateway address.
 * Routes with IPv6 gateways cannot be offloaded (failure path elided).
 */
314 static struct net_device *
315 mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
316 __be32 *saddrp, __be32 *daddrp)
318 struct ip_tunnel *tun = netdev_priv(to_dev);
319 struct net_device *dev = NULL;
320 struct ip_tunnel_parm parms;
321 struct rtable *rt = NULL;
324 /* We assume "dev" stays valid after rt is put. */
327 parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
328 ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
329 0, 0, parms.link, tun->fwmark, 0);
331 rt = ip_route_output_key(tun->net, &fl4);
335 if (rt->rt_type != RTN_UNICAST)
340 if (rt->rt_gw_family == AF_INET)
341 *daddrp = rt->rt_gw4;
342 /* can not offload if route has an IPv6 gateway */
343 else if (rt->rt_gw_family == AF_INET6)
/* parms callback for gretap devices: validate that the tunnel is
 * offloadable (up, no GRE key/csum flags, fixed TTL, inherited TOS,
 * concrete destination), route it, then defer to the common tunnel
 * parms resolution.
 */
352 mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
353 struct mlxsw_sp_span_parms *sparmsp)
355 struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
356 union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
357 union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
/* Bit 0 of the TOS field is the "inherit from inner packet" flag. */
358 bool inherit_tos = tparm.iph.tos & 0x1;
359 bool inherit_ttl = !tparm.iph.ttl;
360 union mlxsw_sp_l3addr gw = daddr;
361 struct net_device *l3edev;
363 if (!(to_dev->flags & IFF_UP) ||
364 /* Reject tunnels with GRE keys, checksums, etc. */
365 tparm.i_flags || tparm.o_flags ||
366 /* Require a fixed TTL and a TOS copied from the mirrored packet. */
367 inherit_ttl || !inherit_tos ||
368 /* A destination address may not be "any". */
369 mlxsw_sp_l3addr_is_zero(daddr))
370 return mlxsw_sp_span_entry_unoffloadable(sparmsp);
372 l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
373 return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
/* Program an MPAT entry of type REMOTE_ETH_L3 carrying the L2 (dmac,
 * optional VLAN tag) and IPv4 encapsulation parameters for a gretap
 * mirror destination.
 */
379 mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
380 struct mlxsw_sp_span_parms sparms)
382 struct mlxsw_sp_port *dest_port = sparms.dest_port;
383 struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
384 u8 local_port = dest_port->local_port;
385 char mpat_pl[MLXSW_REG_MPAT_LEN];
386 int pa_id = span_entry->id;
388 /* Create a new port analyzer entry for local_port. */
389 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
390 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
391 mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
392 mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
393 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
394 sparms.dmac, !!sparms.vid);
395 mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
396 sparms.ttl, sparms.smac,
397 be32_to_cpu(sparms.saddr.addr4),
398 be32_to_cpu(sparms.daddr.addr4));
400 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
/* Deconfigure a REMOTE_ETH_L3 (gretap) SPAN entry. */
404 mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
406 mlxsw_sp_span_entry_deconfigure_common(span_entry,
407 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
/* Ops for mirroring into an IPv4 gretap tunnel device. */
410 static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
411 .can_handle = netif_is_gretap,
412 .parms = mlxsw_sp_span_entry_gretap4_parms,
413 .configure = mlxsw_sp_span_entry_gretap4_configure,
414 .deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
418 #if IS_ENABLED(CONFIG_IPV6_GRE)
/* IPv6 GRE counterpart of the gretap4 route helper: route the tunnel
 * flow through the IPv6 FIB and take the gateway from rt6i_gateway
 * (error paths and saddr handling elided in this chunk).
 */
419 static struct net_device *
420 mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
421 struct in6_addr *saddrp,
422 struct in6_addr *daddrp)
424 struct ip6_tnl *t = netdev_priv(to_dev);
425 struct flowi6 fl6 = t->fl.u.ip6;
426 struct net_device *dev = NULL;
427 struct dst_entry *dst;
428 struct rt6_info *rt6;
430 /* We assume "dev" stays valid after dst is released. */
433 fl6.flowi6_mark = t->parms.fwmark;
434 if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
437 dst = ip6_route_output(t->net, NULL, &fl6);
438 if (!dst || dst->error)
441 rt6 = container_of(dst, struct rt6_info, dst);
445 *daddrp = rt6->rt6i_gateway;
/* parms callback for ip6gretap devices; mirrors the gretap4 variant:
 * validate offloadability, route, then delegate to the common tunnel
 * parms resolution.
 */
453 mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
454 struct mlxsw_sp_span_parms *sparmsp)
456 struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
457 bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
458 union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
459 union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
460 bool inherit_ttl = !tparm.hop_limit;
461 union mlxsw_sp_l3addr gw = daddr;
462 struct net_device *l3edev;
464 if (!(to_dev->flags & IFF_UP) ||
465 /* Reject tunnels with GRE keys, checksums, etc. */
466 tparm.i_flags || tparm.o_flags ||
467 /* Require a fixed TTL and a TOS copied from the mirrored packet. */
468 inherit_ttl || !inherit_tos ||
469 /* A destination address may not be "any". */
470 mlxsw_sp_l3addr_is_zero(daddr))
471 return mlxsw_sp_span_entry_unoffloadable(sparmsp);
473 l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
474 return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
/* Program an MPAT entry of type REMOTE_ETH_L3 with IPv6 encapsulation
 * parameters for an ip6gretap mirror destination (the ipv6 address
 * arguments to the l3_ipv6_pack call are elided in this chunk).
 */
480 mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
481 struct mlxsw_sp_span_parms sparms)
483 struct mlxsw_sp_port *dest_port = sparms.dest_port;
484 struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
485 u8 local_port = dest_port->local_port;
486 char mpat_pl[MLXSW_REG_MPAT_LEN];
487 int pa_id = span_entry->id;
489 /* Create a new port analyzer entry for local_port. */
490 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
491 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
492 mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
493 mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
494 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
495 sparms.dmac, !!sparms.vid);
496 mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
500 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
/* Deconfigure a REMOTE_ETH_L3 (ip6gretap) SPAN entry. */
504 mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
506 mlxsw_sp_span_entry_deconfigure_common(span_entry,
507 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
/* Ops for mirroring into an IPv6 ip6gretap tunnel device. */
511 struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
512 .can_handle = netif_is_ip6gretap,
513 .parms = mlxsw_sp_span_entry_gretap6_parms,
514 .configure = mlxsw_sp_span_entry_gretap6_configure,
515 .deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
/* A VLAN destination is handled only when it sits on top of an mlxsw
 * physical port.
 */
520 mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
522 return is_vlan_dev(dev) &&
523 mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
/* parms callback for VLAN destinations: require the device to be up,
 * then peel the VLAN layer to obtain the underlying mlxsw port (the
 * line storing the vid into sparmsp is elided in this chunk).
 */
527 mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
528 struct mlxsw_sp_span_parms *sparmsp)
530 struct net_device *real_dev;
533 if (!(to_dev->flags & IFF_UP))
534 return mlxsw_sp_span_entry_unoffloadable(sparmsp);
536 real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
537 sparmsp->dest_port = netdev_priv(real_dev);
/* Program an MPAT entry of type REMOTE_ETH: mirrored traffic leaves the
 * destination port tagged with sparms.vid.
 */
543 mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
544 struct mlxsw_sp_span_parms sparms)
546 struct mlxsw_sp_port *dest_port = sparms.dest_port;
547 struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
548 u8 local_port = dest_port->local_port;
549 char mpat_pl[MLXSW_REG_MPAT_LEN];
550 int pa_id = span_entry->id;
552 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
553 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
554 mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
556 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
/* Deconfigure a REMOTE_ETH (VLAN) SPAN entry. */
560 mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
562 mlxsw_sp_span_entry_deconfigure_common(span_entry,
563 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
/* Ops for mirroring to a VLAN device stacked on an mlxsw port. */
567 struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
568 .can_handle = mlxsw_sp_span_vlan_can_handle,
569 .parms = mlxsw_sp_span_entry_vlan_parms,
570 .configure = mlxsw_sp_span_entry_vlan_configure,
571 .deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
/* Ordered table of destination-type handlers; the first ops whose
 * can_handle matches the destination netdev wins (see
 * mlxsw_sp_span_entry_ops below).
 */
575 struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
576 &mlxsw_sp_span_entry_ops_phys,
577 #if IS_ENABLED(CONFIG_NET_IPGRE)
578 &mlxsw_sp_span_entry_ops_gretap4,
580 #if IS_ENABLED(CONFIG_IPV6_GRE)
581 &mlxsw_sp_span_entry_ops_gretap6,
583 &mlxsw_sp_span_entry_ops_vlan,
/* nop parms: always report the destination as unoffloadable. */
587 mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
588 struct mlxsw_sp_span_parms *sparmsp)
590 return mlxsw_sp_span_entry_unoffloadable(sparmsp);
/* nop configure: nothing to program (body elided in this chunk). */
594 mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
595 struct mlxsw_sp_span_parms sparms)
601 mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
/* Placeholder ops installed on invalidated entries: no hardware state
 * is programmed, keeping the entry alive but inert.
 */
605 static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
606 .parms = mlxsw_sp_span_entry_nop_parms,
607 .configure = mlxsw_sp_span_entry_nop_configure,
608 .deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
/* Apply sparms to an entry via its ops. Cross-instance destinations and
 * hardware configure failures are logged and degrade to dest_port ==
 * NULL (entry kept but unoffloaded) rather than failing the caller.
 */
612 mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
613 struct mlxsw_sp_span_entry *span_entry,
614 struct mlxsw_sp_span_parms sparms)
616 if (sparms.dest_port) {
617 if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
618 netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
619 sparms.dest_port->dev->name);
620 sparms.dest_port = NULL;
621 } else if (span_entry->ops->configure(span_entry, sparms)) {
622 netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
623 sparms.dest_port->dev->name);
624 sparms.dest_port = NULL;
/* Record the (possibly degraded) parms on the entry. */
628 span_entry->parms = sparms;
/* Undo hardware configuration, but only if the entry was actually
 * offloaded (dest_port set).
 */
632 mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
634 if (span_entry->parms.dest_port)
635 span_entry->ops->deconfigure(span_entry);
/* Claim a free entry (ref_count == 0), initialize it for to_dev with the
 * given ops, and configure it. Returns the entry, or (in elided code)
 * NULL when the table is exhausted.
 */
638 static struct mlxsw_sp_span_entry *
639 mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
640 const struct net_device *to_dev,
641 const struct mlxsw_sp_span_entry_ops *ops,
642 struct mlxsw_sp_span_parms sparms)
644 struct mlxsw_sp_span_entry *span_entry = NULL;
647 /* find a free entry to use */
648 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
649 if (!mlxsw_sp->span.entries[i].ref_count) {
650 span_entry = &mlxsw_sp->span.entries[i];
657 span_entry->ops = ops;
658 span_entry->ref_count = 1;
659 span_entry->to_dev = to_dev;
660 mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);
/* Destroy an entry whose refcount dropped to zero: just deconfigure;
 * the slot itself is reused via ref_count == 0.
 */
665 static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
667 mlxsw_sp_span_entry_deconfigure(span_entry);
/* Linear search for a live entry (ref_count != 0) whose destination
 * netdev is to_dev (return statements elided in this chunk).
 */
670 struct mlxsw_sp_span_entry *
671 mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
672 const struct net_device *to_dev)
676 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
677 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
679 if (curr->ref_count && curr->to_dev == to_dev)
/* Detach an entry from hardware and swap in the nop ops so subsequent
 * configure/deconfigure calls are harmless (e.g. when the destination
 * netdev becomes unusable).
 */
685 void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
686 struct mlxsw_sp_span_entry *span_entry)
688 mlxsw_sp_span_entry_deconfigure(span_entry);
689 span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
/* Linear search for a live entry by its SPAN id (return statements
 * elided in this chunk).
 */
692 static struct mlxsw_sp_span_entry *
693 mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
697 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
698 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
700 if (curr->ref_count && curr->id == span_id)
/* Get-or-create: reuse an existing entry for to_dev (bumping its
 * refcount) or create a fresh one with the supplied ops and parms.
 */
706 static struct mlxsw_sp_span_entry *
707 mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
708 const struct net_device *to_dev,
709 const struct mlxsw_sp_span_entry_ops *ops,
710 struct mlxsw_sp_span_parms sparms)
712 struct mlxsw_sp_span_entry *span_entry;
714 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
716 /* Already exists, just take a reference */
717 span_entry->ref_count++;
721 return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
/* Drop one reference; destroy the entry when the last reference goes.
 * WARN if called on an entry that is already at zero.
 */
724 static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
725 struct mlxsw_sp_span_entry *span_entry)
727 WARN_ON(!span_entry->ref_count);
728 if (--span_entry->ref_count == 0)
729 mlxsw_sp_span_entry_destroy(span_entry);
/* True iff the given port is bound as an EGRESS mirror source on any
 * SPAN entry (return statements elided in this chunk).
 */
733 static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
735 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
736 struct mlxsw_sp_span_inspected_port *p;
739 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
740 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
742 list_for_each_entry(p, &curr->bound_ports_list, list)
743 if (p->local_port == port->local_port &&
744 p->type == MLXSW_SP_SPAN_EGRESS)
/* Convert an MTU to the shared-buffer size (in cells) needed for egress
 * mirroring: 2.5x the MTU, rounded up to cells, plus one cell of slack.
 * NOTE(review): the exact 5/2 factor is a heuristic inherited from the
 * original driver — confirm against current hardware guidance.
 */
751 static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
754 return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
/* React to an MTU change on a port: if it is an egress mirror source,
 * resize its SBIB shared buffer accordingly. Returns 0 or the register
 * write error (return lines elided in this chunk).
 */
757 int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
759 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
760 char sbib_pl[MLXSW_REG_SBIB_LEN];
763 /* If port is egress mirrored, the shared buffer size should be
764 * updated according to the mtu value
766 if (mlxsw_sp_span_is_egress_mirror(port)) {
767 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
769 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
770 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
772 netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
/* Find a bound (inspected) port on span_entry matching type and
 * local_port; an additional match condition and the return statements
 * are elided in this chunk (presumably the "bound" flag — TODO confirm).
 */
780 static struct mlxsw_sp_span_inspected_port *
781 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
782 enum mlxsw_sp_span_type type,
783 struct mlxsw_sp_port *port,
786 struct mlxsw_sp_span_inspected_port *p;
788 list_for_each_entry(p, &span_entry->bound_ports_list, list)
789 if (type == p->type &&
790 port->local_port == p->local_port &&
/* Bind or unbind (per the elided "bind" parameter) a source port to the
 * analyzer entry via the MPAR register; type selects ingress/egress
 * mirroring direction.
 */
797 mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
798 struct mlxsw_sp_span_entry *span_entry,
799 enum mlxsw_sp_span_type type,
802 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
803 char mpar_pl[MLXSW_REG_MPAR_LEN];
804 int pa_id = span_entry->id;
806 /* bind the port to the SPAN entry */
807 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
808 (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
809 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
/* Attach a source port to a SPAN entry: check for an existing binding
 * conflict, allocate an SBIB shared buffer for egress mirroring, bind
 * via MPAR, and track the binding in bound_ports_list. Error unwinding
 * follows the kernel goto-cleanup pattern (labels partially elided).
 */
813 mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
814 struct mlxsw_sp_span_entry *span_entry,
815 enum mlxsw_sp_span_type type,
818 struct mlxsw_sp_span_inspected_port *inspected_port;
819 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
820 char sbib_pl[MLXSW_REG_SBIB_LEN];
824 /* A given (source port, direction) can only be bound to one analyzer,
825 * so if a binding is requested, check for conflicts.
828 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
829 struct mlxsw_sp_span_entry *curr =
830 &mlxsw_sp->span.entries[i];
832 if (mlxsw_sp_span_entry_bound_port_find(curr, type,
837 /* if it is an egress SPAN, bind a shared buffer to it */
838 if (type == MLXSW_SP_SPAN_EGRESS) {
839 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
842 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
843 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
845 netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
851 err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
857 inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
858 if (!inspected_port) {
860 goto err_inspected_port_alloc;
862 inspected_port->local_port = port->local_port;
863 inspected_port->type = type;
864 inspected_port->bound = bind;
865 list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
/* Unwind: undo the MPAR bind, then release the egress buffer. */
869 err_inspected_port_alloc:
871 mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
874 if (type == MLXSW_SP_SPAN_EGRESS) {
875 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
876 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
/* Detach a source port from a SPAN entry: unbind via MPAR, free the
 * egress SBIB buffer (size 0), drop the entry reference, and release
 * the tracking struct.
 */
882 mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
883 struct mlxsw_sp_span_entry *span_entry,
884 enum mlxsw_sp_span_type type,
887 struct mlxsw_sp_span_inspected_port *inspected_port;
888 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
889 char sbib_pl[MLXSW_REG_SBIB_LEN];
891 inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
897 mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
899 /* remove the SBIB buffer if it was egress SPAN */
900 if (type == MLXSW_SP_SPAN_EGRESS) {
901 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
902 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
905 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
907 list_del(&inspected_port->list);
908 kfree(inspected_port);
/* Select the handler for to_dev: first entry in
 * mlxsw_sp_span_entry_types whose can_handle matches (the not-found
 * return is elided in this chunk).
 */
911 static const struct mlxsw_sp_span_entry_ops *
912 mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
913 const struct net_device *to_dev)
917 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
918 if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
919 return mlxsw_sp_span_entry_types[i];
/* Entry point for adding a mirror session from "from" to to_dev:
 * pick a handler, resolve parms, get/create the SPAN entry, bind the
 * source port, and report the SPAN id through the elided p_span_id
 * out-parameter. Error-return lines are elided in this chunk.
 */
924 int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
925 const struct net_device *to_dev,
926 enum mlxsw_sp_span_type type, bool bind,
929 struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
930 const struct mlxsw_sp_span_entry_ops *ops;
931 struct mlxsw_sp_span_parms sparms = {NULL};
932 struct mlxsw_sp_span_entry *span_entry;
935 ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
937 netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
941 err = ops->parms(to_dev, &sparms);
945 span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
949 netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
952 err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
956 *p_span_id = span_entry->id;
/* Unwind path: release the entry reference taken above. */
960 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
/* Remove a mirror session previously created by mlxsw_sp_span_mirror_add,
 * identified by span_id. Missing entries are only logged.
 */
964 void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
965 enum mlxsw_sp_span_type type, bool bind)
967 struct mlxsw_sp_span_entry *span_entry;
969 span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
971 netdev_err(from->dev, "no span entry found\n");
975 netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
977 mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
/* Re-resolve parms for every live SPAN entry (e.g. after routing or
 * topology changes) and re-program hardware for entries whose resolved
 * parms differ from what is currently configured. The function's tail
 * runs past the end of this chunk.
 */
980 void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
986 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
987 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
988 struct mlxsw_sp_span_parms sparms = {NULL};
990 if (!curr->ref_count)
993 err = curr->ops->parms(curr->to_dev, &sparms);
/* Only touch hardware when the resolved parameters changed. */
997 if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
998 mlxsw_sp_span_entry_deconfigure(curr);
999 mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);