1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
4 #include <linux/if_bridge.h>
5 #include <linux/list.h>
6 #include <linux/mutex.h>
7 #include <linux/refcount.h>
8 #include <linux/rtnetlink.h>
9 #include <linux/workqueue.h>
13 #include <net/ndisc.h>
14 #include <net/ip6_tunnel.h>
17 #include "spectrum_ipip.h"
18 #include "spectrum_span.h"
19 #include "spectrum_switchdev.h"
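/* Per-ASIC SPAN (mirroring) state. A SPAN agent is represented by one of the
 * 'entries' below and maps a mirror destination (physical port, CPU port,
 * gretap/ip6gretap tunnel or VLAN device) to a hardware analyzer entry.
 * Analyzed ports and trigger bindings are tracked in separate lists so they
 * can be reference counted independently of the agents themselves.
 */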
21 struct mlxsw_sp_span {
22 struct work_struct work;
23 struct mlxsw_sp *mlxsw_sp;
24 const struct mlxsw_sp_span_trigger_ops **span_trigger_ops_arr;
25 const struct mlxsw_sp_span_entry_ops **span_entry_ops_arr;
26 size_t span_entry_ops_arr_size;
27 struct list_head analyzed_ports_list;
28 struct mutex analyzed_ports_lock; /* Protects analyzed_ports_list */
29 struct list_head trigger_entries_list;
31 refcount_t policer_id_base_ref_count;
32 atomic_t active_entries_count;
34 struct mlxsw_sp_span_entry entries[] __counted_by(entries_count);
37 struct mlxsw_sp_span_analyzed_port {
38 struct list_head list; /* Member of analyzed_ports_list */
44 struct mlxsw_sp_span_trigger_entry {
45 struct list_head list; /* Member of trigger_entries_list */
46 struct mlxsw_sp_span *span;
47 const struct mlxsw_sp_span_trigger_ops *ops;
50 enum mlxsw_sp_span_trigger trigger;
51 struct mlxsw_sp_span_trigger_parms parms;
54 enum mlxsw_sp_span_trigger_type {
55 MLXSW_SP_SPAN_TRIGGER_TYPE_PORT,
56 MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL,
59 struct mlxsw_sp_span_trigger_ops {
60 int (*bind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
61 void (*unbind)(struct mlxsw_sp_span_trigger_entry *trigger_entry);
62 bool (*matches)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
63 enum mlxsw_sp_span_trigger trigger,
64 struct mlxsw_sp_port *mlxsw_sp_port);
65 int (*enable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
66 struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
67 void (*disable)(struct mlxsw_sp_span_trigger_entry *trigger_entry,
68 struct mlxsw_sp_port *mlxsw_sp_port, u8 tc);
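/* Trigger callbacks: bind()/unbind() program the trigger itself (e.g. the
 * MPAR register for per-port triggers), matches() is used to look up an
 * existing trigger entry, and enable()/disable() toggle the trigger for a
 * specific traffic class. For per-port triggers enable()/disable() are
 * no-ops, since the trigger is already active once bound.
 */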
71 static void mlxsw_sp_span_respin_work(struct work_struct *work);
73 static u64 mlxsw_sp_span_occ_get(void *priv)
75 const struct mlxsw_sp *mlxsw_sp = priv;
77 return atomic_read(&mlxsw_sp->span->active_entries_count);
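/* Allocate the SPAN context with one agent entry per MAX_SPAN resource, let
 * the per-ASIC init callback install the entry/trigger ops arrays and
 * register an occupancy getter so the number of active agents is reported
 * for the MLXSW_SP_RESOURCE_SPAN devlink resource.
 */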
80 int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
82 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
83 struct mlxsw_sp_span *span;
84 int i, entries_count, err;
86 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
89 entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_SPAN);
90 span = kzalloc(struct_size(span, entries, entries_count), GFP_KERNEL);
93 refcount_set(&span->policer_id_base_ref_count, 0);
94 span->entries_count = entries_count;
95 atomic_set(&span->active_entries_count, 0);
96 mutex_init(&span->analyzed_ports_lock);
97 INIT_LIST_HEAD(&span->analyzed_ports_list);
98 INIT_LIST_HEAD(&span->trigger_entries_list);
99 span->mlxsw_sp = mlxsw_sp;
100 mlxsw_sp->span = span;
102 for (i = 0; i < mlxsw_sp->span->entries_count; i++)
103 mlxsw_sp->span->entries[i].id = i;
105 err = mlxsw_sp->span_ops->init(mlxsw_sp);
109 devl_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_SPAN,
110 mlxsw_sp_span_occ_get, mlxsw_sp);
111 INIT_WORK(&span->work, mlxsw_sp_span_respin_work);
116 mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
117 kfree(mlxsw_sp->span);
121 void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
123 struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
125 cancel_work_sync(&mlxsw_sp->span->work);
126 devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_SPAN);
128 WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->trigger_entries_list));
129 WARN_ON_ONCE(!list_empty(&mlxsw_sp->span->analyzed_ports_list));
130 mutex_destroy(&mlxsw_sp->span->analyzed_ports_lock);
131 kfree(mlxsw_sp->span);
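/* SPAN agent ops. can_handle() recognizes a mirror destination netdevice,
 * parms_set() resolves it to an egress port and (for remote SPAN)
 * encapsulation parameters, and configure()/deconfigure() program the
 * resulting analyzer entry through the MPAT register.
 */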
134 static bool mlxsw_sp1_span_cpu_can_handle(const struct net_device *dev)
139 static int mlxsw_sp1_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
140 const struct net_device *to_dev,
141 struct mlxsw_sp_span_parms *sparmsp)
147 mlxsw_sp1_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
148 struct mlxsw_sp_span_parms sparms)
154 mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
159 struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = {
161 .can_handle = mlxsw_sp1_span_cpu_can_handle,
162 .parms_set = mlxsw_sp1_span_entry_cpu_parms,
163 .configure = mlxsw_sp1_span_entry_cpu_configure,
164 .deconfigure = mlxsw_sp1_span_entry_cpu_deconfigure,
168 mlxsw_sp_span_entry_phys_parms(struct mlxsw_sp *mlxsw_sp,
169 const struct net_device *to_dev,
170 struct mlxsw_sp_span_parms *sparmsp)
172 sparmsp->dest_port = netdev_priv(to_dev);
177 mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
178 struct mlxsw_sp_span_parms sparms)
180 struct mlxsw_sp_port *dest_port = sparms.dest_port;
181 struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
182 u16 local_port = dest_port->local_port;
183 char mpat_pl[MLXSW_REG_MPAT_LEN];
184 int pa_id = span_entry->id;
186 /* Create a new port analyzer entry for local_port. */
187 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
188 MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
189 mlxsw_reg_mpat_session_id_set(mpat_pl, sparms.session_id);
190 mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
191 mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
193 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
197 mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
198 enum mlxsw_reg_mpat_span_type span_type)
200 struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
201 struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
202 u16 local_port = dest_port->local_port;
203 char mpat_pl[MLXSW_REG_MPAT_LEN];
204 int pa_id = span_entry->id;
206 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
207 mlxsw_reg_mpat_session_id_set(mpat_pl, span_entry->parms.session_id);
208 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
212 mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
214 mlxsw_sp_span_entry_deconfigure_common(span_entry,
215 MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
219 struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
221 .can_handle = mlxsw_sp_port_dev_check,
222 .parms_set = mlxsw_sp_span_entry_phys_parms,
223 .configure = mlxsw_sp_span_entry_phys_configure,
224 .deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
227 static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
229 struct net_device *dev,
230 unsigned char dmac[ETH_ALEN])
232 struct neighbour *neigh = neigh_lookup(tbl, pkey, dev);
236 neigh = neigh_create(tbl, pkey, dev);
238 return PTR_ERR(neigh);
241 neigh_event_send(neigh, NULL);
243 read_lock_bh(&neigh->lock);
244 if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
245 memcpy(dmac, neigh->ha, ETH_ALEN);
248 read_unlock_bh(&neigh->lock);
250 neigh_release(neigh);
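/* mlxsw_sp_span_dmac() resolves the destination MAC via the neighbour
 * table, kicking off neighbour resolution if the entry is not valid yet.
 * When resolution (or any other parameter lookup) fails, the agent is left
 * in place but marked unoffloadable by clearing dest_port; a later respin
 * may succeed and reprogram it.
 */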
255 mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
257 sparmsp->dest_port = NULL;
261 static struct net_device *
262 mlxsw_sp_span_entry_bridge_8021q(const struct net_device *br_dev,
266 struct bridge_vlan_info vinfo;
267 struct net_device *edev;
270 if (!vid && WARN_ON(br_vlan_get_pvid(br_dev, &vid)))
272 if (!vid || br_vlan_get_info(br_dev, vid, &vinfo) ||
273 !(vinfo.flags & BRIDGE_VLAN_INFO_BRENTRY))
276 edev = br_fdb_find_port(br_dev, dmac, vid);
280 if (br_vlan_get_info(edev, vid, &vinfo))
282 if (vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)
289 static struct net_device *
290 mlxsw_sp_span_entry_bridge_8021d(const struct net_device *br_dev,
293 return br_fdb_find_port(br_dev, dmac, 0);
296 static struct net_device *
297 mlxsw_sp_span_entry_bridge(const struct net_device *br_dev,
298 unsigned char dmac[ETH_ALEN],
301 struct mlxsw_sp_bridge_port *bridge_port;
302 enum mlxsw_reg_spms_state spms_state;
303 struct net_device *dev = NULL;
304 struct mlxsw_sp_port *port;
307 if (br_vlan_enabled(br_dev))
308 dev = mlxsw_sp_span_entry_bridge_8021q(br_dev, dmac, p_vid);
310 dev = mlxsw_sp_span_entry_bridge_8021d(br_dev, dmac);
314 port = mlxsw_sp_port_dev_lower_find(dev);
318 bridge_port = mlxsw_sp_bridge_port_find(port->mlxsw_sp->bridge, dev);
322 stp_state = mlxsw_sp_bridge_port_stp_state(bridge_port);
323 spms_state = mlxsw_sp_stp_spms_state(stp_state);
324 if (spms_state != MLXSW_REG_SPMS_STATE_FORWARDING)
330 static struct net_device *
331 mlxsw_sp_span_entry_vlan(const struct net_device *vlan_dev,
334 *p_vid = vlan_dev_vlan_id(vlan_dev);
335 return vlan_dev_real_dev(vlan_dev);
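/* When the resolved egress device is a LAG, mirror through the first member
 * port that is carrier-up, allowed to transmit and belongs to this driver.
 */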
338 static struct net_device *
339 mlxsw_sp_span_entry_lag(struct net_device *lag_dev)
341 struct net_device *dev;
342 struct list_head *iter;
344 netdev_for_each_lower_dev(lag_dev, dev, iter)
345 if (netif_carrier_ok(dev) &&
346 net_lag_port_dev_txable(dev) &&
347 mlxsw_sp_port_dev_check(dev))
353 static __maybe_unused int
354 mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *edev,
355 union mlxsw_sp_l3addr saddr,
356 union mlxsw_sp_l3addr daddr,
357 union mlxsw_sp_l3addr gw,
359 struct neigh_table *tbl,
360 struct mlxsw_sp_span_parms *sparmsp)
362 unsigned char dmac[ETH_ALEN];
365 if (mlxsw_sp_l3addr_is_zero(gw))
368 if (!edev || mlxsw_sp_span_dmac(tbl, &gw, edev, dmac))
371 if (is_vlan_dev(edev))
372 edev = mlxsw_sp_span_entry_vlan(edev, &vid);
374 if (netif_is_bridge_master(edev)) {
375 edev = mlxsw_sp_span_entry_bridge(edev, dmac, &vid);
380 if (is_vlan_dev(edev)) {
381 if (vid || !(edev->flags & IFF_UP))
383 edev = mlxsw_sp_span_entry_vlan(edev, &vid);
386 if (netif_is_lag_master(edev)) {
387 if (!(edev->flags & IFF_UP))
389 edev = mlxsw_sp_span_entry_lag(edev);
394 if (!mlxsw_sp_port_dev_check(edev))
397 sparmsp->dest_port = netdev_priv(edev);
399 memcpy(sparmsp->dmac, dmac, ETH_ALEN);
400 memcpy(sparmsp->smac, edev->dev_addr, ETH_ALEN);
401 sparmsp->saddr = saddr;
402 sparmsp->daddr = daddr;
407 return mlxsw_sp_span_entry_unoffloadable(sparmsp);
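/* The GRE sections below offload mirroring into gretap/ip6gretap
 * netdevices: the hardware encapsulates the mirrored frame itself, so the
 * route towards the tunnel remote must be resolved here. The route lookup
 * yields the underlay egress device and gateway, and
 * mlxsw_sp_span_entry_tunnel_parms_common() above then peels off VLAN,
 * bridge and LAG layers until an mlxsw physical port is found.
 */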
410 #if IS_ENABLED(CONFIG_NET_IPGRE)
411 static struct net_device *
412 mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
413 __be32 *saddrp, __be32 *daddrp)
415 struct ip_tunnel *tun = netdev_priv(to_dev);
416 struct net_device *dev = NULL;
417 struct ip_tunnel_parm parms;
418 struct rtable *rt = NULL;
421 /* We assume "dev" stays valid after rt is put. */
424 parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
425 ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
426 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
429 rt = ip_route_output_key(tun->net, &fl4);
433 if (rt->rt_type != RTN_UNICAST)
438 if (rt->rt_gw_family == AF_INET)
439 *daddrp = rt->rt_gw4;
440 /* cannot offload if the route has an IPv6 gateway */
441 else if (rt->rt_gw_family == AF_INET6)
450 mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
451 const struct net_device *to_dev,
452 struct mlxsw_sp_span_parms *sparmsp)
454 struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
455 union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
456 union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
457 bool inherit_tos = tparm.iph.tos & 0x1;
458 bool inherit_ttl = !tparm.iph.ttl;
459 union mlxsw_sp_l3addr gw = daddr;
460 struct net_device *l3edev;
462 if (!(to_dev->flags & IFF_UP) ||
463 /* Reject tunnels with GRE keys, checksums, etc. */
464 tparm.i_flags || tparm.o_flags ||
465 /* Require a fixed TTL and a TOS copied from the mirrored packet. */
466 inherit_ttl || !inherit_tos ||
467 /* A destination address may not be "any". */
468 mlxsw_sp_l3addr_is_zero(daddr))
469 return mlxsw_sp_span_entry_unoffloadable(sparmsp);
471 l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
472 return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
478 mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
479 struct mlxsw_sp_span_parms sparms)
481 struct mlxsw_sp_port *dest_port = sparms.dest_port;
482 struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
483 u16 local_port = dest_port->local_port;
484 char mpat_pl[MLXSW_REG_MPAT_LEN];
485 int pa_id = span_entry->id;
487 /* Create a new port analyzer entry for local_port. */
488 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
489 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
490 mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
491 mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
492 mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
493 mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
494 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
495 sparms.dmac, !!sparms.vid);
496 mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
497 sparms.ttl, sparms.smac,
498 be32_to_cpu(sparms.saddr.addr4),
499 be32_to_cpu(sparms.daddr.addr4));
501 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
505 mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
507 mlxsw_sp_span_entry_deconfigure_common(span_entry,
508 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
511 static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
512 .can_handle = netif_is_gretap,
513 .parms_set = mlxsw_sp_span_entry_gretap4_parms,
514 .configure = mlxsw_sp_span_entry_gretap4_configure,
515 .deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
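/* The ip6gretap path below mirrors the IPv4 one, using ip6_route_output()
 * for the underlay route lookup.
 */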
519 #if IS_ENABLED(CONFIG_IPV6_GRE)
520 static struct net_device *
521 mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
522 struct in6_addr *saddrp,
523 struct in6_addr *daddrp)
525 struct ip6_tnl *t = netdev_priv(to_dev);
526 struct flowi6 fl6 = t->fl.u.ip6;
527 struct net_device *dev = NULL;
528 struct dst_entry *dst;
529 struct rt6_info *rt6;
531 /* We assume "dev" stays valid after dst is released. */
534 fl6.flowi6_mark = t->parms.fwmark;
535 if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
538 dst = ip6_route_output(t->net, NULL, &fl6);
539 if (!dst || dst->error)
542 rt6 = container_of(dst, struct rt6_info, dst);
546 *daddrp = rt6->rt6i_gateway;
554 mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
555 const struct net_device *to_dev,
556 struct mlxsw_sp_span_parms *sparmsp)
558 struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
559 bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
560 union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
561 union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
562 bool inherit_ttl = !tparm.hop_limit;
563 union mlxsw_sp_l3addr gw = daddr;
564 struct net_device *l3edev;
566 if (!(to_dev->flags & IFF_UP) ||
567 /* Reject tunnels with GRE keys, checksums, etc. */
568 tparm.i_flags || tparm.o_flags ||
569 /* Require a fixed TTL and a TOS copied from the mirrored packet. */
570 inherit_ttl || !inherit_tos ||
571 /* A destination address may not be "any". */
572 mlxsw_sp_l3addr_is_zero(daddr))
573 return mlxsw_sp_span_entry_unoffloadable(sparmsp);
575 l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
576 return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
582 mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
583 struct mlxsw_sp_span_parms sparms)
585 struct mlxsw_sp_port *dest_port = sparms.dest_port;
586 struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
587 u16 local_port = dest_port->local_port;
588 char mpat_pl[MLXSW_REG_MPAT_LEN];
589 int pa_id = span_entry->id;
591 /* Create a new port analyzer entry for local_port. */
592 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
593 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
594 mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
595 mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
596 mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
597 mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
598 MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
599 sparms.dmac, !!sparms.vid);
600 mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
604 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
608 mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
610 mlxsw_sp_span_entry_deconfigure_common(span_entry,
611 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
615 struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
616 .can_handle = netif_is_ip6gretap,
617 .parms_set = mlxsw_sp_span_entry_gretap6_parms,
618 .configure = mlxsw_sp_span_entry_gretap6_configure,
619 .deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
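/* Mirror-to-VLAN: the destination is a VLAN device on top of an mlxsw port,
 * and the mirrored copy is sent out tagged with that VLAN
 * (MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH).
 */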
624 mlxsw_sp_span_vlan_can_handle(const struct net_device *dev)
626 return is_vlan_dev(dev) &&
627 mlxsw_sp_port_dev_check(vlan_dev_real_dev(dev));
631 mlxsw_sp_span_entry_vlan_parms(struct mlxsw_sp *mlxsw_sp,
632 const struct net_device *to_dev,
633 struct mlxsw_sp_span_parms *sparmsp)
635 struct net_device *real_dev;
638 if (!(to_dev->flags & IFF_UP))
639 return mlxsw_sp_span_entry_unoffloadable(sparmsp);
641 real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
642 sparmsp->dest_port = netdev_priv(real_dev);
648 mlxsw_sp_span_entry_vlan_configure(struct mlxsw_sp_span_entry *span_entry,
649 struct mlxsw_sp_span_parms sparms)
651 struct mlxsw_sp_port *dest_port = sparms.dest_port;
652 struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
653 u16 local_port = dest_port->local_port;
654 char mpat_pl[MLXSW_REG_MPAT_LEN];
655 int pa_id = span_entry->id;
657 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
658 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
659 mlxsw_reg_mpat_pide_set(mpat_pl, sparms.policer_enable);
660 mlxsw_reg_mpat_pid_set(mpat_pl, sparms.policer_id);
661 mlxsw_reg_mpat_eth_rspan_pack(mpat_pl, sparms.vid);
663 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
667 mlxsw_sp_span_entry_vlan_deconfigure(struct mlxsw_sp_span_entry *span_entry)
669 mlxsw_sp_span_entry_deconfigure_common(span_entry,
670 MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH);
674 struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_vlan = {
675 .can_handle = mlxsw_sp_span_vlan_can_handle,
676 .parms_set = mlxsw_sp_span_entry_vlan_parms,
677 .configure = mlxsw_sp_span_entry_vlan_configure,
678 .deconfigure = mlxsw_sp_span_entry_vlan_deconfigure,
682 struct mlxsw_sp_span_entry_ops *mlxsw_sp1_span_entry_ops_arr[] = {
683 &mlxsw_sp1_span_entry_ops_cpu,
684 &mlxsw_sp_span_entry_ops_phys,
685 #if IS_ENABLED(CONFIG_NET_IPGRE)
686 &mlxsw_sp_span_entry_ops_gretap4,
688 #if IS_ENABLED(CONFIG_IPV6_GRE)
689 &mlxsw_sp_span_entry_ops_gretap6,
691 &mlxsw_sp_span_entry_ops_vlan,
694 static bool mlxsw_sp2_span_cpu_can_handle(const struct net_device *dev)
699 static int mlxsw_sp2_span_entry_cpu_parms(struct mlxsw_sp *mlxsw_sp,
700 const struct net_device *to_dev,
701 struct mlxsw_sp_span_parms *sparmsp)
703 sparmsp->dest_port = mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
708 mlxsw_sp2_span_entry_cpu_configure(struct mlxsw_sp_span_entry *span_entry,
709 struct mlxsw_sp_span_parms sparms)
711 /* Mirroring to the CPU port is like mirroring to any other physical
712 * port; the CPU port's local port number is simply used in place of a
* front-panel port's.
714 return mlxsw_sp_span_entry_phys_configure(span_entry, sparms);
718 mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry)
720 enum mlxsw_reg_mpat_span_type span_type;
722 span_type = MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH;
723 mlxsw_sp_span_entry_deconfigure_common(span_entry, span_type);
727 struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = {
729 .can_handle = mlxsw_sp2_span_cpu_can_handle,
730 .parms_set = mlxsw_sp2_span_entry_cpu_parms,
731 .configure = mlxsw_sp2_span_entry_cpu_configure,
732 .deconfigure = mlxsw_sp2_span_entry_cpu_deconfigure,
736 struct mlxsw_sp_span_entry_ops *mlxsw_sp2_span_entry_ops_arr[] = {
737 &mlxsw_sp2_span_entry_ops_cpu,
738 &mlxsw_sp_span_entry_ops_phys,
739 #if IS_ENABLED(CONFIG_NET_IPGRE)
740 &mlxsw_sp_span_entry_ops_gretap4,
742 #if IS_ENABLED(CONFIG_IPV6_GRE)
743 &mlxsw_sp_span_entry_ops_gretap6,
745 &mlxsw_sp_span_entry_ops_vlan,
749 mlxsw_sp_span_entry_nop_parms(struct mlxsw_sp *mlxsw_sp,
750 const struct net_device *to_dev,
751 struct mlxsw_sp_span_parms *sparmsp)
753 return mlxsw_sp_span_entry_unoffloadable(sparmsp);
757 mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
758 struct mlxsw_sp_span_parms sparms)
764 mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
768 static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
769 .parms_set = mlxsw_sp_span_entry_nop_parms,
770 .configure = mlxsw_sp_span_entry_nop_configure,
771 .deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
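/* The nop ops are installed by mlxsw_sp_span_entry_invalidate() when the
 * destination netdevice can no longer be used, keeping the agent allocated
 * but not mirroring anywhere.
 */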
775 mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
776 struct mlxsw_sp_span_entry *span_entry,
777 struct mlxsw_sp_span_parms sparms)
781 if (!sparms.dest_port)
784 if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
785 dev_err(mlxsw_sp->bus_info->dev,
786 "Cannot mirror to a port which belongs to a different mlxsw instance\n");
787 sparms.dest_port = NULL;
791 err = span_entry->ops->configure(span_entry, sparms);
793 dev_err(mlxsw_sp->bus_info->dev, "Failed to offload mirror\n");
794 sparms.dest_port = NULL;
799 span_entry->parms = sparms;
803 mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
805 if (span_entry->parms.dest_port)
806 span_entry->ops->deconfigure(span_entry);
809 static int mlxsw_sp_span_policer_id_base_set(struct mlxsw_sp_span *span,
812 struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
816 /* Policers set on SPAN agents must be in the range of
817 * `policer_id_base .. policer_id_base + max_span_agents - 1`. If the
818 * base is set and the new policer is not within the range, then we
821 if (refcount_read(&span->policer_id_base_ref_count)) {
822 if (policer_id < span->policer_id_base ||
823 policer_id >= span->policer_id_base + span->entries_count)
826 refcount_inc(&span->policer_id_base_ref_count);
830 /* Base must be even. */
831 policer_id_base = policer_id % 2 == 0 ? policer_id : policer_id - 1;
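/* e.g. a first policer with id 5 sets the base to 4; subsequent SPAN
 * policers must then fall within [4, 4 + entries_count - 1].
 */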
832 err = mlxsw_sp->span_ops->policer_id_base_set(mlxsw_sp,
837 span->policer_id_base = policer_id_base;
838 refcount_set(&span->policer_id_base_ref_count, 1);
843 static void mlxsw_sp_span_policer_id_base_unset(struct mlxsw_sp_span *span)
845 if (refcount_dec_and_test(&span->policer_id_base_ref_count))
846 span->policer_id_base = 0;
849 static struct mlxsw_sp_span_entry *
850 mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
851 const struct net_device *to_dev,
852 const struct mlxsw_sp_span_entry_ops *ops,
853 struct mlxsw_sp_span_parms sparms)
855 struct mlxsw_sp_span_entry *span_entry = NULL;
858 /* find a free entry to use */
859 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
860 if (!refcount_read(&mlxsw_sp->span->entries[i].ref_count)) {
861 span_entry = &mlxsw_sp->span->entries[i];
868 if (sparms.policer_enable) {
871 err = mlxsw_sp_span_policer_id_base_set(mlxsw_sp->span,
877 atomic_inc(&mlxsw_sp->span->active_entries_count);
878 span_entry->ops = ops;
879 refcount_set(&span_entry->ref_count, 1);
880 span_entry->to_dev = to_dev;
881 mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);
886 static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
887 struct mlxsw_sp_span_entry *span_entry)
889 mlxsw_sp_span_entry_deconfigure(span_entry);
890 atomic_dec(&mlxsw_sp->span->active_entries_count);
891 if (span_entry->parms.policer_enable)
892 mlxsw_sp_span_policer_id_base_unset(mlxsw_sp->span);
895 struct mlxsw_sp_span_entry *
896 mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
897 const struct net_device *to_dev)
901 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
902 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
904 if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev)
910 void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
911 struct mlxsw_sp_span_entry *span_entry)
913 mlxsw_sp_span_entry_deconfigure(span_entry);
914 span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
917 static struct mlxsw_sp_span_entry *
918 mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
922 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
923 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
925 if (refcount_read(&curr->ref_count) && curr->id == span_id)
931 static struct mlxsw_sp_span_entry *
932 mlxsw_sp_span_entry_find_by_parms(struct mlxsw_sp *mlxsw_sp,
933 const struct net_device *to_dev,
934 const struct mlxsw_sp_span_parms *sparms)
938 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
939 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
941 if (refcount_read(&curr->ref_count) && curr->to_dev == to_dev &&
942 curr->parms.policer_enable == sparms->policer_enable &&
943 curr->parms.policer_id == sparms->policer_id &&
944 curr->parms.session_id == sparms->session_id)
950 static struct mlxsw_sp_span_entry *
951 mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
952 const struct net_device *to_dev,
953 const struct mlxsw_sp_span_entry_ops *ops,
954 struct mlxsw_sp_span_parms sparms)
956 struct mlxsw_sp_span_entry *span_entry;
958 span_entry = mlxsw_sp_span_entry_find_by_parms(mlxsw_sp, to_dev,
961 /* Already exists, just take a reference */
962 refcount_inc(&span_entry->ref_count);
966 return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
969 static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
970 struct mlxsw_sp_span_entry *span_entry)
972 if (refcount_dec_and_test(&span_entry->ref_count))
973 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
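/* Egress mirroring requires an internal mirroring buffer in the headroom of
 * the analyzed port; it is sized and toggled through the port headroom
 * (hdroom) configuration.
 */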
977 static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
979 struct mlxsw_sp_hdroom hdroom;
981 hdroom = *mlxsw_sp_port->hdroom;
982 hdroom.int_buf.enable = enable;
983 mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
985 return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
989 mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
991 return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
994 static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
996 mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
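/* Analyzed ports (ports whose traffic is mirrored) are reference counted
 * per {local_port, direction} so multiple mirroring sessions can share
 * them; the list is protected by analyzed_ports_lock.
 */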
999 static struct mlxsw_sp_span_analyzed_port *
1000 mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u16 local_port,
1003 struct mlxsw_sp_span_analyzed_port *analyzed_port;
1005 list_for_each_entry(analyzed_port, &span->analyzed_ports_list, list) {
1006 if (analyzed_port->local_port == local_port &&
1007 analyzed_port->ingress == ingress)
1008 return analyzed_port;
1014 static const struct mlxsw_sp_span_entry_ops *
1015 mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
1016 const struct net_device *to_dev)
1018 struct mlxsw_sp_span *span = mlxsw_sp->span;
1021 for (i = 0; i < span->span_entry_ops_arr_size; ++i)
1022 if (span->span_entry_ops_arr[i]->can_handle(to_dev))
1023 return span->span_entry_ops_arr[i];
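/* Respin re-resolves the parameters of every active, non-static agent and
 * reprograms the hardware where they changed. It is scheduled via
 * mlxsw_sp_span_respin() below, typically when the topology around a
 * mirror destination may have changed (e.g. FDB or neighbour updates).
 */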
1028 static void mlxsw_sp_span_respin_work(struct work_struct *work)
1030 struct mlxsw_sp_span *span;
1031 struct mlxsw_sp *mlxsw_sp;
1034 span = container_of(work, struct mlxsw_sp_span, work);
1035 mlxsw_sp = span->mlxsw_sp;
1038 for (i = 0; i < mlxsw_sp->span->entries_count; i++) {
1039 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span->entries[i];
1040 struct mlxsw_sp_span_parms sparms = {NULL};
1042 if (!refcount_read(&curr->ref_count))
1045 if (curr->ops->is_static)
1048 err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms);
1052 if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
1053 mlxsw_sp_span_entry_deconfigure(curr);
1054 mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
1060 void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
1062 if (atomic_read(&mlxsw_sp->span->active_entries_count) == 0)
1064 mlxsw_core_schedule_work(&mlxsw_sp->span->work);
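/* Public SPAN API. A rough usage sketch for a mirroring client (error
 * handling omitted, local variable names illustrative):
 *
 *	err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, &agent_parms);
 *	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
 *	err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
 *				       &trigger_parms);
 *	...
 *	mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port,
 *				   &trigger_parms);
 *	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
 *	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
 *
 * Global triggers additionally require mlxsw_sp_span_trigger_enable() per
 * traffic class.
 */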
1067 int mlxsw_sp_span_agent_get(struct mlxsw_sp *mlxsw_sp, int *p_span_id,
1068 const struct mlxsw_sp_span_agent_parms *parms)
1070 const struct net_device *to_dev = parms->to_dev;
1071 const struct mlxsw_sp_span_entry_ops *ops;
1072 struct mlxsw_sp_span_entry *span_entry;
1073 struct mlxsw_sp_span_parms sparms;
1078 ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
1080 dev_err(mlxsw_sp->bus_info->dev, "Cannot mirror to requested destination\n");
1084 memset(&sparms, 0, sizeof(sparms));
1085 err = ops->parms_set(mlxsw_sp, to_dev, &sparms);
1089 sparms.policer_id = parms->policer_id;
1090 sparms.policer_enable = parms->policer_enable;
1091 sparms.session_id = parms->session_id;
1092 span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
1096 *p_span_id = span_entry->id;
1101 void mlxsw_sp_span_agent_put(struct mlxsw_sp *mlxsw_sp, int span_id)
1103 struct mlxsw_sp_span_entry *span_entry;
1107 span_entry = mlxsw_sp_span_entry_find_by_id(mlxsw_sp, span_id);
1108 if (WARN_ON_ONCE(!span_entry))
1111 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
1114 static struct mlxsw_sp_span_analyzed_port *
1115 mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
1116 struct mlxsw_sp_port *mlxsw_sp_port,
1119 struct mlxsw_sp_span_analyzed_port *analyzed_port;
1122 analyzed_port = kzalloc(sizeof(*analyzed_port), GFP_KERNEL);
1124 return ERR_PTR(-ENOMEM);
1126 refcount_set(&analyzed_port->ref_count, 1);
1127 analyzed_port->local_port = mlxsw_sp_port->local_port;
1128 analyzed_port->ingress = ingress;
1129 list_add_tail(&analyzed_port->list, &span->analyzed_ports_list);
1131 /* An egress mirror buffer should be allocated on the egress port which
1132 * does the mirroring.
1135 err = mlxsw_sp_span_port_buffer_enable(mlxsw_sp_port);
1137 goto err_buffer_update;
1140 return analyzed_port;
1143 list_del(&analyzed_port->list);
1144 kfree(analyzed_port);
1145 return ERR_PTR(err);
1149 mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
1150 struct mlxsw_sp_span_analyzed_port *
1153 /* Remove egress mirror buffer now that port is no longer analyzed
1156 if (!analyzed_port->ingress)
1157 mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);
1159 list_del(&analyzed_port->list);
1160 kfree(analyzed_port);
1163 int mlxsw_sp_span_analyzed_port_get(struct mlxsw_sp_port *mlxsw_sp_port,
1166 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1167 struct mlxsw_sp_span_analyzed_port *analyzed_port;
1168 u16 local_port = mlxsw_sp_port->local_port;
1171 mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
1173 analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
1174 local_port, ingress);
1175 if (analyzed_port) {
1176 refcount_inc(&analyzed_port->ref_count);
1180 analyzed_port = mlxsw_sp_span_analyzed_port_create(mlxsw_sp->span,
1183 if (IS_ERR(analyzed_port))
1184 err = PTR_ERR(analyzed_port);
1187 mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
1191 void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
1194 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1195 struct mlxsw_sp_span_analyzed_port *analyzed_port;
1196 u16 local_port = mlxsw_sp_port->local_port;
1198 mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
1200 analyzed_port = mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
1201 local_port, ingress);
1202 if (WARN_ON_ONCE(!analyzed_port))
1205 if (!refcount_dec_and_test(&analyzed_port->ref_count))
1208 mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);
1211 mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
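/* Per-port triggers (ingress/egress mirroring on a given port) are bound
 * through the MPAR register, which ties {local_port, direction} to a SPAN
 * agent together with a sampling probability rate.
 */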
1215 __mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span *span,
1216 struct mlxsw_sp_span_trigger_entry *
1217 trigger_entry, bool enable)
1219 char mpar_pl[MLXSW_REG_MPAR_LEN];
1220 enum mlxsw_reg_mpar_i_e i_e;
1222 switch (trigger_entry->trigger) {
1223 case MLXSW_SP_SPAN_TRIGGER_INGRESS:
1224 i_e = MLXSW_REG_MPAR_TYPE_INGRESS;
1226 case MLXSW_SP_SPAN_TRIGGER_EGRESS:
1227 i_e = MLXSW_REG_MPAR_TYPE_EGRESS;
1234 if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAR_RATE_MAX)
1237 mlxsw_reg_mpar_pack(mpar_pl, trigger_entry->local_port, i_e, enable,
1238 trigger_entry->parms.span_id,
1239 trigger_entry->parms.probability_rate);
1240 return mlxsw_reg_write(span->mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
1244 mlxsw_sp_span_trigger_port_bind(struct mlxsw_sp_span_trigger_entry *
1247 return __mlxsw_sp_span_trigger_port_bind(trigger_entry->span,
1248 trigger_entry, true);
1252 mlxsw_sp_span_trigger_port_unbind(struct mlxsw_sp_span_trigger_entry *
1255 __mlxsw_sp_span_trigger_port_bind(trigger_entry->span, trigger_entry,
1260 mlxsw_sp_span_trigger_port_matches(struct mlxsw_sp_span_trigger_entry *
1262 enum mlxsw_sp_span_trigger trigger,
1263 struct mlxsw_sp_port *mlxsw_sp_port)
1265 return trigger_entry->trigger == trigger &&
1266 trigger_entry->local_port == mlxsw_sp_port->local_port;
1270 mlxsw_sp_span_trigger_port_enable(struct mlxsw_sp_span_trigger_entry *
1272 struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
1274 /* Port triggers are enabled during binding. */
1279 mlxsw_sp_span_trigger_port_disable(struct mlxsw_sp_span_trigger_entry *
1281 struct mlxsw_sp_port *mlxsw_sp_port, u8 tc)
1285 static const struct mlxsw_sp_span_trigger_ops
1286 mlxsw_sp_span_trigger_port_ops = {
1287 .bind = mlxsw_sp_span_trigger_port_bind,
1288 .unbind = mlxsw_sp_span_trigger_port_unbind,
1289 .matches = mlxsw_sp_span_trigger_port_matches,
1290 .enable = mlxsw_sp_span_trigger_port_enable,
1291 .disable = mlxsw_sp_span_trigger_port_disable,
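/* Global triggers (tail drop, early drop, ECN) are bound once per ASIC
 * rather than per port. Spectrum-1 does not support them, so its callbacks
 * below are stubs; Spectrum-2 and later program the trigger through MPAGR
 * and then enable it per {port, traffic class} through MOMTE.
 */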
1295 mlxsw_sp1_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
1302 mlxsw_sp1_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
1308 mlxsw_sp1_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
1310 enum mlxsw_sp_span_trigger trigger,
1311 struct mlxsw_sp_port *mlxsw_sp_port)
1318 mlxsw_sp1_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
1320 struct mlxsw_sp_port *mlxsw_sp_port,
1327 mlxsw_sp1_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
1329 struct mlxsw_sp_port *mlxsw_sp_port,
1334 static const struct mlxsw_sp_span_trigger_ops
1335 mlxsw_sp1_span_trigger_global_ops = {
1336 .bind = mlxsw_sp1_span_trigger_global_bind,
1337 .unbind = mlxsw_sp1_span_trigger_global_unbind,
1338 .matches = mlxsw_sp1_span_trigger_global_matches,
1339 .enable = mlxsw_sp1_span_trigger_global_enable,
1340 .disable = mlxsw_sp1_span_trigger_global_disable,
1343 static const struct mlxsw_sp_span_trigger_ops *
1344 mlxsw_sp1_span_trigger_ops_arr[] = {
1345 [MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
1346 [MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
1347 &mlxsw_sp1_span_trigger_global_ops,
1351 mlxsw_sp2_span_trigger_global_bind(struct mlxsw_sp_span_trigger_entry *
1354 struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
1355 enum mlxsw_reg_mpagr_trigger trigger;
1356 char mpagr_pl[MLXSW_REG_MPAGR_LEN];
1358 switch (trigger_entry->trigger) {
1359 case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
1360 trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_SHARED_BUFFER;
1362 case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
1363 trigger = MLXSW_REG_MPAGR_TRIGGER_INGRESS_WRED;
1365 case MLXSW_SP_SPAN_TRIGGER_ECN:
1366 trigger = MLXSW_REG_MPAGR_TRIGGER_EGRESS_ECN;
1373 if (trigger_entry->parms.probability_rate > MLXSW_REG_MPAGR_RATE_MAX)
1376 mlxsw_reg_mpagr_pack(mpagr_pl, trigger, trigger_entry->parms.span_id,
1377 trigger_entry->parms.probability_rate);
1378 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpagr), mpagr_pl);
1382 mlxsw_sp2_span_trigger_global_unbind(struct mlxsw_sp_span_trigger_entry *
1385 /* There is no unbinding for global triggers. The trigger should be
1386 * disabled on all ports by now.
1391 mlxsw_sp2_span_trigger_global_matches(struct mlxsw_sp_span_trigger_entry *
1393 enum mlxsw_sp_span_trigger trigger,
1394 struct mlxsw_sp_port *mlxsw_sp_port)
1396 return trigger_entry->trigger == trigger;
1400 __mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
1402 struct mlxsw_sp_port *mlxsw_sp_port,
1405 struct mlxsw_sp *mlxsw_sp = trigger_entry->span->mlxsw_sp;
1406 char momte_pl[MLXSW_REG_MOMTE_LEN];
1407 enum mlxsw_reg_momte_type type;
1410 switch (trigger_entry->trigger) {
1411 case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
1412 type = MLXSW_REG_MOMTE_TYPE_SHARED_BUFFER_TCLASS;
1414 case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
1415 type = MLXSW_REG_MOMTE_TYPE_WRED;
1417 case MLXSW_SP_SPAN_TRIGGER_ECN:
1418 type = MLXSW_REG_MOMTE_TYPE_ECN;
1425 /* Query existing configuration in order to only change the state of
1426 * the specified traffic class.
1428 mlxsw_reg_momte_pack(momte_pl, mlxsw_sp_port->local_port, type);
1429 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
1433 mlxsw_reg_momte_tclass_en_set(momte_pl, tc, enable);
1434 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(momte), momte_pl);
1438 mlxsw_sp2_span_trigger_global_enable(struct mlxsw_sp_span_trigger_entry *
1440 struct mlxsw_sp_port *mlxsw_sp_port,
1443 return __mlxsw_sp2_span_trigger_global_enable(trigger_entry,
1444 mlxsw_sp_port, tc, true);
1448 mlxsw_sp2_span_trigger_global_disable(struct mlxsw_sp_span_trigger_entry *
1450 struct mlxsw_sp_port *mlxsw_sp_port,
1453 __mlxsw_sp2_span_trigger_global_enable(trigger_entry, mlxsw_sp_port, tc,
1457 static const struct mlxsw_sp_span_trigger_ops
1458 mlxsw_sp2_span_trigger_global_ops = {
1459 .bind = mlxsw_sp2_span_trigger_global_bind,
1460 .unbind = mlxsw_sp2_span_trigger_global_unbind,
1461 .matches = mlxsw_sp2_span_trigger_global_matches,
1462 .enable = mlxsw_sp2_span_trigger_global_enable,
1463 .disable = mlxsw_sp2_span_trigger_global_disable,
1466 static const struct mlxsw_sp_span_trigger_ops *
1467 mlxsw_sp2_span_trigger_ops_arr[] = {
1468 [MLXSW_SP_SPAN_TRIGGER_TYPE_PORT] = &mlxsw_sp_span_trigger_port_ops,
1469 [MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL] =
1470 &mlxsw_sp2_span_trigger_global_ops,
1474 mlxsw_sp_span_trigger_ops_set(struct mlxsw_sp_span_trigger_entry *trigger_entry)
1476 struct mlxsw_sp_span *span = trigger_entry->span;
1477 enum mlxsw_sp_span_trigger_type type;
1479 switch (trigger_entry->trigger) {
1480 case MLXSW_SP_SPAN_TRIGGER_INGRESS:
1481 case MLXSW_SP_SPAN_TRIGGER_EGRESS:
1482 type = MLXSW_SP_SPAN_TRIGGER_TYPE_PORT;
1484 case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
1485 case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
1486 case MLXSW_SP_SPAN_TRIGGER_ECN:
1487 type = MLXSW_SP_SPAN_TRIGGER_TYPE_GLOBAL;
1494 trigger_entry->ops = span->span_trigger_ops_arr[type];
1497 static struct mlxsw_sp_span_trigger_entry *
1498 mlxsw_sp_span_trigger_entry_create(struct mlxsw_sp_span *span,
1499 enum mlxsw_sp_span_trigger trigger,
1500 struct mlxsw_sp_port *mlxsw_sp_port,
1501 const struct mlxsw_sp_span_trigger_parms
1504 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1507 trigger_entry = kzalloc(sizeof(*trigger_entry), GFP_KERNEL);
1509 return ERR_PTR(-ENOMEM);
1511 refcount_set(&trigger_entry->ref_count, 1);
1512 trigger_entry->local_port = mlxsw_sp_port ? mlxsw_sp_port->local_port :
1514 trigger_entry->trigger = trigger;
1515 memcpy(&trigger_entry->parms, parms, sizeof(trigger_entry->parms));
1516 trigger_entry->span = span;
1517 mlxsw_sp_span_trigger_ops_set(trigger_entry);
1518 list_add_tail(&trigger_entry->list, &span->trigger_entries_list);
1520 err = trigger_entry->ops->bind(trigger_entry);
1522 goto err_trigger_entry_bind;
1524 return trigger_entry;
1526 err_trigger_entry_bind:
1527 list_del(&trigger_entry->list);
1528 kfree(trigger_entry);
1529 return ERR_PTR(err);
1533 mlxsw_sp_span_trigger_entry_destroy(struct mlxsw_sp_span *span,
1534 struct mlxsw_sp_span_trigger_entry *
1537 trigger_entry->ops->unbind(trigger_entry);
1538 list_del(&trigger_entry->list);
1539 kfree(trigger_entry);
1542 static struct mlxsw_sp_span_trigger_entry *
1543 mlxsw_sp_span_trigger_entry_find(struct mlxsw_sp_span *span,
1544 enum mlxsw_sp_span_trigger trigger,
1545 struct mlxsw_sp_port *mlxsw_sp_port)
1547 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1549 list_for_each_entry(trigger_entry, &span->trigger_entries_list, list) {
1550 if (trigger_entry->ops->matches(trigger_entry, trigger,
1552 return trigger_entry;
1558 int mlxsw_sp_span_agent_bind(struct mlxsw_sp *mlxsw_sp,
1559 enum mlxsw_sp_span_trigger trigger,
1560 struct mlxsw_sp_port *mlxsw_sp_port,
1561 const struct mlxsw_sp_span_trigger_parms *parms)
1563 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1568 if (!mlxsw_sp_span_entry_find_by_id(mlxsw_sp, parms->span_id))
1571 trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1574 if (trigger_entry) {
1575 if (trigger_entry->parms.span_id != parms->span_id ||
1576 trigger_entry->parms.probability_rate !=
1577 parms->probability_rate)
1579 refcount_inc(&trigger_entry->ref_count);
1583 trigger_entry = mlxsw_sp_span_trigger_entry_create(mlxsw_sp->span,
1587 if (IS_ERR(trigger_entry))
1588 err = PTR_ERR(trigger_entry);
1594 void mlxsw_sp_span_agent_unbind(struct mlxsw_sp *mlxsw_sp,
1595 enum mlxsw_sp_span_trigger trigger,
1596 struct mlxsw_sp_port *mlxsw_sp_port,
1597 const struct mlxsw_sp_span_trigger_parms *parms)
1599 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1603 if (WARN_ON_ONCE(!mlxsw_sp_span_entry_find_by_id(mlxsw_sp,
1607 trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1610 if (WARN_ON_ONCE(!trigger_entry))
1613 if (!refcount_dec_and_test(&trigger_entry->ref_count))
1616 mlxsw_sp_span_trigger_entry_destroy(mlxsw_sp->span, trigger_entry);
1619 int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
1620 enum mlxsw_sp_span_trigger trigger, u8 tc)
1622 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1623 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1627 trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1630 if (WARN_ON_ONCE(!trigger_entry))
1633 return trigger_entry->ops->enable(trigger_entry, mlxsw_sp_port, tc);
1636 void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
1637 enum mlxsw_sp_span_trigger trigger, u8 tc)
1639 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1640 struct mlxsw_sp_span_trigger_entry *trigger_entry;
1644 trigger_entry = mlxsw_sp_span_trigger_entry_find(mlxsw_sp->span,
1647 if (WARN_ON_ONCE(!trigger_entry))
1650 return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
1653 bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger)
1656 case MLXSW_SP_SPAN_TRIGGER_INGRESS:
1657 case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
1658 case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
1660 case MLXSW_SP_SPAN_TRIGGER_EGRESS:
1661 case MLXSW_SP_SPAN_TRIGGER_ECN:
1669 static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
1671 size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);
1673 /* Must be first to avoid NULL pointer dereference by subsequent
1674 * can_handle() callbacks.
1676 if (WARN_ON(mlxsw_sp1_span_entry_ops_arr[0] !=
1677 &mlxsw_sp1_span_entry_ops_cpu))
1680 mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp1_span_trigger_ops_arr;
1681 mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp1_span_entry_ops_arr;
1682 mlxsw_sp->span->span_entry_ops_arr_size = arr_size;
1687 static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
1688 u16 policer_id_base)
1693 const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
1694 .init = mlxsw_sp1_span_init,
1695 .policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
1698 static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
1700 size_t arr_size = ARRAY_SIZE(mlxsw_sp2_span_entry_ops_arr);
1702 /* Must be first to avoid NULL pointer dereference by subsequent
1703 * can_handle() callbacks.
1705 if (WARN_ON(mlxsw_sp2_span_entry_ops_arr[0] !=
1706 &mlxsw_sp2_span_entry_ops_cpu))
1709 mlxsw_sp->span->span_trigger_ops_arr = mlxsw_sp2_span_trigger_ops_arr;
1710 mlxsw_sp->span->span_entry_ops_arr = mlxsw_sp2_span_entry_ops_arr;
1711 mlxsw_sp->span->span_entry_ops_arr_size = arr_size;
1716 #define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
1717 #define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
1719 static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
1720 u16 policer_id_base)
1722 char mogcr_pl[MLXSW_REG_MOGCR_LEN];
1725 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
1729 mlxsw_reg_mogcr_mirroring_pid_base_set(mogcr_pl, policer_id_base);
1730 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mogcr), mogcr_pl);
1733 const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
1734 .init = mlxsw_sp2_span_init,
1735 .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
1738 const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
1739 .init = mlxsw_sp2_span_init,
1740 .policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,