1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/errno.h>
6 #include <linux/netdevice.h>
7 #include <net/flow_offload.h>
10 #include "spectrum_span.h"
/* Kinds of tc-matchall actions this module can offload: port mirroring
 * (SPAN) and packet sampling.
 */
13 enum mlxsw_sp_mall_action_type {
14 MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
15 MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
/* Per-entry state for a mirror action. Holds the destination netdev;
 * a span_id member is also referenced as mirror.span_id by the code
 * below (field not visible in this excerpt).
 */
18 struct mlxsw_sp_mall_mirror_entry {
19 const struct net_device *to_dev;
/* One offloaded matchall rule. Linked on block->mall.list and looked up
 * by tc cookie; carries the per-action state for either mirror or sample
 * (selected by 'type').
 */
23 struct mlxsw_sp_mall_entry {
24 struct list_head list;
/* tc priority of the rule; used for ordering checks against flower. */
26 unsigned int priority;
27 enum mlxsw_sp_mall_action_type type;
/* Action-specific state; only the member matching 'type' is valid. */
30 struct mlxsw_sp_mall_mirror_entry mirror;
31 struct mlxsw_sp_port_sample sample;
/* Look up the matchall entry with the given tc cookie on the block's
 * list. Presumably returns NULL when no entry matches (tail of the
 * function is not visible in this excerpt).
 */
36 static struct mlxsw_sp_mall_entry *
37 mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
39 struct mlxsw_sp_mall_entry *mall_entry;
41 list_for_each_entry(mall_entry, &block->mall.list, list)
42 if (mall_entry->cookie == cookie)
/* Program a mirror (SPAN) rule on one port: resolve a SPAN agent for the
 * destination netdev, register the port as analyzed, then bind the SPAN
 * agent to the ingress or egress trigger. Unwinds in reverse order on
 * failure via the goto labels below.
 */
49 mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
50 struct mlxsw_sp_mall_entry *mall_entry)
52 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
53 struct mlxsw_sp_span_trigger_parms parms;
54 enum mlxsw_sp_span_trigger trigger;
/* Destination may have disappeared since the rule was validated. */
57 if (!mall_entry->mirror.to_dev) {
58 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
/* Acquire (or reuse) a SPAN agent towards the destination device. */
62 err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev,
63 &mall_entry->mirror.span_id);
67 err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
70 goto err_analyzed_port_get;
/* Direction of the mirroring follows the block binding direction. */
72 trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
73 MLXSW_SP_SPAN_TRIGGER_EGRESS;
74 parms.span_id = mall_entry->mirror.span_id;
75 err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
/* Error unwind: release in reverse order of acquisition. */
83 mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
84 err_analyzed_port_get:
85 mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
/* Tear down a mirror rule on one port: unbind the SPAN agent from its
 * trigger, drop the analyzed-port reference, then release the SPAN
 * agent — exact reverse of mlxsw_sp_mall_port_mirror_add().
 */
90 mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
91 struct mlxsw_sp_mall_entry *mall_entry)
93 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
94 struct mlxsw_sp_span_trigger_parms parms;
95 enum mlxsw_sp_span_trigger trigger;
97 trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
98 MLXSW_SP_SPAN_TRIGGER_EGRESS;
99 parms.span_id = mall_entry->mirror.span_id;
100 mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
101 mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
102 mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
/* Enable or disable HW packet sampling on a port at the given rate by
 * writing the MPSC register. Returns the register-write status.
 */
105 static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
106 bool enable, u32 rate)
108 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
109 char mpsc_pl[MLXSW_REG_MPSC_LEN];
111 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
112 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
/* Install a sample rule on one port. Only a single sampler per port is
 * supported; the RCU-protected port->sample pointer is published first
 * so sampled packets arriving once HW sampling is enabled find valid
 * state, and cleared again if enabling the hardware fails.
 */
116 mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
117 struct mlxsw_sp_mall_entry *mall_entry)
/* Reject a second sampler on the same port. */
121 if (rtnl_dereference(mlxsw_sp_port->sample)) {
122 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
125 rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);
127 err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
128 mall_entry->sample.rate);
130 goto err_port_sample_set;
/* Error unwind: retract the published sample state. */
134 RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
/* Remove the sample rule from a port: disable HW sampling (rate reset
 * to 1) and clear the RCU sample pointer. No-op if none is active.
 */
139 mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
141 if (!mlxsw_sp_port->sample)
144 mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
145 RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
/* Dispatch: program one matchall entry on one port according to its
 * action type.
 */
149 mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
150 struct mlxsw_sp_mall_entry *mall_entry)
152 switch (mall_entry->type) {
153 case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
154 return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
155 case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
156 return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
/* Dispatch: remove one matchall entry from one port according to its
 * action type. Mirror of mlxsw_sp_mall_port_rule_add().
 */
164 mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
165 struct mlxsw_sp_mall_entry *mall_entry)
167 switch (mall_entry->type) {
168 case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
169 mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
171 case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
172 mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
/* Recompute the block's cached minimum and maximum matchall priorities
 * by scanning the whole entry list. When the list is empty the cached
 * values are left untouched (callers gate on list emptiness, see
 * mlxsw_sp_mall_prio_get()).
 */
179 static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
181 struct mlxsw_sp_mall_entry *mall_entry;
183 if (list_empty(&block->mall.list))
185 block->mall.min_prio = UINT_MAX;
186 block->mall.max_prio = 0;
187 list_for_each_entry(mall_entry, &block->mall.list, list) {
188 if (mall_entry->priority < block->mall.min_prio)
189 block->mall.min_prio = mall_entry->priority;
190 if (mall_entry->priority > block->mall.max_prio)
191 block->mall.max_prio = mall_entry->priority;
/* Offload a tc-matchall rule onto every port bound to @block.
 *
 * Validates the request (single action, chain 0, not a mixed
 * ingress/egress-bound block), checks priority ordering against any
 * flower rules in the same chain, allocates and fills a
 * mlxsw_sp_mall_entry, programs it on each bound port, and finally
 * links it on the block's list and refreshes the cached prio range.
 * Rolls back already-programmed ports on failure.
 */
195 int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
196 struct mlxsw_sp_flow_block *block,
197 struct tc_cls_matchall_offload *f)
199 struct mlxsw_sp_flow_block_binding *binding;
200 struct mlxsw_sp_mall_entry *mall_entry;
201 __be16 protocol = f->common.protocol;
202 struct flow_action_entry *act;
203 unsigned int flower_min_prio;
204 unsigned int flower_max_prio;
205 bool flower_prio_valid;
/* Hardware can offload exactly one action per matchall rule. */
208 if (!flow_offload_has_one_action(&f->rule->action)) {
209 NL_SET_ERR_MSG(f->common.extack, "Only singular actions are supported");
213 if (f->common.chain_index) {
214 NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
218 if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
219 NL_SET_ERR_MSG(f->common.extack, "Only not mixed bound blocks are supported");
/* Fetch the flower prio range; -ENOENT means no flower filters. */
223 err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
224 &flower_min_prio, &flower_max_prio);
226 if (err != -ENOENT) {
227 NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
230 flower_prio_valid = false;
231 /* No flower filters are installed in specified chain. */
233 flower_prio_valid = true;
236 mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
239 mall_entry->cookie = f->cookie;
240 mall_entry->priority = f->common.prio;
241 mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);
/* Single action guaranteed by the check above. */
243 act = &f->rule->action.entries[0];
245 if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
/* Matchall must keep the HW-imposed ordering relative to flower:
 * before flower on ingress, after flower on egress.
 */
246 if (flower_prio_valid && mall_entry->ingress &&
247 mall_entry->priority >= flower_min_prio) {
248 NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
252 if (flower_prio_valid && !mall_entry->ingress &&
253 mall_entry->priority <= flower_max_prio) {
254 NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
258 mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
259 mall_entry->mirror.to_dev = act->dev;
260 } else if (act->id == FLOW_ACTION_SAMPLE &&
261 protocol == htons(ETH_P_ALL)) {
262 if (!mall_entry->ingress) {
263 NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress");
267 if (flower_prio_valid &&
268 mall_entry->priority >= flower_min_prio) {
269 NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
/* MPSC register limits the configurable sampling rate. */
273 if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
274 NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
278 mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
279 mall_entry->sample.psample_group = act->sample.psample_group;
280 mall_entry->sample.truncate = act->sample.truncate;
281 mall_entry->sample.trunc_size = act->sample.trunc_size;
282 mall_entry->sample.rate = act->sample.rate;
/* Program the entry on every port currently bound to the block. */
288 list_for_each_entry(binding, &block->binding_list, list) {
289 err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
/* An ingress matchall rule blocks binding the block on egress,
 * and vice versa — hence the cross-direction blocker counters.
 */
296 if (mall_entry->ingress)
297 block->egress_blocker_rule_count++;
299 block->ingress_blocker_rule_count++;
300 list_add_tail(&mall_entry->list, &block->mall.list);
301 mlxsw_sp_mall_prio_update(block);
/* Rollback: undo the ports programmed before the failure. */
305 list_for_each_entry_continue_reverse(binding, &block->binding_list,
307 mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
/* Remove a previously offloaded matchall rule identified by its tc
 * cookie: unlink it, adjust the cross-direction blocker counter,
 * unprogram it from every bound port, free it RCU-deferred (sampled
 * packets may still reference the entry), and refresh the prio cache.
 */
313 void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
314 struct tc_cls_matchall_offload *f)
316 struct mlxsw_sp_flow_block_binding *binding;
317 struct mlxsw_sp_mall_entry *mall_entry;
319 mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
321 NL_SET_ERR_MSG(f->common.extack, "Entry not found");
325 list_del(&mall_entry->list);
/* Mirror of the counter updates done in mlxsw_sp_mall_replace(). */
326 if (mall_entry->ingress)
327 block->egress_blocker_rule_count--;
329 block->ingress_blocker_rule_count--;
331 list_for_each_entry(binding, &block->binding_list, list)
332 mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
333 kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
334 mlxsw_sp_mall_prio_update(block);
/* A port is being bound to the block: program every existing matchall
 * entry on it. On failure, roll back the entries already programmed.
 */
337 int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
338 struct mlxsw_sp_port *mlxsw_sp_port)
340 struct mlxsw_sp_mall_entry *mall_entry;
343 list_for_each_entry(mall_entry, &block->mall.list, list) {
344 err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
/* Rollback path: undo entries added before the failing one. */
351 list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
353 mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
/* A port is being unbound from the block: unprogram every matchall
 * entry from it.
 */
357 void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
358 struct mlxsw_sp_port *mlxsw_sp_port)
360 struct mlxsw_sp_mall_entry *mall_entry;
362 list_for_each_entry(mall_entry, &block->mall.list, list)
363 mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
366 int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
367 unsigned int *p_min_prio, unsigned int *p_max_prio)
369 if (chain_index || list_empty(&block->mall.list))
370 /* In case there are no matchall rules, the caller
371 * receives -ENOENT to indicate there is no need
372 * to check the priorities.
375 *p_min_prio = block->mall.min_prio;
376 *p_max_prio = block->mall.max_prio;