// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2020 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/flow_offload.h>

#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"

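/* Action types that can be offloaded through a matchall classifier:
 * port mirroring (implemented with SPAN agents) and packet sampling.
 */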
enum mlxsw_sp_mall_action_type {
        MLXSW_SP_MALL_ACTION_TYPE_MIRROR,
        MLXSW_SP_MALL_ACTION_TYPE_SAMPLE,
};

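/* Mirror-specific state: the destination netdevice and the SPAN agent
 * allocated towards it.
 */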
struct mlxsw_sp_mall_mirror_entry {
        const struct net_device *to_dev;
        int span_id;
};

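/* One offloaded matchall rule. The union carries the action-specific
 * state; @rcu lets the entry be freed only after in-flight sampled
 * packets have drained (see mlxsw_sp_mall_destroy()).
 */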
struct mlxsw_sp_mall_entry {
        struct list_head list;
        unsigned long cookie;
        unsigned int priority;
        enum mlxsw_sp_mall_action_type type;
        bool ingress;
        union {
                struct mlxsw_sp_mall_mirror_entry mirror;
                struct mlxsw_sp_port_sample sample;
        };
        struct rcu_head rcu;
};

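/* Look up an offloaded entry on the block by the cookie assigned by the
 * TC core.
 */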
static struct mlxsw_sp_mall_entry *
mlxsw_sp_mall_entry_find(struct mlxsw_sp_flow_block *block, unsigned long cookie)
{
        struct mlxsw_sp_mall_entry *mall_entry;

        list_for_each_entry(mall_entry, &block->mall.list, list)
                if (mall_entry->cookie == cookie)
                        return mall_entry;

        return NULL;
}

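/* Offload a mirror action to one port: get a SPAN agent towards the
 * destination device, mark the port as analyzed and bind the agent to
 * the ingress or egress trigger, unwinding in reverse order on failure.
 */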
static int
mlxsw_sp_mall_port_mirror_add(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_mall_entry *mall_entry)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_span_trigger_parms parms;
        enum mlxsw_sp_span_trigger trigger;
        int err;

        if (!mall_entry->mirror.to_dev) {
                netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
                return -EINVAL;
        }

        err = mlxsw_sp_span_agent_get(mlxsw_sp, mall_entry->mirror.to_dev,
                                      &mall_entry->mirror.span_id);
        if (err)
                return err;

        err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port,
                                              mall_entry->ingress);
        if (err)
                goto err_analyzed_port_get;

        trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
                                        MLXSW_SP_SPAN_TRIGGER_EGRESS;
        parms.span_id = mall_entry->mirror.span_id;
        err = mlxsw_sp_span_agent_bind(mlxsw_sp, trigger, mlxsw_sp_port,
                                       &parms);
        if (err)
                goto err_agent_bind;

        return 0;

err_agent_bind:
        mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
err_analyzed_port_get:
        mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
        return err;
}

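/* Tear down a mirror action in the reverse order of
 * mlxsw_sp_mall_port_mirror_add().
 */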
static void
mlxsw_sp_mall_port_mirror_del(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_mall_entry *mall_entry)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_span_trigger_parms parms;
        enum mlxsw_sp_span_trigger trigger;

        trigger = mall_entry->ingress ? MLXSW_SP_SPAN_TRIGGER_INGRESS :
                                        MLXSW_SP_SPAN_TRIGGER_EGRESS;
        parms.span_id = mall_entry->mirror.span_id;
        mlxsw_sp_span_agent_unbind(mlxsw_sp, trigger, mlxsw_sp_port, &parms);
        mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, mall_entry->ingress);
        mlxsw_sp_span_agent_put(mlxsw_sp, mall_entry->mirror.span_id);
}

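/* Enable or disable packet sampling on a port via the MPSC register. */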
static int mlxsw_sp_mall_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
                                         bool enable, u32 rate)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char mpsc_pl[MLXSW_REG_MPSC_LEN];

        mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
}

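/* Offload a sample action to one port. Only one active sampler per port
 * is supported. The sample state is published with RCU before sampling
 * is enabled in hardware, so the Rx path never sees a sampled packet
 * without it.
 */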
static int
mlxsw_sp_mall_port_sample_add(struct mlxsw_sp_port *mlxsw_sp_port,
                              struct mlxsw_sp_mall_entry *mall_entry)
{
        int err;

        if (rtnl_dereference(mlxsw_sp_port->sample)) {
                netdev_err(mlxsw_sp_port->dev, "sample already active\n");
                return -EEXIST;
        }
        rcu_assign_pointer(mlxsw_sp_port->sample, &mall_entry->sample);

        err = mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, true,
                                            mall_entry->sample.rate);
        if (err)
                goto err_port_sample_set;
        return 0;

err_port_sample_set:
        RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
        return err;
}

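/* Disable sampling on the port (the rate of 1 is merely a placeholder)
 * and clear the RCU-published state.
 */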
static void
mlxsw_sp_mall_port_sample_del(struct mlxsw_sp_port *mlxsw_sp_port)
{
        if (!mlxsw_sp_port->sample)
                return;

        mlxsw_sp_mall_port_sample_set(mlxsw_sp_port, false, 1);
        RCU_INIT_POINTER(mlxsw_sp_port->sample, NULL);
}

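/* Dispatch an entry to the action-specific add handler for one port. */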
static int
mlxsw_sp_mall_port_rule_add(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_mall_entry *mall_entry)
{
        switch (mall_entry->type) {
        case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
                return mlxsw_sp_mall_port_mirror_add(mlxsw_sp_port, mall_entry);
        case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
                return mlxsw_sp_mall_port_sample_add(mlxsw_sp_port, mall_entry);
        default:
                WARN_ON(1);
                return -EINVAL;
        }
}

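/* Dispatch an entry to the action-specific del handler for one port. */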
static void
mlxsw_sp_mall_port_rule_del(struct mlxsw_sp_port *mlxsw_sp_port,
                            struct mlxsw_sp_mall_entry *mall_entry)
{
        switch (mall_entry->type) {
        case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
                mlxsw_sp_mall_port_mirror_del(mlxsw_sp_port, mall_entry);
                break;
        case MLXSW_SP_MALL_ACTION_TYPE_SAMPLE:
                mlxsw_sp_mall_port_sample_del(mlxsw_sp_port);
                break;
        default:
                WARN_ON(1);
        }
}

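/* Recompute the cached minimum and maximum priority of the matchall
 * rules on the block; the flower code consults these bounds through
 * mlxsw_sp_mall_prio_get() when validating rule ordering.
 */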
static void mlxsw_sp_mall_prio_update(struct mlxsw_sp_flow_block *block)
{
        struct mlxsw_sp_mall_entry *mall_entry;

        if (list_empty(&block->mall.list))
                return;
        block->mall.min_prio = UINT_MAX;
        block->mall.max_prio = 0;
        list_for_each_entry(mall_entry, &block->mall.list, list) {
                if (mall_entry->priority < block->mall.min_prio)
                        block->mall.min_prio = mall_entry->priority;
                if (mall_entry->priority > block->mall.max_prio)
                        block->mall.max_prio = mall_entry->priority;
        }
}

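/* Add a matchall rule to the block and offload it to all bound ports.
 * Matchall rules may not be interleaved with flower rules: an ingress
 * rule must precede all flower rules (lower priority value) and an
 * egress rule must follow them, which corresponds to where the device
 * applies mirroring and sampling relative to the ACL engine.
 *
 * An illustrative user space invocation that would reach this handler
 * (device names are examples only):
 *
 *      tc filter add dev swp1 ingress pref 1 protocol all matchall \
 *              skip_sw action mirred egress mirror dev swp2
 */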
int mlxsw_sp_mall_replace(struct mlxsw_sp *mlxsw_sp,
                          struct mlxsw_sp_flow_block *block,
                          struct tc_cls_matchall_offload *f)
{
        struct mlxsw_sp_flow_block_binding *binding;
        struct mlxsw_sp_mall_entry *mall_entry;
        __be16 protocol = f->common.protocol;
        struct flow_action_entry *act;
        unsigned int flower_min_prio;
        unsigned int flower_max_prio;
        bool flower_prio_valid;
        int err;

        if (!flow_offload_has_one_action(&f->rule->action)) {
                NL_SET_ERR_MSG(f->common.extack, "Only a single action per rule is supported");
                return -EOPNOTSUPP;
        }

        if (f->common.chain_index) {
                NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
                return -EOPNOTSUPP;
        }

        if (mlxsw_sp_flow_block_is_mixed_bound(block)) {
                NL_SET_ERR_MSG(f->common.extack, "Only blocks bound exclusively to ingress or to egress are supported");
                return -EOPNOTSUPP;
        }

        err = mlxsw_sp_flower_prio_get(mlxsw_sp, block, f->common.chain_index,
                                       &flower_min_prio, &flower_max_prio);
        if (err) {
                if (err != -ENOENT) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to get flower priorities");
                        return err;
                }
                flower_prio_valid = false;
                /* No flower filters are installed in the specified chain. */
        } else {
                flower_prio_valid = true;
        }

        mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
        if (!mall_entry)
                return -ENOMEM;
        mall_entry->cookie = f->cookie;
        mall_entry->priority = f->common.prio;
        mall_entry->ingress = mlxsw_sp_flow_block_is_ingress_bound(block);

        act = &f->rule->action.entries[0];

        if (act->id == FLOW_ACTION_MIRRED && protocol == htons(ETH_P_ALL)) {
                if (flower_prio_valid && mall_entry->ingress &&
                    mall_entry->priority >= flower_min_prio) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                if (flower_prio_valid && !mall_entry->ingress &&
                    mall_entry->priority <= flower_max_prio) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to add in front of existing flower rules");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
                mall_entry->mirror.to_dev = act->dev;
        } else if (act->id == FLOW_ACTION_SAMPLE &&
                   protocol == htons(ETH_P_ALL)) {
                if (!mall_entry->ingress) {
                        NL_SET_ERR_MSG(f->common.extack, "Sample is not supported on egress");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                if (flower_prio_valid &&
                    mall_entry->priority >= flower_min_prio) {
                        NL_SET_ERR_MSG(f->common.extack, "Failed to add behind existing flower rules");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                if (act->sample.rate > MLXSW_REG_MPSC_RATE_MAX) {
                        NL_SET_ERR_MSG(f->common.extack, "Sample rate not supported");
                        err = -EOPNOTSUPP;
                        goto errout;
                }
                mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_SAMPLE;
                mall_entry->sample.psample_group = act->sample.psample_group;
                mall_entry->sample.truncate = act->sample.truncate;
                mall_entry->sample.trunc_size = act->sample.trunc_size;
                mall_entry->sample.rate = act->sample.rate;
        } else {
                err = -EOPNOTSUPP;
                goto errout;
        }

        list_for_each_entry(binding, &block->binding_list, list) {
                err = mlxsw_sp_mall_port_rule_add(binding->mlxsw_sp_port,
                                                  mall_entry);
                if (err)
                        goto rollback;
        }

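        /* An ingress-only rule blocks the block from later being bound
         * to egress and vice versa, hence the crossed counters.
         */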
        block->rule_count++;
        if (mall_entry->ingress)
                block->egress_blocker_rule_count++;
        else
                block->ingress_blocker_rule_count++;
        list_add_tail(&mall_entry->list, &block->mall.list);
        mlxsw_sp_mall_prio_update(block);
        return 0;

rollback:
        list_for_each_entry_continue_reverse(binding, &block->binding_list,
                                             list)
                mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
errout:
        kfree(mall_entry);
        return err;
}

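/* Remove a matchall rule from the block and unoffload it from all bound
 * ports. The entry is freed via kfree_rcu() since the Rx path may still
 * be reading the sample state.
 */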
void mlxsw_sp_mall_destroy(struct mlxsw_sp_flow_block *block,
                           struct tc_cls_matchall_offload *f)
{
        struct mlxsw_sp_flow_block_binding *binding;
        struct mlxsw_sp_mall_entry *mall_entry;

        mall_entry = mlxsw_sp_mall_entry_find(block, f->cookie);
        if (!mall_entry) {
                NL_SET_ERR_MSG(f->common.extack, "Entry not found");
                return;
        }

        list_del(&mall_entry->list);
        if (mall_entry->ingress)
                block->egress_blocker_rule_count--;
        else
                block->ingress_blocker_rule_count--;
        block->rule_count--;
        list_for_each_entry(binding, &block->binding_list, list)
                mlxsw_sp_mall_port_rule_del(binding->mlxsw_sp_port, mall_entry);
        kfree_rcu(mall_entry, rcu); /* sample RX packets may be in-flight */
        mlxsw_sp_mall_prio_update(block);
}

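/* Offload all of the block's matchall rules to a port being bound to
 * the block, unwinding on failure.
 */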
int mlxsw_sp_mall_port_bind(struct mlxsw_sp_flow_block *block,
                            struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_mall_entry *mall_entry;
        int err;

        list_for_each_entry(mall_entry, &block->mall.list, list) {
                err = mlxsw_sp_mall_port_rule_add(mlxsw_sp_port, mall_entry);
                if (err)
                        goto rollback;
        }
        return 0;

rollback:
        list_for_each_entry_continue_reverse(mall_entry, &block->mall.list,
                                             list)
                mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
        return err;
}

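/* Unoffload all of the block's matchall rules from a port being unbound
 * from the block.
 */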
void mlxsw_sp_mall_port_unbind(struct mlxsw_sp_flow_block *block,
                               struct mlxsw_sp_port *mlxsw_sp_port)
{
        struct mlxsw_sp_mall_entry *mall_entry;

        list_for_each_entry(mall_entry, &block->mall.list, list)
                mlxsw_sp_mall_port_rule_del(mlxsw_sp_port, mall_entry);
}

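/* Report the priority range occupied by the block's matchall rules on
 * chain 0; the flower counterpart of this check is
 * mlxsw_sp_flower_prio_get().
 */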
int mlxsw_sp_mall_prio_get(struct mlxsw_sp_flow_block *block, u32 chain_index,
                           unsigned int *p_min_prio, unsigned int *p_max_prio)
{
        if (chain_index || list_empty(&block->mall.list))
                /* In case there are no matchall rules, the caller
                 * receives -ENOENT to indicate there is no need
                 * to check the priorities.
                 */
                return -ENOENT;
        *p_min_prio = block->mall.min_prio;
        *p_max_prio = block->mall.max_prio;
        return 0;
}