// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"

size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        return ops->priv_size;
}

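/* The vregion rehash runs from a delayed work and is budgeted: each
 * invocation may migrate roughly MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS
 * entries before it has to reschedule itself, so a large vregion is
 * migrated incrementally instead of monopolizing the workqueue.
 */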
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */

int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
                           struct mlxsw_sp_acl_tcam *tcam)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        u64 max_tcam_regions;
        u64 max_regions;
        u64 max_groups;
        size_t alloc_size;
        int err;

        mutex_init(&tcam->lock);
        tcam->vregion_rehash_intrvl =
                        MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
        INIT_LIST_HEAD(&tcam->vregion_list);

        max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                              ACL_MAX_TCAM_REGIONS);
        max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);

        /* Use 1:1 mapping between ACL region and TCAM region */
        if (max_tcam_regions < max_regions)
                max_regions = max_tcam_regions;

        alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
        tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
        if (!tcam->used_regions)
                return -ENOMEM;
        tcam->max_regions = max_regions;

        max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
        alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
        tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
        if (!tcam->used_groups) {
                err = -ENOMEM;
                goto err_alloc_used_groups;
        }
        tcam->max_groups = max_groups;
        tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
                                                  ACL_MAX_GROUP_SIZE);

        err = ops->init(mlxsw_sp, tcam->priv, tcam);
        if (err)
                goto err_tcam_init;

        return 0;

err_tcam_init:
        kfree(tcam->used_groups);
err_alloc_used_groups:
        kfree(tcam->used_regions);
        return err;
}

void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
                            struct mlxsw_sp_acl_tcam *tcam)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        mutex_destroy(&tcam->lock);
        ops->fini(mlxsw_sp, tcam->priv);
        kfree(tcam->used_groups);
        kfree(tcam->used_regions);
}

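/* Note: TC uses lower numbers for higher priority, while the hardware uses
 * higher numbers for higher priority. The value programmed into hardware is
 * therefore max_priority - rulei->priority; for example, if cap_kvd_size
 * were 0x100, TC priority 1 would map to HW priority 0xfe.
 */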
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
                                   struct mlxsw_sp_acl_rule_info *rulei,
                                   u32 *priority, bool fillup_priority)
{
        u64 max_priority;

        if (!fillup_priority) {
                *priority = 0;
                return 0;
        }

        if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
                return -EIO;

        /* Priority range is 1..cap_kvd_size-1. */
        max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
        if (rulei->priority >= max_priority)
                return -EINVAL;

        /* Unlike in TC, in HW, higher number means higher priority. */
        *priority = max_priority - rulei->priority;
        return 0;
}

static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 *p_id)
{
        u16 id;

        id = find_first_zero_bit(tcam->used_regions, tcam->max_regions);
        if (id < tcam->max_regions) {
                __set_bit(id, tcam->used_regions);
                *p_id = id;
                return 0;
        }
        return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                            u16 id)
{
        __clear_bit(id, tcam->used_regions);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
                                          u16 *p_id)
{
        u16 id;

        id = find_first_zero_bit(tcam->used_groups, tcam->max_groups);
        if (id < tcam->max_groups) {
                __set_bit(id, tcam->used_groups);
                *p_id = id;
                return 0;
        }
        return -ENOBUFS;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
                                           u16 id)
{
        __clear_bit(id, tcam->used_groups);
}

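/* The structures below come in two flavours: the virtual objects (vgroup,
 * vregion, vchunk, ventry) track what the user configured, while the plain
 * objects (group, region, chunk, entry) track what is currently programmed
 * into a particular TCAM region. During rehash a vregion temporarily points
 * to two regions (region and region2) and its entries are migrated between
 * them one by one.
 */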
struct mlxsw_sp_acl_tcam_pattern {
        const enum mlxsw_afk_element *elements;
        unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
        struct mlxsw_sp_acl_tcam *tcam;
        u16 id;
        struct mutex lock; /* guards region list updates */
        struct list_head region_list;
        unsigned int region_count;
};

struct mlxsw_sp_acl_tcam_vgroup {
        struct mlxsw_sp_acl_tcam_group group;
        struct list_head vregion_list;
        struct rhashtable vchunk_ht;
        const struct mlxsw_sp_acl_tcam_pattern *patterns;
        unsigned int patterns_count;
        bool tmplt_elusage_set;
        struct mlxsw_afk_element_usage tmplt_elusage;
        bool vregion_rehash_enabled;
        unsigned int *p_min_prio;
        unsigned int *p_max_prio;
};

struct mlxsw_sp_acl_tcam_rehash_ctx {
        void *hints_priv;
        bool this_is_rollback;
        struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk being
                                                          * currently migrated.
                                                          */
        struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry to start
                                                        * migration from in
                                                        * a vchunk being
                                                        * currently migrated.
                                                        */
        struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry to stop
                                                       * migration at
                                                       * a vchunk being
                                                       * currently migrated.
                                                       */
};

struct mlxsw_sp_acl_tcam_vregion {
        struct mutex lock; /* Protects consistency of region, region2 pointers
                            * and vchunk_list.
                            */
        struct mlxsw_sp_acl_tcam_region *region;
        struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
        struct list_head list; /* Member of a TCAM group */
        struct list_head tlist; /* Member of a TCAM */
        struct list_head vchunk_list; /* List of vchunks under this vregion */
        struct mlxsw_afk_key_info *key_info;
        struct mlxsw_sp_acl_tcam *tcam;
        struct mlxsw_sp_acl_tcam_vgroup *vgroup;
        struct {
                struct delayed_work dw;
                struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
        } rehash;
        struct mlxsw_sp *mlxsw_sp;
        unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_vchunk;

struct mlxsw_sp_acl_tcam_chunk {
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        struct mlxsw_sp_acl_tcam_region *region;
        unsigned long priv[];
        /* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
        struct mlxsw_sp_acl_tcam_chunk *chunk;
        struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
        struct list_head list; /* Member of a TCAM vregion */
        struct rhash_head ht_node; /* Member of a chunk HT */
        struct list_head ventry_list;
        unsigned int priority; /* Priority within the vregion and group */
        struct mlxsw_sp_acl_tcam_vgroup *vgroup;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        unsigned int ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
        struct mlxsw_sp_acl_tcam_ventry *ventry;
        struct mlxsw_sp_acl_tcam_chunk *chunk;
        unsigned long priv[];
        /* priv has to be always the last item */
};

struct mlxsw_sp_acl_tcam_ventry {
        struct mlxsw_sp_acl_tcam_entry *entry;
        struct list_head list; /* Member of a TCAM vchunk */
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        struct mlxsw_sp_acl_rule_info *rulei;
};

static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
        .key_len = sizeof(unsigned int),
        .key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
        .head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
        .automatic_shrinking = true,
};

static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
                                          struct mlxsw_sp_acl_tcam_group *group)
{
        struct mlxsw_sp_acl_tcam_region *region;
        char pagt_pl[MLXSW_REG_PAGT_LEN];
        int acl_index = 0;

        mlxsw_reg_pagt_pack(pagt_pl, group->id);
        list_for_each_entry(region, &group->region_list, list) {
                bool multi = false;

                /* Check if the next entry in the list has the same vregion. */
                if (region->list.next != &group->region_list &&
                    list_next_entry(region, list)->vregion == region->vregion)
                        multi = true;
                mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
                                           region->id, multi);
        }
        mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
                            struct mlxsw_sp_acl_tcam_group *group)
{
        int err;

        group->tcam = tcam;
        mutex_init(&group->lock);
        INIT_LIST_HEAD(&group->region_list);

        err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
        if (err)
                return err;

        return 0;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
        struct mlxsw_sp_acl_tcam *tcam = group->tcam;

        mutex_destroy(&group->lock);
        mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
        WARN_ON(!list_empty(&group->region_list));
}

static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam *tcam,
                             struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                             const struct mlxsw_sp_acl_tcam_pattern *patterns,
                             unsigned int patterns_count,
                             struct mlxsw_afk_element_usage *tmplt_elusage,
                             bool vregion_rehash_enabled,
                             unsigned int *p_min_prio,
                             unsigned int *p_max_prio)
{
        int err;

        vgroup->patterns = patterns;
        vgroup->patterns_count = patterns_count;
        vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
        vgroup->p_min_prio = p_min_prio;
        vgroup->p_max_prio = p_max_prio;

        if (tmplt_elusage) {
                vgroup->tmplt_elusage_set = true;
                memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
                       sizeof(vgroup->tmplt_elusage));
        }
        INIT_LIST_HEAD(&vgroup->vregion_list);

        err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
        if (err)
                return err;

        err = rhashtable_init(&vgroup->vchunk_ht,
                              &mlxsw_sp_acl_tcam_vchunk_ht_params);
        if (err)
                goto err_rhashtable_init;

        return 0;

err_rhashtable_init:
        mlxsw_sp_acl_tcam_group_del(&vgroup->group);
        return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
        rhashtable_destroy(&vgroup->vchunk_ht);
        mlxsw_sp_acl_tcam_group_del(&vgroup->group);
        WARN_ON(!list_empty(&vgroup->vregion_list));
}

static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam_group *group,
                             struct mlxsw_sp_port *mlxsw_sp_port,
                             bool ingress)
{
        char ppbt_pl[MLXSW_REG_PPBT_LEN];

        mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
                                               MLXSW_REG_PXBT_E_EACL,
                            MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
                            group->id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_group *group,
                               struct mlxsw_sp_port *mlxsw_sp_port,
                               bool ingress)
{
        char ppbt_pl[MLXSW_REG_PPBT_LEN];

        mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
                                               MLXSW_REG_PXBT_E_EACL,
                            MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
                            group->id);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
        return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;

        if (list_empty(&vregion->vchunk_list))
                return 0;
        /* As a priority of a vregion, return priority of the first vchunk */
        vchunk = list_first_entry(&vregion->vchunk_list,
                                  typeof(*vchunk), list);
        return vchunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;

        if (list_empty(&vregion->vchunk_list))
                return 0;
        vchunk = list_last_entry(&vregion->vchunk_list,
                                 typeof(*vchunk), list);
        return vchunk->priority;
}

static void
mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion;

        if (list_empty(&vgroup->vregion_list))
                return;
        vregion = list_first_entry(&vgroup->vregion_list,
                                   typeof(*vregion), list);
        *vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
        vregion = list_last_entry(&vgroup->vregion_list,
                                  typeof(*vregion), list);
        *vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
}

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_group *group,
                                      struct mlxsw_sp_acl_tcam_region *region,
                                      unsigned int priority,
                                      struct mlxsw_sp_acl_tcam_region *next_region)
{
        struct mlxsw_sp_acl_tcam_region *region2;
        struct list_head *pos;
        int err;

        mutex_lock(&group->lock);
        if (group->region_count == group->tcam->max_group_size) {
                err = -ENOBUFS;
                goto err_region_count_check;
        }

        if (next_region) {
                /* If the next region is defined, place the new one
                 * before it. The next one is a sibling.
                 */
                pos = &next_region->list;
        } else {
                /* Position the region inside the list according to priority */
                list_for_each(pos, &group->region_list) {
                        region2 = list_entry(pos, typeof(*region2), list);
                        if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
                            priority)
                                break;
                }
        }
        list_add_tail(&region->list, pos);
        region->group = group;

        err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
        if (err)
                goto err_group_update;

        group->region_count++;
        mutex_unlock(&group->lock);
        return 0;

err_group_update:
        list_del(&region->list);
err_region_count_check:
        mutex_unlock(&group->lock);
        return err;
}

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_region *region)
{
        struct mlxsw_sp_acl_tcam_group *group = region->group;

        mutex_lock(&group->lock);
        list_del(&region->list);
        group->region_count--;
        mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
        mutex_unlock(&group->lock);
}

static int
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                        struct mlxsw_sp_acl_tcam_vregion *vregion,
                                        unsigned int priority)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion2;
        struct list_head *pos;
        int err;

        /* Position the vregion inside the list according to priority */
        list_for_each(pos, &vgroup->vregion_list) {
                vregion2 = list_entry(pos, typeof(*vregion2), list);
                if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
                        break;
        }
        list_add_tail(&vregion->list, pos);

        err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
                                                    vregion->region,
                                                    priority, NULL);
        if (err)
                goto err_region_attach;

        return 0;

err_region_attach:
        list_del(&vregion->list);
        return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        list_del(&vregion->list);
        if (vregion->region2)
                mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
                                                      vregion->region2);
        mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                      unsigned int priority,
                                      struct mlxsw_afk_element_usage *elusage,
                                      bool *p_need_split)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
        struct list_head *pos;
        bool issubset;

        list_for_each(pos, &vgroup->vregion_list) {
                vregion = list_entry(pos, typeof(*vregion), list);

                /* First, check whether the requested priority does not rather
                 * belong under one of the next vregions.
                 */
                if (pos->next != &vgroup->vregion_list) { /* not last */
                        vregion2 = list_entry(pos->next, typeof(*vregion2),
                                              list);
                        if (priority >=
                            mlxsw_sp_acl_tcam_vregion_prio(vregion2))
                                continue;
                }

                issubset = mlxsw_afk_key_info_subset(vregion->key_info,
                                                     elusage);

                /* If the requested element usage would not fit and the priority
                 * is lower than the currently inspected vregion, we cannot
                 * use this vregion, so return NULL to indicate a new vregion
                 * has to be created.
                 */
                if (!issubset &&
                    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
                        return NULL;

                /* If the requested element usage would not fit and the priority
                 * is higher than the currently inspected vregion, we cannot
                 * use this vregion. There is still some hope that the next
                 * vregion might be a fit. So let it be processed and
                 * eventually break at the check right above this.
                 */
                if (!issubset &&
                    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
                        continue;

                /* Indicate if the vregion needs to be split in order to add
                 * the requested priority. Split is needed when the requested
                 * element usage won't fit into the found vregion.
                 */
                *p_need_split = !issubset;
                return vregion;
        }
        return NULL; /* A new vregion has to be created. */
}

static void
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                      struct mlxsw_afk_element_usage *elusage,
                                      struct mlxsw_afk_element_usage *out)
{
        const struct mlxsw_sp_acl_tcam_pattern *pattern;
        int i;

        /* In case the template is set, we don't have to look up the pattern
         * and just use the template.
         */
        if (vgroup->tmplt_elusage_set) {
                memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
                WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
                return;
        }

        for (i = 0; i < vgroup->patterns_count; i++) {
                pattern = &vgroup->patterns[i];
                mlxsw_afk_element_usage_fill(out, pattern->elements,
                                             pattern->elements_count);
                if (mlxsw_afk_element_usage_subset(elusage, out))
                        return;
        }
        memcpy(out, elusage, sizeof(*out));
}

static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_region *region)
{
        struct mlxsw_afk_key_info *key_info = region->key_info;
        char ptar_pl[MLXSW_REG_PTAR_LEN];
        unsigned int encodings_count;
        int i;
        int err;

        mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
                            region->key_type,
                            MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
                            region->id, region->tcam_region_info);
        encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
        for (i = 0; i < encodings_count; i++) {
                u16 encoding;

                encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
                mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
        }
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
        if (err)
                return err;
        mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
        return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_region *region)
{
        char ptar_pl[MLXSW_REG_PTAR_LEN];

        mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
                            region->key_type, 0, region->id,
                            region->tcam_region_info);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_region *region)
{
        char pacl_pl[MLXSW_REG_PACL_LEN];

        mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
                            region->tcam_region_info);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_region *region)
{
        char pacl_pl[MLXSW_REG_PACL_LEN];

        mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
                            region->tcam_region_info);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam *tcam,
                                struct mlxsw_sp_acl_tcam_vregion *vregion,
                                void *hints_priv)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_region *region;
        int err;

        region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
        if (!region)
                return ERR_PTR(-ENOMEM);
        region->mlxsw_sp = mlxsw_sp;
        region->vregion = vregion;
        region->key_info = vregion->key_info;

        err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
        if (err)
                goto err_region_id_get;

        err = ops->region_associate(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_associate;

        region->key_type = ops->key_type;
        err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_alloc;

        err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
        if (err)
                goto err_tcam_region_enable;

        err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
                               region, hints_priv);
        if (err)
                goto err_tcam_region_init;

        return region;

err_tcam_region_init:
        mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
        mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
        mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
        kfree(region);
        return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_region *region)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        ops->region_fini(mlxsw_sp, region->priv);
        mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
        mlxsw_sp_acl_tcam_region_id_put(region->group->tcam,
                                        region->id);
        kfree(region);
}

static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

        if (!interval)
                return;
        mlxsw_core_schedule_dw(&vregion->rehash.dw,
                               msecs_to_jiffies(interval));
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_vregion *vregion,
                                 int *credits);

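/* Work item driving the rehash of one vregion. It runs with a fixed credit
 * budget; if the budget is exhausted before the migration finishes, the work
 * reschedules itself immediately and continues where it left off.
 */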
static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion =
                container_of(work, struct mlxsw_sp_acl_tcam_vregion,
                             rehash.dw.work);
        int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;

        mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
        if (credits < 0)
                /* Rehash has run out of credits, so it was interrupted.
                 * Schedule the work as soon as possible to continue.
                 */
                mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
        else
                mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

        /* If a rule was added to or deleted from a vchunk which is currently
         * under rehash migration, we have to reset the ventry pointers
         * to make sure all rules are properly migrated.
         */
        if (vregion->rehash.ctx.current_vchunk == vchunk) {
                vregion->rehash.ctx.start_ventry = NULL;
                vregion->rehash.ctx.stop_ventry = NULL;
        }
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        /* If a chunk was added to or deleted from a vregion, we have to reset
         * the current chunk pointer to make sure all chunks
         * are properly migrated.
         */
        vregion->rehash.ctx.current_vchunk = NULL;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                 unsigned int priority,
                                 struct mlxsw_afk_element_usage *elusage)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
        struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        int err;

        vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
        if (!vregion)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&vregion->vchunk_list);
        mutex_init(&vregion->lock);
        vregion->tcam = tcam;
        vregion->mlxsw_sp = mlxsw_sp;
        vregion->vgroup = vgroup;
        vregion->ref_count = 1;

        vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
        if (IS_ERR(vregion->key_info)) {
                err = PTR_ERR(vregion->key_info);
                goto err_key_info_get;
        }

        vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
                                                          vregion, NULL);
        if (IS_ERR(vregion->region)) {
                err = PTR_ERR(vregion->region);
                goto err_region_create;
        }

        err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
                                                      priority);
        if (err)
                goto err_vgroup_vregion_attach;

        if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
                /* Create the delayed work for vregion periodic rehash */
                INIT_DELAYED_WORK(&vregion->rehash.dw,
                                  mlxsw_sp_acl_tcam_vregion_rehash_work);
                mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
                mutex_lock(&tcam->lock);
                list_add_tail(&vregion->tlist, &tcam->vregion_list);
                mutex_unlock(&tcam->lock);
        }

        return vregion;

err_vgroup_vregion_attach:
        mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
err_region_create:
        mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
        kfree(vregion);
        return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
        struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;

        if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
                mutex_lock(&tcam->lock);
                list_del(&vregion->tlist);
                mutex_unlock(&tcam->lock);
                cancel_delayed_work_sync(&vregion->rehash.dw);
        }
        mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
        if (vregion->region2)
                mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
        mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
        mlxsw_afk_key_info_put(vregion->key_info);
        mutex_destroy(&vregion->lock);
        kfree(vregion);
}

u32 mlxsw_sp_acl_tcam_vregion_rehash_intrvl_get(struct mlxsw_sp *mlxsw_sp,
                                                struct mlxsw_sp_acl_tcam *tcam)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        u32 vregion_rehash_intrvl;

        if (WARN_ON(!ops->region_rehash_hints_get))
                return 0;
        vregion_rehash_intrvl = tcam->vregion_rehash_intrvl;
        return vregion_rehash_intrvl;
}

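/* An interval of 0 disables periodic rehash and cancels any pending rehash
 * work; non-zero values below MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN
 * are rejected.
 */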
int mlxsw_sp_acl_tcam_vregion_rehash_intrvl_set(struct mlxsw_sp *mlxsw_sp,
                                                struct mlxsw_sp_acl_tcam *tcam,
                                                u32 val)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_vregion *vregion;

        if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
                return -EINVAL;
        if (WARN_ON(!ops->region_rehash_hints_get))
                return -EOPNOTSUPP;
        tcam->vregion_rehash_intrvl = val;
        mutex_lock(&tcam->lock);
        list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
                if (val)
                        mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
                else
                        cancel_delayed_work_sync(&vregion->rehash.dw);
        }
        mutex_unlock(&tcam->lock);
        return 0;
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                              unsigned int priority,
                              struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_afk_element_usage vregion_elusage;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        bool need_split;

        vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
                                                        elusage, &need_split);
        if (vregion) {
                if (need_split) {
                        /* According to priority, new vchunk should belong to
                         * an existing vregion. However, this vchunk needs
                         * elements that vregion does not contain. We need
                         * to split the existing vregion into two and create
                         * a new vregion for the new vchunk in between.
                         * This is not supported now.
                         */
                        return ERR_PTR(-EOPNOTSUPP);
                }
                vregion->ref_count++;
                return vregion;
        }

        mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
                                              &vregion_elusage);

        return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
                                                &vregion_elusage);
}

static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
                              struct mlxsw_sp_acl_tcam_vregion *vregion)
{
        if (--vregion->ref_count)
                return;
        mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_vchunk *vchunk,
                               struct mlxsw_sp_acl_tcam_region *region)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_chunk *chunk;

        chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
        if (!chunk)
                return ERR_PTR(-ENOMEM);
        chunk->vchunk = vchunk;
        chunk->region = region;

        ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
        return chunk;
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_chunk *chunk)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        ops->chunk_fini(chunk->priv);
        kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
                                struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                unsigned int priority,
                                struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        struct list_head *pos;
        int err;

        if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
                return ERR_PTR(-EINVAL);

        vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
        if (!vchunk)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&vchunk->ventry_list);
        vchunk->priority = priority;
        vchunk->vgroup = vgroup;
        vchunk->ref_count = 1;

        vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
                                                priority, elusage);
        if (IS_ERR(vregion)) {
                err = PTR_ERR(vregion);
                goto err_vregion_get;
        }

        vchunk->vregion = vregion;

        err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
                                     mlxsw_sp_acl_tcam_vchunk_ht_params);
        if (err)
                goto err_rhashtable_insert;

        mutex_lock(&vregion->lock);
        vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
                                                       vchunk->vregion->region);
        if (IS_ERR(vchunk->chunk)) {
                mutex_unlock(&vregion->lock);
                err = PTR_ERR(vchunk->chunk);
                goto err_chunk_create;
        }

        mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);

        /* Position the vchunk inside the list according to priority */
        list_for_each(pos, &vregion->vchunk_list) {
                vchunk2 = list_entry(pos, typeof(*vchunk2), list);
                if (vchunk2->priority > priority)
                        break;
        }
        list_add_tail(&vchunk->list, pos);
        mutex_unlock(&vregion->lock);
        mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);

        return vchunk;

err_chunk_create:
        rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
                               mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
        mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
        kfree(vchunk);
        return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
                                 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
        struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;

        mutex_lock(&vregion->lock);
        mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
        list_del(&vchunk->list);
        if (vchunk->chunk2)
                mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
        mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
        mutex_unlock(&vregion->lock);
        rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
                               mlxsw_sp_acl_tcam_vchunk_ht_params);
        mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
        kfree(vchunk);
        mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                             unsigned int priority,
                             struct mlxsw_afk_element_usage *elusage)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;

        vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
                                        mlxsw_sp_acl_tcam_vchunk_ht_params);
        if (vchunk) {
                if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
                                                       elusage)))
                        return ERR_PTR(-EINVAL);
                vchunk->ref_count++;
                return vchunk;
        }
        return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
                                               priority, elusage);
}

static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
                             struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
        if (--vchunk->ref_count)
                return;
        mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}

static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
                               struct mlxsw_sp_acl_tcam_ventry *ventry,
                               struct mlxsw_sp_acl_tcam_chunk *chunk)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
        struct mlxsw_sp_acl_tcam_entry *entry;
        int err;

        entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
        if (!entry)
                return ERR_PTR(-ENOMEM);
        entry->ventry = ventry;
        entry->chunk = chunk;

        err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
                             entry->priv, ventry->rulei);
        if (err)
                goto err_entry_add;

        return entry;

err_entry_add:
        kfree(entry);
        return ERR_PTR(err);
}

static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
                                            struct mlxsw_sp_acl_tcam_entry *entry)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
                       entry->chunk->priv, entry->priv);
        kfree(entry);
}

static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
                                       struct mlxsw_sp_acl_tcam_region *region,
                                       struct mlxsw_sp_acl_tcam_entry *entry,
                                       struct mlxsw_sp_acl_rule_info *rulei)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        return ops->entry_action_replace(mlxsw_sp, region->priv,
                                         entry->priv, rulei);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
                                     struct mlxsw_sp_acl_tcam_entry *entry,
                                     bool *activity)
{
        const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

        return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
                                       entry->priv, activity);
}

static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_vgroup *vgroup,
                                        struct mlxsw_sp_acl_tcam_ventry *ventry,
                                        struct mlxsw_sp_acl_rule_info *rulei)
{
        struct mlxsw_sp_acl_tcam_vregion *vregion;
        struct mlxsw_sp_acl_tcam_vchunk *vchunk;
        int err;

        vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
                                              &rulei->values.elusage);
        if (IS_ERR(vchunk))
                return PTR_ERR(vchunk);

        ventry->vchunk = vchunk;
        ventry->rulei = rulei;
        vregion = vchunk->vregion;

        mutex_lock(&vregion->lock);
        ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
                                                       vchunk->chunk);
        if (IS_ERR(ventry->entry)) {
                mutex_unlock(&vregion->lock);
                err = PTR_ERR(ventry->entry);
                goto err_entry_create;
        }

        list_add_tail(&ventry->list, &vchunk->ventry_list);
        mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
        mutex_unlock(&vregion->lock);

        return 0;

err_entry_create:
        mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
        return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
                                         struct mlxsw_sp_acl_tcam_ventry *ventry)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
        struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

        mutex_lock(&vregion->lock);
        mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
        list_del(&ventry->list);
        mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
        mutex_unlock(&vregion->lock);
        mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_acl_tcam_ventry *ventry,
                                        struct mlxsw_sp_acl_rule_info *rulei)
{
        struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

        return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
                                                      vchunk->vregion->region,
                                                      ventry->entry, rulei);
}

static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
                                      struct mlxsw_sp_acl_tcam_ventry *ventry,
                                      bool *activity)
{
        return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
                                                    ventry->entry, activity);
}

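/* Migrate a single ventry to the given chunk in a make-before-break fashion:
 * the new entry is written first and the old one is removed only afterwards,
 * so the rule stays offloaded while it is being moved.
 */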
1257 static int
1258 mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
1259                                  struct mlxsw_sp_acl_tcam_ventry *ventry,
1260                                  struct mlxsw_sp_acl_tcam_chunk *chunk,
1261                                  int *credits)
1262 {
1263         struct mlxsw_sp_acl_tcam_entry *new_entry;
1264
1265         /* First check if the entry is not already where we want it to be. */
1266         if (ventry->entry->chunk == chunk)
1267                 return 0;
1268
1269         if (--(*credits) < 0)
1270                 return 0;
1271
1272         new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
1273         if (IS_ERR(new_entry))
1274                 return PTR_ERR(new_entry);
1275         mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
1276         ventry->entry = new_entry;
1277         return 0;
1278 }
1279
1280 static int
1281 mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
1282                                        struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1283                                        struct mlxsw_sp_acl_tcam_region *region,
1284                                        struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1285 {
1286         struct mlxsw_sp_acl_tcam_chunk *new_chunk;
1287
1288         new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
1289         if (IS_ERR(new_chunk))
1290                 return PTR_ERR(new_chunk);
1291         vchunk->chunk2 = vchunk->chunk;
1292         vchunk->chunk = new_chunk;
1293         ctx->current_vchunk = vchunk;
1294         ctx->start_ventry = NULL;
1295         ctx->stop_ventry = NULL;
1296         return 0;
1297 }
1298
1299 static void
1300 mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
1301                                      struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1302                                      struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1303 {
1304         mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
1305         vchunk->chunk2 = NULL;
1306         ctx->current_vchunk = NULL;
1307 }
1308
1309 static int
1310 mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
1311                                      struct mlxsw_sp_acl_tcam_vchunk *vchunk,
1312                                      struct mlxsw_sp_acl_tcam_region *region,
1313                                      struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1314                                      int *credits)
1315 {
1316         struct mlxsw_sp_acl_tcam_ventry *ventry;
1317         int err;
1318
1319         if (vchunk->chunk->region != region) {
1320                 err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
1321                                                              region, ctx);
1322                 if (err)
1323                         return err;
1324         } else if (!vchunk->chunk2) {
1325                 /* The chunk is already as it should be, nothing to do. */
1326                 return 0;
1327         }
1328
1329         /* If the migration got interrupted, we have the ventry to start from
1330          * stored in context.
1331          */
1332         if (ctx->start_ventry)
1333                 ventry = ctx->start_ventry;
1334         else
1335                 ventry = list_first_entry(&vchunk->ventry_list,
1336                                           typeof(*ventry), list);
1337
1338         list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
1339                 /* During rollback, once we reach the ventry that failed
1340                  * to migrate, we are done.
1341                  */
1342                 if (ventry == ctx->stop_ventry)
1343                         break;
1344
1345                 err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
1346                                                        vchunk->chunk, credits);
1347                 if (err) {
1348                         if (ctx->this_is_rollback) {
1349                                 /* Save the ventry which we ended with and try
1350                                  * to continue later on.
1351                                  */
1352                                 ctx->start_ventry = ventry;
1353                                 return err;
1354                         }
1355                         /* Swap the chunk and chunk2 pointers so the follow-up
1356                          * rollback call will see the original chunk pointer
1357                          * in vchunk->chunk.
1358                          */
1359                         swap(vchunk->chunk, vchunk->chunk2);
1360                         /* The rollback has to be done from beginning of the
1361                          * chunk, that is why we have to null the start_ventry.
1362                          * However, we know where to stop the rollback,
1363                          * at the current ventry.
1364                          */
1365                         ctx->start_ventry = NULL;
1366                         ctx->stop_ventry = ventry;
1367                         return err;
1368                 } else if (*credits < 0) {
1369                         /* We are out of credits; the rest of the ventries
1370                          * will be migrated later. Save the ventry
1371                          * we ended with.
1372                          */
1373                         ctx->start_ventry = ventry;
1374                         return 0;
1375                 }
1376         }
1377
1378         mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
1379         return 0;
1380 }
1381
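     /* Walk all vchunks of the vregion and migrate each one into the current
      * vregion->region. If a previous pass was interrupted, iteration resumes
      * from ctx->current_vchunk. The walk stops early on error or once the
      * credits are exhausted.
      */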
1382 static int
1383 mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
1384                                      struct mlxsw_sp_acl_tcam_vregion *vregion,
1385                                      struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1386                                      int *credits)
1387 {
1388         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1389         int err;
1390
1391         /* If the migration got interrupted, we have the vchunk
1392          * we are working on stored in context.
1393          */
1394         if (ctx->current_vchunk)
1395                 vchunk = ctx->current_vchunk;
1396         else
1397                 vchunk = list_first_entry(&vregion->vchunk_list,
1398                                           typeof(*vchunk), list);
1399
1400         list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
1401                 err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
1402                                                            vregion->region,
1403                                                            ctx, credits);
1404                 if (err || *credits < 0)
1405                         return err;
1406         }
1407         return 0;
1408 }
1409
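     /* Migrate the whole vregion under vregion->lock. If the migration fails,
      * the region pointers are swapped back and a rollback pass is attempted
      * with the remaining credits; a failed rollback is left to be continued
      * by a later invocation.
      */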
1410 static int
1411 mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
1412                                   struct mlxsw_sp_acl_tcam_vregion *vregion,
1413                                   struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1414                                   int *credits)
1415 {
1416         int err, err2;
1417
1418         trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
1419         mutex_lock(&vregion->lock);
1420         err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
1421                                                    ctx, credits);
1422         if (err) {
1423                 /* In case the migration was not successful, swap back so
1424                  * the original region pointer is assigned again
1425                  * to vregion->region.
1426                  */
1427                 swap(vregion->region, vregion->region2);
1428                 ctx->current_vchunk = NULL;
1429                 ctx->this_is_rollback = true;
1430                 err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
1431                                                             ctx, credits);
1432                 if (err2) {
1433                         trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
1434                                                                                vregion);
1435                         dev_err(mlxsw_sp->bus_info->dev, "Failed to roll back after vregion migration failure\n");
1436                         /* Let the rollback be continued later on. */
1437                 }
1438         }
1439         mutex_unlock(&vregion->lock);
1440         trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
1441         return err;
1442 }
1443
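     /* ctx->hints_priv is set by rehash_start() and cleared by rehash_end(),
      * so its presence indicates a rehash that was started but not yet
      * finished.
      */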
1444 static bool
1445 mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1446 {
1447         return ctx->hints_priv;
1448 }
1449
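     /* Start a rehash: obtain rehash hints for the current region, create a
      * new region based on them and attach it to the same group as the old
      * region. On success, vregion->region points to the new region while
      * vregion->region2 keeps the old region to migrate from.
      */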
1450 static int
1451 mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
1452                                        struct mlxsw_sp_acl_tcam_vregion *vregion,
1453                                        struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1454 {
1455         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1456         unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
1457         struct mlxsw_sp_acl_tcam_region *new_region;
1458         void *hints_priv;
1459         int err;
1460
1461         trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
1462
1463         hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
1464         if (IS_ERR(hints_priv))
1465                 return PTR_ERR(hints_priv);
1466
1467         new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
1468                                                      vregion, hints_priv);
1469         if (IS_ERR(new_region)) {
1470                 err = PTR_ERR(new_region);
1471                 goto err_region_create;
1472         }
1473
1474         /* vregion->region contains the pointer to the new region
1475          * we are going to migrate to.
1476          */
1477         vregion->region2 = vregion->region;
1478         vregion->region = new_region;
1479         err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
1480                                                     vregion->region2->group,
1481                                                     new_region, priority,
1482                                                     vregion->region2);
1483         if (err)
1484                 goto err_group_region_attach;
1485
1486         ctx->hints_priv = hints_priv;
1487         ctx->this_is_rollback = false;
1488
1489         return 0;
1490
1491 err_group_region_attach:
1492         vregion->region = vregion->region2;
1493         vregion->region2 = NULL;
1494         mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
1495 err_region_create:
1496         ops->region_rehash_hints_put(hints_priv);
1497         return err;
1498 }
1499
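     /* Finish a rehash: all ventries now live in the new region, so detach
      * and destroy the old, unused region and release the rehash hints.
      */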
1500 static void
1501 mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
1502                                      struct mlxsw_sp_acl_tcam_vregion *vregion,
1503                                      struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1504 {
1505         struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
1506         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1507
1508         vregion->region2 = NULL;
1509         mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
1510         mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
1511         ops->region_rehash_hints_put(ctx->hints_priv);
1512         ctx->hints_priv = NULL;
1513 }
1514
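     /* Continue a previously interrupted rehash or start a new one, migrate
      * as many entries as the credits allow, and finish the rehash only if
      * the credits were not exhausted.
      */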
1515 static void
1516 mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
1517                                  struct mlxsw_sp_acl_tcam_vregion *vregion,
1518                                  int *credits)
1519 {
1520         struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
1521         int err;
1522
1523         /* Check if the previous rehash work was interrupted,
1524          * in which case we have to continue it now.
1525          * If not, start a new rehash.
1526          */
1527         if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
1528                 err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
1529                                                              vregion, ctx);
1530                 if (err) {
1531                         if (err != -EAGAIN)
1532                                 dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n");
1533                         return;
1534                 }
1535         }
1536
1537         err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
1538                                                 ctx, credits);
1539         if (err)
1540                 dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
1542
1543         if (*credits >= 0)
1544                 mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
1545 }
1546
1547 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
1548         MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
1549         MLXSW_AFK_ELEMENT_DMAC_32_47,
1550         MLXSW_AFK_ELEMENT_DMAC_0_31,
1551         MLXSW_AFK_ELEMENT_SMAC_32_47,
1552         MLXSW_AFK_ELEMENT_SMAC_0_31,
1553         MLXSW_AFK_ELEMENT_ETHERTYPE,
1554         MLXSW_AFK_ELEMENT_IP_PROTO,
1555         MLXSW_AFK_ELEMENT_SRC_IP_0_31,
1556         MLXSW_AFK_ELEMENT_DST_IP_0_31,
1557         MLXSW_AFK_ELEMENT_DST_L4_PORT,
1558         MLXSW_AFK_ELEMENT_SRC_L4_PORT,
1559         MLXSW_AFK_ELEMENT_VID,
1560         MLXSW_AFK_ELEMENT_PCP,
1561         MLXSW_AFK_ELEMENT_TCP_FLAGS,
1562         MLXSW_AFK_ELEMENT_IP_TTL_,
1563         MLXSW_AFK_ELEMENT_IP_ECN,
1564         MLXSW_AFK_ELEMENT_IP_DSCP,
1565 };
1566
1567 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
1568         MLXSW_AFK_ELEMENT_ETHERTYPE,
1569         MLXSW_AFK_ELEMENT_IP_PROTO,
1570         MLXSW_AFK_ELEMENT_SRC_IP_96_127,
1571         MLXSW_AFK_ELEMENT_SRC_IP_64_95,
1572         MLXSW_AFK_ELEMENT_SRC_IP_32_63,
1573         MLXSW_AFK_ELEMENT_SRC_IP_0_31,
1574         MLXSW_AFK_ELEMENT_DST_IP_96_127,
1575         MLXSW_AFK_ELEMENT_DST_IP_64_95,
1576         MLXSW_AFK_ELEMENT_DST_IP_32_63,
1577         MLXSW_AFK_ELEMENT_DST_IP_0_31,
1578         MLXSW_AFK_ELEMENT_DST_L4_PORT,
1579         MLXSW_AFK_ELEMENT_SRC_L4_PORT,
1580 };
1581
1582 static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
1583         {
1584                 .elements = mlxsw_sp_acl_tcam_pattern_ipv4,
1585                 .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
1586         },
1587         {
1588                 .elements = mlxsw_sp_acl_tcam_pattern_ipv6,
1589                 .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
1590         },
1591 };
1592
1593 #define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
1594         ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
1595
1596 struct mlxsw_sp_acl_tcam_flower_ruleset {
1597         struct mlxsw_sp_acl_tcam_vgroup vgroup;
1598 };
1599
1600 struct mlxsw_sp_acl_tcam_flower_rule {
1601         struct mlxsw_sp_acl_tcam_ventry ventry;
1602 };
1603
1604 static int
1605 mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1606                                      struct mlxsw_sp_acl_tcam *tcam,
1607                                      void *ruleset_priv,
1608                                      struct mlxsw_afk_element_usage *tmplt_elusage,
1609                                      unsigned int *p_min_prio,
1610                                      unsigned int *p_max_prio)
1611 {
1612         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1613
1614         return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
1615                                             mlxsw_sp_acl_tcam_patterns,
1616                                             MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1617                                             tmplt_elusage, true,
1618                                             p_min_prio, p_max_prio);
1619 }
1620
1621 static void
1622 mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
1623                                      void *ruleset_priv)
1624 {
1625         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1626
1627         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1628 }
1629
1630 static int
1631 mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
1632                                       void *ruleset_priv,
1633                                       struct mlxsw_sp_port *mlxsw_sp_port,
1634                                       bool ingress)
1635 {
1636         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1637
1638         return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
1639                                             mlxsw_sp_port, ingress);
1640 }
1641
1642 static void
1643 mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
1644                                         void *ruleset_priv,
1645                                         struct mlxsw_sp_port *mlxsw_sp_port,
1646                                         bool ingress)
1647 {
1648         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1649
1650         mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
1651                                        mlxsw_sp_port, ingress);
1652 }
1653
1654 static u16
1655 mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
1656 {
1657         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1658
1659         return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
1660 }
1661
1662 static int
1663 mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
1664                                   void *ruleset_priv, void *rule_priv,
1665                                   struct mlxsw_sp_acl_rule_info *rulei)
1666 {
1667         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1668         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1669
1670         return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
1671                                             &rule->ventry, rulei);
1672 }
1673
1674 static void
1675 mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1676 {
1677         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1678
1679         mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
1680 }
1681
1682 static int
1683 mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
1684                                              void *rule_priv,
1685                                              struct mlxsw_sp_acl_rule_info *rulei)
1686 {
1687         return -EOPNOTSUPP;
1688 }
1689
1690 static int
1691 mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1692                                            void *rule_priv, bool *activity)
1693 {
1694         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1695
1696         return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
1697                                                      activity);
1698 }
1699
1700 static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
1701         .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
1702         .ruleset_add            = mlxsw_sp_acl_tcam_flower_ruleset_add,
1703         .ruleset_del            = mlxsw_sp_acl_tcam_flower_ruleset_del,
1704         .ruleset_bind           = mlxsw_sp_acl_tcam_flower_ruleset_bind,
1705         .ruleset_unbind         = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
1706         .ruleset_group_id       = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
1707         .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
1708         .rule_add               = mlxsw_sp_acl_tcam_flower_rule_add,
1709         .rule_del               = mlxsw_sp_acl_tcam_flower_rule_del,
1710         .rule_action_replace    = mlxsw_sp_acl_tcam_flower_rule_action_replace,
1711         .rule_activity_get      = mlxsw_sp_acl_tcam_flower_rule_activity_get,
1712 };
1713
1714 struct mlxsw_sp_acl_tcam_mr_ruleset {
1715         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1716         struct mlxsw_sp_acl_tcam_vgroup vgroup;
1717 };
1718
1719 struct mlxsw_sp_acl_tcam_mr_rule {
1720         struct mlxsw_sp_acl_tcam_ventry ventry;
1721 };
1722
1723 static int
1724 mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1725                                  struct mlxsw_sp_acl_tcam *tcam,
1726                                  void *ruleset_priv,
1727                                  struct mlxsw_afk_element_usage *tmplt_elusage,
1728                                  unsigned int *p_min_prio,
1729                                  unsigned int *p_max_prio)
1730 {
1731         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1732         int err;
1733
1734         err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
1735                                            mlxsw_sp_acl_tcam_patterns,
1736                                            MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1737                                            tmplt_elusage, false,
1738                                            p_min_prio, p_max_prio);
1739         if (err)
1740                 return err;
1741
1742         /* For most of the TCAM clients it would make sense to take a TCAM chunk
1743          * only when the first rule is written. This is not the case for the
1744          * multicast router, as it is required to bind the multicast router to a
1745          * specific ACL Group ID, which must exist in HW before the multicast
1746          * router is initialized.
1747          */
1748         ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
1749                                                        &ruleset->vgroup, 1,
1750                                                        tmplt_elusage);
1751         if (IS_ERR(ruleset->vchunk)) {
1752                 err = PTR_ERR(ruleset->vchunk);
1753                 goto err_chunk_get;
1754         }
1755
1756         return 0;
1757
1758 err_chunk_get:
1759         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1760         return err;
1761 }
1762
1763 static void
1764 mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
1765 {
1766         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1767
1768         mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
1769         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1770 }
1771
1772 static int
1773 mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
1774                                   struct mlxsw_sp_port *mlxsw_sp_port,
1775                                   bool ingress)
1776 {
1777         /* Binding is done when initializing the multicast router. */
1778         return 0;
1779 }
1780
1781 static void
1782 mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
1783                                     void *ruleset_priv,
1784                                     struct mlxsw_sp_port *mlxsw_sp_port,
1785                                     bool ingress)
1786 {
1787 }
1788
1789 static u16
1790 mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
1791 {
1792         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1793
1794         return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
1795 }
1796
1797 static int
1798 mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
1799                               void *rule_priv,
1800                               struct mlxsw_sp_acl_rule_info *rulei)
1801 {
1802         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1803         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1804
1805         return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
1806                                            &rule->ventry, rulei);
1807 }
1808
1809 static void
1810 mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1811 {
1812         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1813
1814         mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
1815 }
1816
1817 static int
1818 mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
1819                                          void *rule_priv,
1820                                          struct mlxsw_sp_acl_rule_info *rulei)
1821 {
1822         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1823
1824         return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
1825                                                        rulei);
1826 }
1827
1828 static int
1829 mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1830                                        void *rule_priv, bool *activity)
1831 {
1832         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1833
1834         return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
1835                                                      activity);
1836 }
1837
1838 static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
1839         .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
1840         .ruleset_add            = mlxsw_sp_acl_tcam_mr_ruleset_add,
1841         .ruleset_del            = mlxsw_sp_acl_tcam_mr_ruleset_del,
1842         .ruleset_bind           = mlxsw_sp_acl_tcam_mr_ruleset_bind,
1843         .ruleset_unbind         = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
1844         .ruleset_group_id       = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
1845         .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
1846         .rule_add               = mlxsw_sp_acl_tcam_mr_rule_add,
1847         .rule_del               = mlxsw_sp_acl_tcam_mr_rule_del,
1848         .rule_action_replace    = mlxsw_sp_acl_tcam_mr_rule_action_replace,
1849         .rule_activity_get      = mlxsw_sp_acl_tcam_mr_rule_activity_get,
1850 };
1851
1852 static const struct mlxsw_sp_acl_profile_ops *
1853 mlxsw_sp_acl_tcam_profile_ops_arr[] = {
1854         [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
1855         [MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
1856 };
1857
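     /* Look up the profile ops for the given ACL profile. Returns NULL (and
      * warns) if the profile is out of range or has no ops registered.
      */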
1858 const struct mlxsw_sp_acl_profile_ops *
1859 mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
1860                               enum mlxsw_sp_acl_profile profile)
1861 {
1862         const struct mlxsw_sp_acl_profile_ops *ops;
1863
1864         if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
1865                 return NULL;
1866         ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
1867         if (WARN_ON(!ops))
1868                 return NULL;
1869         return ops;
1870 }