/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/parman.h>

#include "spectrum_mr_tcam.h"
#include "reg.h"
#include "spectrum.h"
#include "core_acl_flex_actions.h"
#include "spectrum_mr.h"
46 struct mlxsw_sp_mr_tcam_region {
47 struct mlxsw_sp *mlxsw_sp;
48 enum mlxsw_reg_rtar_key_type rtar_key_type;
49 struct parman *parman;
50 struct parman_prio *parman_prios;
53 struct mlxsw_sp_mr_tcam {
54 struct mlxsw_sp_mr_tcam_region tcam_regions[MLXSW_SP_L3_PROTO_MAX];
57 /* This struct maps to one RIGR2 register entry */
58 struct mlxsw_sp_mr_erif_sublist {
59 struct list_head list;
62 u16 erif_indices[MLXSW_REG_RIGR2_MAX_ERIFS];
66 struct mlxsw_sp_mr_tcam_erif_list {
67 struct list_head erif_sublists;
72 mlxsw_sp_mr_erif_sublist_full(struct mlxsw_sp *mlxsw_sp,
73 struct mlxsw_sp_mr_erif_sublist *erif_sublist)
75 int erif_list_entries = MLXSW_CORE_RES_GET(mlxsw_sp->core,
76 MC_ERIF_LIST_ENTRIES);
78 return erif_sublist->num_erifs == erif_list_entries;
82 mlxsw_sp_mr_erif_list_init(struct mlxsw_sp_mr_tcam_erif_list *erif_list)
84 INIT_LIST_HEAD(&erif_list->erif_sublists);
87 #define MLXSW_SP_KVDL_RIGR2_SIZE 1
89 static struct mlxsw_sp_mr_erif_sublist *
90 mlxsw_sp_mr_erif_sublist_create(struct mlxsw_sp *mlxsw_sp,
91 struct mlxsw_sp_mr_tcam_erif_list *erif_list)
93 struct mlxsw_sp_mr_erif_sublist *erif_sublist;
96 erif_sublist = kzalloc(sizeof(*erif_sublist), GFP_KERNEL);
98 return ERR_PTR(-ENOMEM);
99 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_RIGR2_SIZE,
100 &erif_sublist->rigr2_kvdl_index);
106 list_add_tail(&erif_sublist->list, &erif_list->erif_sublists);
111 mlxsw_sp_mr_erif_sublist_destroy(struct mlxsw_sp *mlxsw_sp,
112 struct mlxsw_sp_mr_erif_sublist *erif_sublist)
114 list_del(&erif_sublist->list);
115 mlxsw_sp_kvdl_free(mlxsw_sp, erif_sublist->rigr2_kvdl_index);
120 mlxsw_sp_mr_erif_list_add(struct mlxsw_sp *mlxsw_sp,
121 struct mlxsw_sp_mr_tcam_erif_list *erif_list,
124 struct mlxsw_sp_mr_erif_sublist *sublist;
126 /* If either there is no erif_entry or the last one is full, allocate a
129 if (list_empty(&erif_list->erif_sublists)) {
130 sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp, erif_list);
132 return PTR_ERR(sublist);
133 erif_list->kvdl_index = sublist->rigr2_kvdl_index;
135 sublist = list_last_entry(&erif_list->erif_sublists,
136 struct mlxsw_sp_mr_erif_sublist,
138 sublist->synced = false;
139 if (mlxsw_sp_mr_erif_sublist_full(mlxsw_sp, sublist)) {
140 sublist = mlxsw_sp_mr_erif_sublist_create(mlxsw_sp,
143 return PTR_ERR(sublist);
147 /* Add the eRIF to the last entry's last index */
148 sublist->erif_indices[sublist->num_erifs++] = erif_index;
153 mlxsw_sp_mr_erif_list_flush(struct mlxsw_sp *mlxsw_sp,
154 struct mlxsw_sp_mr_tcam_erif_list *erif_list)
156 struct mlxsw_sp_mr_erif_sublist *erif_sublist, *tmp;
158 list_for_each_entry_safe(erif_sublist, tmp, &erif_list->erif_sublists,
160 mlxsw_sp_mr_erif_sublist_destroy(mlxsw_sp, erif_sublist);
164 mlxsw_sp_mr_erif_list_commit(struct mlxsw_sp *mlxsw_sp,
165 struct mlxsw_sp_mr_tcam_erif_list *erif_list)
167 struct mlxsw_sp_mr_erif_sublist *curr_sublist;
168 char rigr2_pl[MLXSW_REG_RIGR2_LEN];
172 list_for_each_entry(curr_sublist, &erif_list->erif_sublists, list) {
173 if (curr_sublist->synced)
176 /* If the sublist is not the last one, pack the next index */
177 if (list_is_last(&curr_sublist->list,
178 &erif_list->erif_sublists)) {
179 mlxsw_reg_rigr2_pack(rigr2_pl,
180 curr_sublist->rigr2_kvdl_index,
183 struct mlxsw_sp_mr_erif_sublist *next_sublist;
185 next_sublist = list_next_entry(curr_sublist, list);
186 mlxsw_reg_rigr2_pack(rigr2_pl,
187 curr_sublist->rigr2_kvdl_index,
189 next_sublist->rigr2_kvdl_index);
192 /* Pack all the erifs */
193 for (i = 0; i < curr_sublist->num_erifs; i++) {
194 u16 erif_index = curr_sublist->erif_indices[i];
196 mlxsw_reg_rigr2_erif_entry_pack(rigr2_pl, i, true,
200 /* Write the entry */
201 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rigr2),
204 /* No need of a rollback here because this
205 * hardware entry should not be pointed yet.
208 curr_sublist->synced = true;
213 static void mlxsw_sp_mr_erif_list_move(struct mlxsw_sp_mr_tcam_erif_list *to,
214 struct mlxsw_sp_mr_tcam_erif_list *from)
216 list_splice(&from->erif_sublists, &to->erif_sublists);
217 to->kvdl_index = from->kvdl_index;
220 struct mlxsw_sp_mr_tcam_route {
221 struct mlxsw_sp_mr_tcam_erif_list erif_list;
222 struct mlxsw_afa_block *afa_block;
224 struct parman_item parman_item;
225 struct parman_prio *parman_prio;
226 enum mlxsw_sp_mr_route_action action;
227 struct mlxsw_sp_mr_route_key key;
232 static struct mlxsw_afa_block *
233 mlxsw_sp_mr_tcam_afa_block_create(struct mlxsw_sp *mlxsw_sp,
234 enum mlxsw_sp_mr_route_action route_action,
235 u16 irif_index, u32 counter_index,
237 struct mlxsw_sp_mr_tcam_erif_list *erif_list)
239 struct mlxsw_afa_block *afa_block;
242 afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
244 return ERR_PTR(-ENOMEM);
246 err = mlxsw_afa_block_append_allocated_counter(afa_block,
251 switch (route_action) {
252 case MLXSW_SP_MR_ROUTE_ACTION_TRAP:
253 err = mlxsw_afa_block_append_trap(afa_block,
258 case MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD:
259 case MLXSW_SP_MR_ROUTE_ACTION_FORWARD:
260 /* If we are about to append a multicast router action, commit
263 err = mlxsw_sp_mr_erif_list_commit(mlxsw_sp, erif_list);
267 err = mlxsw_afa_block_append_mcrouter(afa_block, irif_index,
269 erif_list->kvdl_index);
273 if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP_AND_FORWARD) {
274 err = mlxsw_afa_block_append_trap_and_forward(afa_block,
285 err = mlxsw_afa_block_commit(afa_block);
290 mlxsw_afa_block_destroy(afa_block);
/* Destroy a flexible action block created by
 * mlxsw_sp_mr_tcam_afa_block_create().
 */
static void
mlxsw_sp_mr_tcam_afa_block_destroy(struct mlxsw_afa_block *afa_block)
{
	mlxsw_afa_block_destroy(afa_block);
}
300 static int mlxsw_sp_mr_tcam_route_replace(struct mlxsw_sp *mlxsw_sp,
301 struct parman_item *parman_item,
302 struct mlxsw_sp_mr_route_key *key,
303 struct mlxsw_afa_block *afa_block)
305 char rmft2_pl[MLXSW_REG_RMFT2_LEN];
307 switch (key->proto) {
308 case MLXSW_SP_L3_PROTO_IPV4:
309 mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, true, parman_item->index,
311 MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
312 ntohl(key->group.addr4),
313 ntohl(key->group_mask.addr4),
314 ntohl(key->source.addr4),
315 ntohl(key->source_mask.addr4),
316 mlxsw_afa_block_first_set(afa_block));
318 case MLXSW_SP_L3_PROTO_IPV6:
319 mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, true, parman_item->index,
321 MLXSW_REG_RMFT2_IRIF_MASK_IGNORE, 0,
323 key->group_mask.addr6,
325 key->source_mask.addr6,
326 mlxsw_afa_block_first_set(afa_block));
329 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
332 static int mlxsw_sp_mr_tcam_route_remove(struct mlxsw_sp *mlxsw_sp, int vrid,
333 struct mlxsw_sp_mr_route_key *key,
334 struct parman_item *parman_item)
336 struct in6_addr zero_addr = IN6ADDR_ANY_INIT;
337 char rmft2_pl[MLXSW_REG_RMFT2_LEN];
339 switch (key->proto) {
340 case MLXSW_SP_L3_PROTO_IPV4:
341 mlxsw_reg_rmft2_ipv4_pack(rmft2_pl, false, parman_item->index,
342 vrid, 0, 0, 0, 0, 0, 0, NULL);
344 case MLXSW_SP_L3_PROTO_IPV6:
345 mlxsw_reg_rmft2_ipv6_pack(rmft2_pl, false, parman_item->index,
346 vrid, 0, 0, zero_addr, zero_addr,
347 zero_addr, zero_addr, NULL);
351 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rmft2), rmft2_pl);
355 mlxsw_sp_mr_tcam_erif_populate(struct mlxsw_sp *mlxsw_sp,
356 struct mlxsw_sp_mr_tcam_erif_list *erif_list,
357 struct mlxsw_sp_mr_route_info *route_info)
362 for (i = 0; i < route_info->erif_num; i++) {
363 u16 erif_index = route_info->erif_indices[i];
365 err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, erif_list,
373 static struct mlxsw_sp_mr_tcam_region *
374 mlxsw_sp_mr_tcam_protocol_region(struct mlxsw_sp_mr_tcam *mr_tcam,
375 enum mlxsw_sp_l3proto proto)
377 return &mr_tcam->tcam_regions[proto];
381 mlxsw_sp_mr_tcam_route_parman_item_add(struct mlxsw_sp_mr_tcam *mr_tcam,
382 struct mlxsw_sp_mr_tcam_route *route,
383 enum mlxsw_sp_mr_route_prio prio)
385 struct mlxsw_sp_mr_tcam_region *tcam_region;
388 tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
390 err = parman_item_add(tcam_region->parman,
391 &tcam_region->parman_prios[prio],
392 &route->parman_item);
396 route->parman_prio = &tcam_region->parman_prios[prio];
401 mlxsw_sp_mr_tcam_route_parman_item_remove(struct mlxsw_sp_mr_tcam *mr_tcam,
402 struct mlxsw_sp_mr_tcam_route *route)
404 struct mlxsw_sp_mr_tcam_region *tcam_region;
406 tcam_region = mlxsw_sp_mr_tcam_protocol_region(mr_tcam,
409 parman_item_remove(tcam_region->parman,
410 route->parman_prio, &route->parman_item);
414 mlxsw_sp_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv,
416 struct mlxsw_sp_mr_route_params *route_params)
418 struct mlxsw_sp_mr_tcam_route *route = route_priv;
419 struct mlxsw_sp_mr_tcam *mr_tcam = priv;
422 route->key = route_params->key;
423 route->irif_index = route_params->value.irif_index;
424 route->min_mtu = route_params->value.min_mtu;
425 route->action = route_params->value.route_action;
427 /* Create the egress RIFs list */
428 mlxsw_sp_mr_erif_list_init(&route->erif_list);
429 err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list,
430 &route_params->value);
432 goto err_erif_populate;
434 /* Create the flow counter */
435 err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index);
437 goto err_counter_alloc;
439 /* Create the flexible action block */
440 route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
443 route->counter_index,
446 if (IS_ERR(route->afa_block)) {
447 err = PTR_ERR(route->afa_block);
448 goto err_afa_block_create;
451 /* Allocate place in the TCAM */
452 err = mlxsw_sp_mr_tcam_route_parman_item_add(mr_tcam, route,
455 goto err_parman_item_add;
457 /* Write the route to the TCAM */
458 err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
459 &route->key, route->afa_block);
461 goto err_route_replace;
465 mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
467 mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
468 err_afa_block_create:
469 mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
472 mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
476 static void mlxsw_sp_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp,
477 void *priv, void *route_priv)
479 struct mlxsw_sp_mr_tcam_route *route = route_priv;
480 struct mlxsw_sp_mr_tcam *mr_tcam = priv;
482 mlxsw_sp_mr_tcam_route_remove(mlxsw_sp, route->key.vrid,
483 &route->key, &route->parman_item);
484 mlxsw_sp_mr_tcam_route_parman_item_remove(mr_tcam, route);
485 mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
486 mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index);
487 mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
490 static int mlxsw_sp_mr_tcam_route_stats(struct mlxsw_sp *mlxsw_sp,
491 void *route_priv, u64 *packets,
494 struct mlxsw_sp_mr_tcam_route *route = route_priv;
496 return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index,
501 mlxsw_sp_mr_tcam_route_action_update(struct mlxsw_sp *mlxsw_sp,
503 enum mlxsw_sp_mr_route_action route_action)
505 struct mlxsw_sp_mr_tcam_route *route = route_priv;
506 struct mlxsw_afa_block *afa_block;
509 /* Create a new flexible action block */
510 afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route_action,
512 route->counter_index,
515 if (IS_ERR(afa_block))
516 return PTR_ERR(afa_block);
518 /* Update the TCAM route entry */
519 err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
520 &route->key, afa_block);
524 /* Delete the old one */
525 mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
526 route->afa_block = afa_block;
527 route->action = route_action;
530 mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
534 static int mlxsw_sp_mr_tcam_route_min_mtu_update(struct mlxsw_sp *mlxsw_sp,
535 void *route_priv, u16 min_mtu)
537 struct mlxsw_sp_mr_tcam_route *route = route_priv;
538 struct mlxsw_afa_block *afa_block;
541 /* Create a new flexible action block */
542 afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
545 route->counter_index,
548 if (IS_ERR(afa_block))
549 return PTR_ERR(afa_block);
551 /* Update the TCAM route entry */
552 err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
553 &route->key, afa_block);
557 /* Delete the old one */
558 mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
559 route->afa_block = afa_block;
560 route->min_mtu = min_mtu;
563 mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
567 static int mlxsw_sp_mr_tcam_route_irif_update(struct mlxsw_sp *mlxsw_sp,
568 void *route_priv, u16 irif_index)
570 struct mlxsw_sp_mr_tcam_route *route = route_priv;
572 if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
574 route->irif_index = irif_index;
578 static int mlxsw_sp_mr_tcam_route_erif_add(struct mlxsw_sp *mlxsw_sp,
579 void *route_priv, u16 erif_index)
581 struct mlxsw_sp_mr_tcam_route *route = route_priv;
584 err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list,
589 /* Commit the action only if the route action is not TRAP */
590 if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP)
591 return mlxsw_sp_mr_erif_list_commit(mlxsw_sp,
596 static int mlxsw_sp_mr_tcam_route_erif_del(struct mlxsw_sp *mlxsw_sp,
597 void *route_priv, u16 erif_index)
599 struct mlxsw_sp_mr_tcam_route *route = route_priv;
600 struct mlxsw_sp_mr_erif_sublist *erif_sublist;
601 struct mlxsw_sp_mr_tcam_erif_list erif_list;
602 struct mlxsw_afa_block *afa_block;
606 /* Create a copy of the original erif_list without the deleted entry */
607 mlxsw_sp_mr_erif_list_init(&erif_list);
608 list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists, list) {
609 for (i = 0; i < erif_sublist->num_erifs; i++) {
610 u16 curr_erif = erif_sublist->erif_indices[i];
612 if (curr_erif == erif_index)
614 err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &erif_list,
617 goto err_erif_list_add;
621 /* Create the flexible action block pointing to the new erif_list */
622 afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action,
624 route->counter_index,
627 if (IS_ERR(afa_block)) {
628 err = PTR_ERR(afa_block);
629 goto err_afa_block_create;
632 /* Update the TCAM route entry */
633 err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
634 &route->key, afa_block);
636 goto err_route_write;
638 mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
639 mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
640 route->afa_block = afa_block;
641 mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
645 mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
646 err_afa_block_create:
648 mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
653 mlxsw_sp_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, void *route_priv,
654 struct mlxsw_sp_mr_route_info *route_info)
656 struct mlxsw_sp_mr_tcam_route *route = route_priv;
657 struct mlxsw_sp_mr_tcam_erif_list erif_list;
658 struct mlxsw_afa_block *afa_block;
661 /* Create a new erif_list */
662 mlxsw_sp_mr_erif_list_init(&erif_list);
663 err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &erif_list, route_info);
665 goto err_erif_populate;
667 /* Create the flexible action block pointing to the new erif_list */
668 afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp,
669 route_info->route_action,
670 route_info->irif_index,
671 route->counter_index,
674 if (IS_ERR(afa_block)) {
675 err = PTR_ERR(afa_block);
676 goto err_afa_block_create;
679 /* Update the TCAM route entry */
680 err = mlxsw_sp_mr_tcam_route_replace(mlxsw_sp, &route->parman_item,
681 &route->key, afa_block);
683 goto err_route_write;
685 mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block);
686 mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list);
687 route->afa_block = afa_block;
688 mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list);
689 route->action = route_info->route_action;
690 route->irif_index = route_info->irif_index;
691 route->min_mtu = route_info->min_mtu;
695 mlxsw_sp_mr_tcam_afa_block_destroy(afa_block);
696 err_afa_block_create:
698 mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &erif_list);
702 #define MLXSW_SP_MR_TCAM_REGION_BASE_COUNT 16
703 #define MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP 16
706 mlxsw_sp_mr_tcam_region_alloc(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
708 struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
709 char rtar_pl[MLXSW_REG_RTAR_LEN];
711 mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_ALLOCATE,
712 mr_tcam_region->rtar_key_type,
713 MLXSW_SP_MR_TCAM_REGION_BASE_COUNT);
714 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
718 mlxsw_sp_mr_tcam_region_free(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
720 struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
721 char rtar_pl[MLXSW_REG_RTAR_LEN];
723 mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_DEALLOCATE,
724 mr_tcam_region->rtar_key_type, 0);
725 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
728 static int mlxsw_sp_mr_tcam_region_parman_resize(void *priv,
729 unsigned long new_count)
731 struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
732 struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
733 char rtar_pl[MLXSW_REG_RTAR_LEN];
736 max_tcam_rules = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_TCAM_RULES);
737 if (new_count > max_tcam_rules)
739 mlxsw_reg_rtar_pack(rtar_pl, MLXSW_REG_RTAR_OP_RESIZE,
740 mr_tcam_region->rtar_key_type, new_count);
741 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtar), rtar_pl);
744 static void mlxsw_sp_mr_tcam_region_parman_move(void *priv,
745 unsigned long from_index,
746 unsigned long to_index,
749 struct mlxsw_sp_mr_tcam_region *mr_tcam_region = priv;
750 struct mlxsw_sp *mlxsw_sp = mr_tcam_region->mlxsw_sp;
751 char rrcr_pl[MLXSW_REG_RRCR_LEN];
753 mlxsw_reg_rrcr_pack(rrcr_pl, MLXSW_REG_RRCR_OP_MOVE,
755 mr_tcam_region->rtar_key_type, to_index);
756 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rrcr), rrcr_pl);
759 static const struct parman_ops mlxsw_sp_mr_tcam_region_parman_ops = {
760 .base_count = MLXSW_SP_MR_TCAM_REGION_BASE_COUNT,
761 .resize_step = MLXSW_SP_MR_TCAM_REGION_RESIZE_STEP,
762 .resize = mlxsw_sp_mr_tcam_region_parman_resize,
763 .move = mlxsw_sp_mr_tcam_region_parman_move,
764 .algo = PARMAN_ALGO_TYPE_LSORT,
768 mlxsw_sp_mr_tcam_region_init(struct mlxsw_sp *mlxsw_sp,
769 struct mlxsw_sp_mr_tcam_region *mr_tcam_region,
770 enum mlxsw_reg_rtar_key_type rtar_key_type)
772 struct parman_prio *parman_prios;
773 struct parman *parman;
777 mr_tcam_region->rtar_key_type = rtar_key_type;
778 mr_tcam_region->mlxsw_sp = mlxsw_sp;
780 err = mlxsw_sp_mr_tcam_region_alloc(mr_tcam_region);
784 parman = parman_create(&mlxsw_sp_mr_tcam_region_parman_ops,
788 goto err_parman_create;
790 mr_tcam_region->parman = parman;
792 parman_prios = kmalloc_array(MLXSW_SP_MR_ROUTE_PRIO_MAX + 1,
793 sizeof(*parman_prios), GFP_KERNEL);
796 goto err_parman_prios_alloc;
798 mr_tcam_region->parman_prios = parman_prios;
800 for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
801 parman_prio_init(mr_tcam_region->parman,
802 &mr_tcam_region->parman_prios[i], i);
805 err_parman_prios_alloc:
806 parman_destroy(parman);
808 mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
813 mlxsw_sp_mr_tcam_region_fini(struct mlxsw_sp_mr_tcam_region *mr_tcam_region)
817 for (i = 0; i < MLXSW_SP_MR_ROUTE_PRIO_MAX + 1; i++)
818 parman_prio_fini(&mr_tcam_region->parman_prios[i]);
819 kfree(mr_tcam_region->parman_prios);
820 parman_destroy(mr_tcam_region->parman);
821 mlxsw_sp_mr_tcam_region_free(mr_tcam_region);
824 static int mlxsw_sp_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv)
826 struct mlxsw_sp_mr_tcam *mr_tcam = priv;
827 struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
831 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MC_ERIF_LIST_ENTRIES) ||
832 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_TCAM_RULES))
835 rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV4_MULTICAST;
836 err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
837 ®ion[MLXSW_SP_L3_PROTO_IPV4],
842 rtar_key = MLXSW_REG_RTAR_KEY_TYPE_IPV6_MULTICAST;
843 err = mlxsw_sp_mr_tcam_region_init(mlxsw_sp,
844 ®ion[MLXSW_SP_L3_PROTO_IPV6],
847 goto err_ipv6_region_init;
851 err_ipv6_region_init:
852 mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]);
856 static void mlxsw_sp_mr_tcam_fini(void *priv)
858 struct mlxsw_sp_mr_tcam *mr_tcam = priv;
859 struct mlxsw_sp_mr_tcam_region *region = &mr_tcam->tcam_regions[0];
861 mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV6]);
862 mlxsw_sp_mr_tcam_region_fini(®ion[MLXSW_SP_L3_PROTO_IPV4]);
865 const struct mlxsw_sp_mr_ops mlxsw_sp_mr_tcam_ops = {
866 .priv_size = sizeof(struct mlxsw_sp_mr_tcam),
867 .route_priv_size = sizeof(struct mlxsw_sp_mr_tcam_route),
868 .init = mlxsw_sp_mr_tcam_init,
869 .route_create = mlxsw_sp_mr_tcam_route_create,
870 .route_update = mlxsw_sp_mr_tcam_route_update,
871 .route_stats = mlxsw_sp_mr_tcam_route_stats,
872 .route_action_update = mlxsw_sp_mr_tcam_route_action_update,
873 .route_min_mtu_update = mlxsw_sp_mr_tcam_route_min_mtu_update,
874 .route_irif_update = mlxsw_sp_mr_tcam_route_irif_update,
875 .route_erif_add = mlxsw_sp_mr_tcam_route_erif_add,
876 .route_erif_del = mlxsw_sp_mr_tcam_route_erif_del,
877 .route_destroy = mlxsw_sp_mr_tcam_route_destroy,
878 .fini = mlxsw_sp_mr_tcam_fini,