/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/rhashtable.h>
#include <net/ipmr.h>

#include "spectrum_mr.h"
#include "spectrum_router.h"
41 const struct mlxsw_sp_mr_ops *mr_ops;
42 void *catchall_route_priv;
43 struct delayed_work stats_update_dw;
44 struct list_head table_list;
45 #define MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL 5000 /* ms */
46 unsigned long priv[0];
47 /* priv has to be always the last item */
50 struct mlxsw_sp_mr_vif {
51 struct net_device *dev;
52 const struct mlxsw_sp_rif *rif;
53 unsigned long vif_flags;
55 /* A list of route_vif_entry structs that point to routes that the VIF
56 * instance is used as one of the egress VIFs
58 struct list_head route_evif_list;
60 /* A list of route_vif_entry structs that point to routes that the VIF
61 * instance is used as an ingress VIF
63 struct list_head route_ivif_list;
66 struct mlxsw_sp_mr_route_vif_entry {
67 struct list_head vif_node;
68 struct list_head route_node;
69 struct mlxsw_sp_mr_vif *mr_vif;
70 struct mlxsw_sp_mr_route *mr_route;
73 struct mlxsw_sp_mr_table {
74 struct list_head node;
75 enum mlxsw_sp_l3proto proto;
76 struct mlxsw_sp *mlxsw_sp;
78 struct mlxsw_sp_mr_vif vifs[MAXVIFS];
79 struct list_head route_list;
80 struct rhashtable route_ht;
81 char catchall_route_priv[0];
82 /* catchall_route_priv has to be always the last item */
85 struct mlxsw_sp_mr_route {
86 struct list_head node;
87 struct rhash_head ht_node;
88 struct mlxsw_sp_mr_route_key key;
89 enum mlxsw_sp_mr_route_action route_action;
91 struct mfc_cache *mfc4;
93 const struct mlxsw_sp_mr_table *mr_table;
94 /* A list of route_vif_entry structs that point to the egress VIFs */
95 struct list_head evif_list;
96 /* A route_vif_entry struct that point to the ingress VIF */
97 struct mlxsw_sp_mr_route_vif_entry ivif;
100 static const struct rhashtable_params mlxsw_sp_mr_route_ht_params = {
101 .key_len = sizeof(struct mlxsw_sp_mr_route_key),
102 .key_offset = offsetof(struct mlxsw_sp_mr_route, key),
103 .head_offset = offsetof(struct mlxsw_sp_mr_route, ht_node),
104 .automatic_shrinking = true,
107 static bool mlxsw_sp_mr_vif_regular(const struct mlxsw_sp_mr_vif *vif)
109 return !(vif->vif_flags & (VIFF_TUNNEL | VIFF_REGISTER));
112 static bool mlxsw_sp_mr_vif_valid(const struct mlxsw_sp_mr_vif *vif)
114 return mlxsw_sp_mr_vif_regular(vif) && vif->dev && vif->rif;
117 static bool mlxsw_sp_mr_vif_rif_invalid(const struct mlxsw_sp_mr_vif *vif)
119 return mlxsw_sp_mr_vif_regular(vif) && vif->dev && !vif->rif;
123 mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
127 switch (mr_route->mr_table->proto) {
128 case MLXSW_SP_L3_PROTO_IPV4:
129 ivif = mr_route->mfc4->mfc_parent;
130 return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255;
131 case MLXSW_SP_L3_PROTO_IPV6:
140 mlxsw_sp_mr_route_valid_evifs_num(const struct mlxsw_sp_mr_route *mr_route)
142 struct mlxsw_sp_mr_route_vif_entry *rve;
146 list_for_each_entry(rve, &mr_route->evif_list, route_node)
147 if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
152 static bool mlxsw_sp_mr_route_starg(const struct mlxsw_sp_mr_route *mr_route)
154 switch (mr_route->mr_table->proto) {
155 case MLXSW_SP_L3_PROTO_IPV4:
156 return mr_route->key.source_mask.addr4 == INADDR_ANY;
157 case MLXSW_SP_L3_PROTO_IPV6:
165 static enum mlxsw_sp_mr_route_action
166 mlxsw_sp_mr_route_action(const struct mlxsw_sp_mr_route *mr_route)
168 struct mlxsw_sp_mr_route_vif_entry *rve;
170 /* If the ingress port is not regular and resolved, trap the route */
171 if (!mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
172 return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
174 /* The kernel does not match a (*,G) route that the ingress interface is
175 * not one of the egress interfaces, so trap these kind of routes.
177 if (mlxsw_sp_mr_route_starg(mr_route) &&
178 !mlxsw_sp_mr_route_ivif_in_evifs(mr_route))
179 return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
181 /* If the route has no valid eVIFs, trap it. */
182 if (!mlxsw_sp_mr_route_valid_evifs_num(mr_route))
183 return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
185 /* If either one of the eVIFs is not regular (VIF of type pimreg or
186 * tunnel) or one of the VIFs has no matching RIF, trap the packet.
188 list_for_each_entry(rve, &mr_route->evif_list, route_node) {
189 if (!mlxsw_sp_mr_vif_regular(rve->mr_vif) ||
190 mlxsw_sp_mr_vif_rif_invalid(rve->mr_vif))
191 return MLXSW_SP_MR_ROUTE_ACTION_TRAP;
193 return MLXSW_SP_MR_ROUTE_ACTION_FORWARD;
196 static enum mlxsw_sp_mr_route_prio
197 mlxsw_sp_mr_route_prio(const struct mlxsw_sp_mr_route *mr_route)
199 return mlxsw_sp_mr_route_starg(mr_route) ?
200 MLXSW_SP_MR_ROUTE_PRIO_STARG : MLXSW_SP_MR_ROUTE_PRIO_SG;
203 static void mlxsw_sp_mr_route4_key(struct mlxsw_sp_mr_table *mr_table,
204 struct mlxsw_sp_mr_route_key *key,
205 const struct mfc_cache *mfc)
207 bool starg = (mfc->mfc_origin == INADDR_ANY);
209 memset(key, 0, sizeof(*key));
210 key->vrid = mr_table->vr_id;
211 key->proto = mr_table->proto;
212 key->group.addr4 = mfc->mfc_mcastgrp;
213 key->group_mask.addr4 = 0xffffffff;
214 key->source.addr4 = mfc->mfc_origin;
215 key->source_mask.addr4 = starg ? 0 : 0xffffffff;
218 static int mlxsw_sp_mr_route_evif_link(struct mlxsw_sp_mr_route *mr_route,
219 struct mlxsw_sp_mr_vif *mr_vif)
221 struct mlxsw_sp_mr_route_vif_entry *rve;
223 rve = kzalloc(sizeof(*rve), GFP_KERNEL);
226 rve->mr_route = mr_route;
227 rve->mr_vif = mr_vif;
228 list_add_tail(&rve->route_node, &mr_route->evif_list);
229 list_add_tail(&rve->vif_node, &mr_vif->route_evif_list);
234 mlxsw_sp_mr_route_evif_unlink(struct mlxsw_sp_mr_route_vif_entry *rve)
236 list_del(&rve->route_node);
237 list_del(&rve->vif_node);
241 static void mlxsw_sp_mr_route_ivif_link(struct mlxsw_sp_mr_route *mr_route,
242 struct mlxsw_sp_mr_vif *mr_vif)
244 mr_route->ivif.mr_route = mr_route;
245 mr_route->ivif.mr_vif = mr_vif;
246 list_add_tail(&mr_route->ivif.vif_node, &mr_vif->route_ivif_list);
249 static void mlxsw_sp_mr_route_ivif_unlink(struct mlxsw_sp_mr_route *mr_route)
251 list_del(&mr_route->ivif.vif_node);
255 mlxsw_sp_mr_route_info_create(struct mlxsw_sp_mr_table *mr_table,
256 struct mlxsw_sp_mr_route *mr_route,
257 struct mlxsw_sp_mr_route_info *route_info)
259 struct mlxsw_sp_mr_route_vif_entry *rve;
264 erif_indices = kmalloc_array(MAXVIFS, sizeof(*erif_indices),
269 list_for_each_entry(rve, &mr_route->evif_list, route_node) {
270 if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
271 u16 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
273 erif_indices[erif++] = rifi;
277 if (mlxsw_sp_mr_vif_valid(mr_route->ivif.mr_vif))
278 irif_index = mlxsw_sp_rif_index(mr_route->ivif.mr_vif->rif);
282 route_info->irif_index = irif_index;
283 route_info->erif_indices = erif_indices;
284 route_info->min_mtu = mr_route->min_mtu;
285 route_info->route_action = mr_route->route_action;
286 route_info->erif_num = erif;
291 mlxsw_sp_mr_route_info_destroy(struct mlxsw_sp_mr_route_info *route_info)
293 kfree(route_info->erif_indices);
296 static int mlxsw_sp_mr_route_write(struct mlxsw_sp_mr_table *mr_table,
297 struct mlxsw_sp_mr_route *mr_route,
300 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
301 struct mlxsw_sp_mr_route_info route_info;
302 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
305 err = mlxsw_sp_mr_route_info_create(mr_table, mr_route, &route_info);
310 struct mlxsw_sp_mr_route_params route_params;
312 mr_route->route_priv = kzalloc(mr->mr_ops->route_priv_size,
314 if (!mr_route->route_priv) {
319 route_params.key = mr_route->key;
320 route_params.value = route_info;
321 route_params.prio = mlxsw_sp_mr_route_prio(mr_route);
322 err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
323 mr_route->route_priv,
326 kfree(mr_route->route_priv);
328 err = mr->mr_ops->route_update(mlxsw_sp, mr_route->route_priv,
332 mlxsw_sp_mr_route_info_destroy(&route_info);
336 static void mlxsw_sp_mr_route_erase(struct mlxsw_sp_mr_table *mr_table,
337 struct mlxsw_sp_mr_route *mr_route)
339 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
340 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
342 mr->mr_ops->route_destroy(mlxsw_sp, mr->priv, mr_route->route_priv);
343 kfree(mr_route->route_priv);
346 static struct mlxsw_sp_mr_route *
347 mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
348 struct mfc_cache *mfc)
350 struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
351 struct mlxsw_sp_mr_route *mr_route;
355 /* Allocate and init a new route and fill it with parameters */
356 mr_route = kzalloc(sizeof(*mr_route), GFP_KERNEL);
358 return ERR_PTR(-ENOMEM);
359 INIT_LIST_HEAD(&mr_route->evif_list);
360 mlxsw_sp_mr_route4_key(mr_table, &mr_route->key, mfc);
362 /* Find min_mtu and link iVIF and eVIFs */
363 mr_route->min_mtu = ETH_MAX_MTU;
364 ipmr_cache_hold(mfc);
365 mr_route->mfc4 = mfc;
366 mr_route->mr_table = mr_table;
367 for (i = 0; i < MAXVIFS; i++) {
368 if (mfc->mfc_un.res.ttls[i] != 255) {
369 err = mlxsw_sp_mr_route_evif_link(mr_route,
373 if (mr_table->vifs[i].dev &&
374 mr_table->vifs[i].dev->mtu < mr_route->min_mtu)
375 mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
378 mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]);
382 mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
386 list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
387 mlxsw_sp_mr_route_evif_unlink(rve);
392 static void mlxsw_sp_mr_route4_destroy(struct mlxsw_sp_mr_table *mr_table,
393 struct mlxsw_sp_mr_route *mr_route)
395 struct mlxsw_sp_mr_route_vif_entry *rve, *tmp;
397 mlxsw_sp_mr_route_ivif_unlink(mr_route);
398 ipmr_cache_put(mr_route->mfc4);
399 list_for_each_entry_safe(rve, tmp, &mr_route->evif_list, route_node)
400 mlxsw_sp_mr_route_evif_unlink(rve);
404 static void mlxsw_sp_mr_route_destroy(struct mlxsw_sp_mr_table *mr_table,
405 struct mlxsw_sp_mr_route *mr_route)
407 switch (mr_table->proto) {
408 case MLXSW_SP_L3_PROTO_IPV4:
409 mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
411 case MLXSW_SP_L3_PROTO_IPV6:
418 static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
421 switch (mr_route->mr_table->proto) {
422 case MLXSW_SP_L3_PROTO_IPV4:
424 mr_route->mfc4->mfc_flags |= MFC_OFFLOAD;
426 mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD;
428 case MLXSW_SP_L3_PROTO_IPV6:
435 static void mlxsw_sp_mr_mfc_offload_update(struct mlxsw_sp_mr_route *mr_route)
439 offload = mr_route->route_action != MLXSW_SP_MR_ROUTE_ACTION_TRAP;
440 mlxsw_sp_mr_mfc_offload_set(mr_route, offload);
443 static void __mlxsw_sp_mr_route_del(struct mlxsw_sp_mr_table *mr_table,
444 struct mlxsw_sp_mr_route *mr_route)
446 mlxsw_sp_mr_mfc_offload_set(mr_route, false);
447 mlxsw_sp_mr_route_erase(mr_table, mr_route);
448 rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
449 mlxsw_sp_mr_route_ht_params);
450 list_del(&mr_route->node);
451 mlxsw_sp_mr_route_destroy(mr_table, mr_route);
454 int mlxsw_sp_mr_route4_add(struct mlxsw_sp_mr_table *mr_table,
455 struct mfc_cache *mfc, bool replace)
457 struct mlxsw_sp_mr_route *mr_orig_route = NULL;
458 struct mlxsw_sp_mr_route *mr_route;
461 /* If the route is a (*,*) route, abort, as these kind of routes are
462 * used for proxy routes.
464 if (mfc->mfc_origin == INADDR_ANY && mfc->mfc_mcastgrp == INADDR_ANY) {
465 dev_warn(mr_table->mlxsw_sp->bus_info->dev,
466 "Offloading proxy routes is not supported.\n");
470 /* Create a new route */
471 mr_route = mlxsw_sp_mr_route4_create(mr_table, mfc);
472 if (IS_ERR(mr_route))
473 return PTR_ERR(mr_route);
475 /* Find any route with a matching key */
476 mr_orig_route = rhashtable_lookup_fast(&mr_table->route_ht,
478 mlxsw_sp_mr_route_ht_params);
480 /* On replace case, make the route point to the new route_priv.
482 if (WARN_ON(!mr_orig_route)) {
484 goto err_no_orig_route;
486 mr_route->route_priv = mr_orig_route->route_priv;
487 } else if (mr_orig_route) {
488 /* On non replace case, if another route with the same key was
489 * found, abort, as duplicate routes are used for proxy routes.
491 dev_warn(mr_table->mlxsw_sp->bus_info->dev,
492 "Offloading proxy routes is not supported.\n");
494 goto err_duplicate_route;
497 /* Put it in the table data-structures */
498 list_add_tail(&mr_route->node, &mr_table->route_list);
499 err = rhashtable_insert_fast(&mr_table->route_ht,
501 mlxsw_sp_mr_route_ht_params);
503 goto err_rhashtable_insert;
505 /* Write the route to the hardware */
506 err = mlxsw_sp_mr_route_write(mr_table, mr_route, replace);
508 goto err_mr_route_write;
510 /* Destroy the original route */
512 rhashtable_remove_fast(&mr_table->route_ht,
513 &mr_orig_route->ht_node,
514 mlxsw_sp_mr_route_ht_params);
515 list_del(&mr_orig_route->node);
516 mlxsw_sp_mr_route4_destroy(mr_table, mr_orig_route);
519 mlxsw_sp_mr_mfc_offload_update(mr_route);
523 rhashtable_remove_fast(&mr_table->route_ht, &mr_route->ht_node,
524 mlxsw_sp_mr_route_ht_params);
525 err_rhashtable_insert:
526 list_del(&mr_route->node);
529 mlxsw_sp_mr_route4_destroy(mr_table, mr_route);
533 void mlxsw_sp_mr_route4_del(struct mlxsw_sp_mr_table *mr_table,
534 struct mfc_cache *mfc)
536 struct mlxsw_sp_mr_route *mr_route;
537 struct mlxsw_sp_mr_route_key key;
539 mlxsw_sp_mr_route4_key(mr_table, &key, mfc);
540 mr_route = rhashtable_lookup_fast(&mr_table->route_ht, &key,
541 mlxsw_sp_mr_route_ht_params);
543 __mlxsw_sp_mr_route_del(mr_table, mr_route);
546 /* Should be called after the VIF struct is updated */
548 mlxsw_sp_mr_route_ivif_resolve(struct mlxsw_sp_mr_table *mr_table,
549 struct mlxsw_sp_mr_route_vif_entry *rve)
551 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
552 enum mlxsw_sp_mr_route_action route_action;
553 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
557 route_action = mlxsw_sp_mr_route_action(rve->mr_route);
558 if (route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
561 /* rve->mr_vif->rif is guaranteed to be valid at this stage */
562 irif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
563 err = mr->mr_ops->route_irif_update(mlxsw_sp, rve->mr_route->route_priv,
568 err = mr->mr_ops->route_action_update(mlxsw_sp,
569 rve->mr_route->route_priv,
572 /* No need to rollback here because the iRIF change only takes
573 * place after the action has been updated.
577 rve->mr_route->route_action = route_action;
578 mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
583 mlxsw_sp_mr_route_ivif_unresolve(struct mlxsw_sp_mr_table *mr_table,
584 struct mlxsw_sp_mr_route_vif_entry *rve)
586 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
587 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
589 mr->mr_ops->route_action_update(mlxsw_sp, rve->mr_route->route_priv,
590 MLXSW_SP_MR_ROUTE_ACTION_TRAP);
591 rve->mr_route->route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
592 mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
595 /* Should be called after the RIF struct is updated */
597 mlxsw_sp_mr_route_evif_resolve(struct mlxsw_sp_mr_table *mr_table,
598 struct mlxsw_sp_mr_route_vif_entry *rve)
600 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
601 enum mlxsw_sp_mr_route_action route_action;
602 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
606 /* Update the route action, as the new eVIF can be a tunnel or a pimreg
607 * device which will require updating the action.
609 route_action = mlxsw_sp_mr_route_action(rve->mr_route);
610 if (route_action != rve->mr_route->route_action) {
611 err = mr->mr_ops->route_action_update(mlxsw_sp,
612 rve->mr_route->route_priv,
619 if (mlxsw_sp_mr_vif_valid(rve->mr_vif)) {
620 erif_index = mlxsw_sp_rif_index(rve->mr_vif->rif);
621 err = mr->mr_ops->route_erif_add(mlxsw_sp,
622 rve->mr_route->route_priv,
625 goto err_route_erif_add;
628 /* Update the minimum MTU */
629 if (rve->mr_vif->dev->mtu < rve->mr_route->min_mtu) {
630 rve->mr_route->min_mtu = rve->mr_vif->dev->mtu;
631 err = mr->mr_ops->route_min_mtu_update(mlxsw_sp,
632 rve->mr_route->route_priv,
633 rve->mr_route->min_mtu);
635 goto err_route_min_mtu_update;
638 rve->mr_route->route_action = route_action;
639 mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
642 err_route_min_mtu_update:
643 if (mlxsw_sp_mr_vif_valid(rve->mr_vif))
644 mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv,
647 if (route_action != rve->mr_route->route_action)
648 mr->mr_ops->route_action_update(mlxsw_sp,
649 rve->mr_route->route_priv,
650 rve->mr_route->route_action);
654 /* Should be called before the RIF struct is updated */
656 mlxsw_sp_mr_route_evif_unresolve(struct mlxsw_sp_mr_table *mr_table,
657 struct mlxsw_sp_mr_route_vif_entry *rve)
659 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
660 enum mlxsw_sp_mr_route_action route_action;
661 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
664 /* If the unresolved RIF was not valid, no need to delete it */
665 if (!mlxsw_sp_mr_vif_valid(rve->mr_vif))
668 /* Update the route action: if there is only one valid eVIF in the
669 * route, set the action to trap as the VIF deletion will lead to zero
670 * valid eVIFs. On any other case, use the mlxsw_sp_mr_route_action to
671 * determine the route action.
673 if (mlxsw_sp_mr_route_valid_evifs_num(rve->mr_route) == 1)
674 route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP;
676 route_action = mlxsw_sp_mr_route_action(rve->mr_route);
677 if (route_action != rve->mr_route->route_action)
678 mr->mr_ops->route_action_update(mlxsw_sp,
679 rve->mr_route->route_priv,
682 /* Delete the erif from the route */
683 rifi = mlxsw_sp_rif_index(rve->mr_vif->rif);
684 mr->mr_ops->route_erif_del(mlxsw_sp, rve->mr_route->route_priv, rifi);
685 rve->mr_route->route_action = route_action;
686 mlxsw_sp_mr_mfc_offload_update(rve->mr_route);
689 static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
690 struct net_device *dev,
691 struct mlxsw_sp_mr_vif *mr_vif,
692 unsigned long vif_flags,
693 const struct mlxsw_sp_rif *rif)
695 struct mlxsw_sp_mr_route_vif_entry *irve, *erve;
701 mr_vif->vif_flags = vif_flags;
703 /* Update all routes where this VIF is used as an unresolved iRIF */
704 list_for_each_entry(irve, &mr_vif->route_ivif_list, vif_node) {
705 err = mlxsw_sp_mr_route_ivif_resolve(mr_table, irve);
707 goto err_irif_unresolve;
710 /* Update all routes where this VIF is used as an unresolved eRIF */
711 list_for_each_entry(erve, &mr_vif->route_evif_list, vif_node) {
712 err = mlxsw_sp_mr_route_evif_resolve(mr_table, erve);
714 goto err_erif_unresolve;
719 list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
721 mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
723 list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
725 mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
730 static void mlxsw_sp_mr_vif_unresolve(struct mlxsw_sp_mr_table *mr_table,
731 struct net_device *dev,
732 struct mlxsw_sp_mr_vif *mr_vif)
734 struct mlxsw_sp_mr_route_vif_entry *rve;
736 /* Update all routes where this VIF is used as an unresolved eRIF */
737 list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node)
738 mlxsw_sp_mr_route_evif_unresolve(mr_table, rve);
740 /* Update all routes where this VIF is used as an unresolved iRIF */
741 list_for_each_entry(rve, &mr_vif->route_ivif_list, vif_node)
742 mlxsw_sp_mr_route_ivif_unresolve(mr_table, rve);
749 int mlxsw_sp_mr_vif_add(struct mlxsw_sp_mr_table *mr_table,
750 struct net_device *dev, vifi_t vif_index,
751 unsigned long vif_flags, const struct mlxsw_sp_rif *rif)
753 struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
755 if (WARN_ON(vif_index >= MAXVIFS))
759 return mlxsw_sp_mr_vif_resolve(mr_table, dev, mr_vif, vif_flags, rif);
762 void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
764 struct mlxsw_sp_mr_vif *mr_vif = &mr_table->vifs[vif_index];
766 if (WARN_ON(vif_index >= MAXVIFS))
768 if (WARN_ON(!mr_vif->dev))
770 mlxsw_sp_mr_vif_unresolve(mr_table, NULL, mr_vif);
773 struct mlxsw_sp_mr_vif *
774 mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
775 const struct net_device *dev)
779 for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
780 if (mr_table->vifs[vif_index].dev == dev)
781 return &mr_table->vifs[vif_index];
785 int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
786 const struct mlxsw_sp_rif *rif)
788 const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
789 struct mlxsw_sp_mr_vif *mr_vif;
794 mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
797 return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
798 mr_vif->vif_flags, rif);
801 void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
802 const struct mlxsw_sp_rif *rif)
804 const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
805 struct mlxsw_sp_mr_vif *mr_vif;
810 mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
813 mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
816 void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
817 const struct mlxsw_sp_rif *rif, int mtu)
819 const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
820 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
821 struct mlxsw_sp_mr_route_vif_entry *rve;
822 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
823 struct mlxsw_sp_mr_vif *mr_vif;
828 /* Search for a VIF that use that RIF */
829 mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
833 /* Update all the routes that uses that VIF as eVIF */
834 list_for_each_entry(rve, &mr_vif->route_evif_list, vif_node) {
835 if (mtu < rve->mr_route->min_mtu) {
836 rve->mr_route->min_mtu = mtu;
837 mr->mr_ops->route_min_mtu_update(mlxsw_sp,
838 rve->mr_route->route_priv,
844 struct mlxsw_sp_mr_table *mlxsw_sp_mr_table_create(struct mlxsw_sp *mlxsw_sp,
846 enum mlxsw_sp_l3proto proto)
848 struct mlxsw_sp_mr_route_params catchall_route_params = {
849 .prio = MLXSW_SP_MR_ROUTE_PRIO_CATCHALL,
854 .route_action = MLXSW_SP_MR_ROUTE_ACTION_TRAP,
857 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
858 struct mlxsw_sp_mr_table *mr_table;
862 mr_table = kzalloc(sizeof(*mr_table) + mr->mr_ops->route_priv_size,
865 return ERR_PTR(-ENOMEM);
867 mr_table->vr_id = vr_id;
868 mr_table->mlxsw_sp = mlxsw_sp;
869 mr_table->proto = proto;
870 INIT_LIST_HEAD(&mr_table->route_list);
872 err = rhashtable_init(&mr_table->route_ht,
873 &mlxsw_sp_mr_route_ht_params);
875 goto err_route_rhashtable_init;
877 for (i = 0; i < MAXVIFS; i++) {
878 INIT_LIST_HEAD(&mr_table->vifs[i].route_evif_list);
879 INIT_LIST_HEAD(&mr_table->vifs[i].route_ivif_list);
882 err = mr->mr_ops->route_create(mlxsw_sp, mr->priv,
883 mr_table->catchall_route_priv,
884 &catchall_route_params);
886 goto err_ops_route_create;
887 list_add_tail(&mr_table->node, &mr->table_list);
890 err_ops_route_create:
891 rhashtable_destroy(&mr_table->route_ht);
892 err_route_rhashtable_init:
897 void mlxsw_sp_mr_table_destroy(struct mlxsw_sp_mr_table *mr_table)
899 struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
900 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
902 WARN_ON(!mlxsw_sp_mr_table_empty(mr_table));
903 list_del(&mr_table->node);
904 mr->mr_ops->route_destroy(mlxsw_sp, mr->priv,
905 &mr_table->catchall_route_priv);
906 rhashtable_destroy(&mr_table->route_ht);
910 void mlxsw_sp_mr_table_flush(struct mlxsw_sp_mr_table *mr_table)
912 struct mlxsw_sp_mr_route *mr_route, *tmp;
915 list_for_each_entry_safe(mr_route, tmp, &mr_table->route_list, node)
916 __mlxsw_sp_mr_route_del(mr_table, mr_route);
918 for (i = 0; i < MAXVIFS; i++) {
919 mr_table->vifs[i].dev = NULL;
920 mr_table->vifs[i].rif = NULL;
924 bool mlxsw_sp_mr_table_empty(const struct mlxsw_sp_mr_table *mr_table)
928 for (i = 0; i < MAXVIFS; i++)
929 if (mr_table->vifs[i].dev)
931 return list_empty(&mr_table->route_list);
934 static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
935 struct mlxsw_sp_mr_route *mr_route)
937 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
940 if (mr_route->route_action == MLXSW_SP_MR_ROUTE_ACTION_TRAP)
943 mr->mr_ops->route_stats(mlxsw_sp, mr_route->route_priv, &packets,
946 switch (mr_route->mr_table->proto) {
947 case MLXSW_SP_L3_PROTO_IPV4:
948 if (mr_route->mfc4->mfc_un.res.pkt != packets)
949 mr_route->mfc4->mfc_un.res.lastuse = jiffies;
950 mr_route->mfc4->mfc_un.res.pkt = packets;
951 mr_route->mfc4->mfc_un.res.bytes = bytes;
953 case MLXSW_SP_L3_PROTO_IPV6:
960 static void mlxsw_sp_mr_stats_update(struct work_struct *work)
962 struct mlxsw_sp_mr *mr = container_of(work, struct mlxsw_sp_mr,
963 stats_update_dw.work);
964 struct mlxsw_sp_mr_table *mr_table;
965 struct mlxsw_sp_mr_route *mr_route;
966 unsigned long interval;
969 list_for_each_entry(mr_table, &mr->table_list, node)
970 list_for_each_entry(mr_route, &mr_table->route_list, node)
971 mlxsw_sp_mr_route_stats_update(mr_table->mlxsw_sp,
975 interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
976 mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
979 int mlxsw_sp_mr_init(struct mlxsw_sp *mlxsw_sp,
980 const struct mlxsw_sp_mr_ops *mr_ops)
982 struct mlxsw_sp_mr *mr;
983 unsigned long interval;
986 mr = kzalloc(sizeof(*mr) + mr_ops->priv_size, GFP_KERNEL);
991 INIT_LIST_HEAD(&mr->table_list);
993 err = mr_ops->init(mlxsw_sp, mr->priv);
997 /* Create the delayed work for counter updates */
998 INIT_DELAYED_WORK(&mr->stats_update_dw, mlxsw_sp_mr_stats_update);
999 interval = msecs_to_jiffies(MLXSW_SP_MR_ROUTES_COUNTER_UPDATE_INTERVAL);
1000 mlxsw_core_schedule_dw(&mr->stats_update_dw, interval);
1007 void mlxsw_sp_mr_fini(struct mlxsw_sp *mlxsw_sp)
1009 struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
1011 cancel_delayed_work_sync(&mr->stats_update_dw);
1012 mr->mr_ops->fini(mr->priv);