// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/in6.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"
struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};
struct mlxsw_sp_rif_params {
	struct net_device *dev;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};
struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
	refcount_t ref_count;
	union {
		u16 system_port;
		u16 lag_id;
	};
	u16 vid;
	bool lag;
};
struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id; /* Reserved for Spectrum-2. */
	u16 ul_rif_id; /* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};
struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;
	size_t rif_size;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}

static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}

static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}
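
/* Bind or unbind a flow counter to a RIF in the given direction. The RITR
 * register is read first so that only the counter fields are modified on
 * the subsequent write.
 */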
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}
static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}
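
/* Allocate a counter from the RIF counter sub-pool, clear it and bind it to
 * the RIF in the requested direction.
 */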
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;
	int err;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}
void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}
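
/* The egress RIF counter is only allocated when counters were enabled for
 * the ERIF dpipe table (e.g. via devlink).
 */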
static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}

static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
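
/* The key buffer is sized for an IPv6 address so that the same structure can
 * hold both IPv4 and IPv6 prefixes.
 */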
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};

enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};
355 struct mlxsw_sp_nexthop_group;
356 struct mlxsw_sp_fib_entry;
struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};

struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};
371 static struct mlxsw_sp_fib_entry_priv *
372 mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
374 struct mlxsw_sp_fib_entry_priv *priv;
376 if (!ll_ops->fib_entry_priv_size)
377 /* No need to have priv */
380 priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
382 return ERR_PTR(-ENOMEM);
383 refcount_set(&priv->refcnt, 1);
388 mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
393 static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
395 refcount_inc(&priv->refcnt);
398 static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
400 if (!priv || !refcount_dec_and_test(&priv->refcnt))
402 mlxsw_sp_fib_entry_priv_destroy(priv);
405 static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
406 struct mlxsw_sp_fib_entry_priv *priv)
410 mlxsw_sp_fib_entry_priv_hold(priv);
411 list_add(&priv->list, &op_ctx->fib_entry_priv_list);
414 static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
416 struct mlxsw_sp_fib_entry_priv *priv, *tmp;
418 list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
419 mlxsw_sp_fib_entry_priv_put(priv);
420 INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
423 struct mlxsw_sp_fib_entry {
424 struct mlxsw_sp_fib_node *fib_node;
425 enum mlxsw_sp_fib_entry_type type;
426 struct list_head nexthop_group_node;
427 struct mlxsw_sp_nexthop_group *nh_group;
428 struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
429 struct mlxsw_sp_fib_entry_priv *priv;
432 struct mlxsw_sp_fib4_entry {
433 struct mlxsw_sp_fib_entry common;
440 struct mlxsw_sp_fib6_entry {
441 struct mlxsw_sp_fib_entry common;
442 struct list_head rt6_list;
struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};

struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};
struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
	const struct mlxsw_sp_router_ll_ops *ll_ops;
};

struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};
479 static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
481 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
482 xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
485 static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
487 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
488 xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
491 static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
493 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
494 xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
497 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
499 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
500 struct mlxsw_sp_vr *vr,
501 enum mlxsw_sp_l3proto proto)
503 const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
504 struct mlxsw_sp_lpm_tree *lpm_tree;
505 struct mlxsw_sp_fib *fib;
508 lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
509 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
511 return ERR_PTR(-ENOMEM);
512 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
514 goto err_rhashtable_init;
515 INIT_LIST_HEAD(&fib->node_list);
518 fib->lpm_tree = lpm_tree;
519 fib->ll_ops = ll_ops;
520 mlxsw_sp_lpm_tree_hold(lpm_tree);
521 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
523 goto err_lpm_tree_bind;
527 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
533 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
534 struct mlxsw_sp_fib *fib)
536 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
537 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
538 WARN_ON(!list_empty(&fib->node_list));
539 rhashtable_destroy(&fib->ht);
543 static struct mlxsw_sp_lpm_tree *
544 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
546 static struct mlxsw_sp_lpm_tree *lpm_tree;
549 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
550 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
551 if (lpm_tree->ref_count == 0)
557 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
558 const struct mlxsw_sp_router_ll_ops *ll_ops,
559 struct mlxsw_sp_lpm_tree *lpm_tree)
561 char xralta_pl[MLXSW_REG_XRALTA_LEN];
563 mlxsw_reg_xralta_pack(xralta_pl, true,
564 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
566 return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
569 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
570 const struct mlxsw_sp_router_ll_ops *ll_ops,
571 struct mlxsw_sp_lpm_tree *lpm_tree)
573 char xralta_pl[MLXSW_REG_XRALTA_LEN];
575 mlxsw_reg_xralta_pack(xralta_pl, false,
576 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
578 ll_ops->ralta_write(mlxsw_sp, xralta_pl);
582 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
583 const struct mlxsw_sp_router_ll_ops *ll_ops,
584 struct mlxsw_sp_prefix_usage *prefix_usage,
585 struct mlxsw_sp_lpm_tree *lpm_tree)
587 char xralst_pl[MLXSW_REG_XRALST_LEN];
590 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
592 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
595 mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
596 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
599 mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
600 MLXSW_REG_RALST_BIN_NO_CHILD);
601 last_prefix = prefix;
603 return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
606 static struct mlxsw_sp_lpm_tree *
607 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
608 const struct mlxsw_sp_router_ll_ops *ll_ops,
609 struct mlxsw_sp_prefix_usage *prefix_usage,
610 enum mlxsw_sp_l3proto proto)
612 struct mlxsw_sp_lpm_tree *lpm_tree;
615 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
617 return ERR_PTR(-EBUSY);
618 lpm_tree->proto = proto;
619 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
623 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
625 goto err_left_struct_set;
626 memcpy(&lpm_tree->prefix_usage, prefix_usage,
627 sizeof(lpm_tree->prefix_usage));
628 memset(&lpm_tree->prefix_ref_count, 0,
629 sizeof(lpm_tree->prefix_ref_count));
630 lpm_tree->ref_count = 1;
634 mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      const struct mlxsw_sp_router_ll_ops *ll_ops,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
}
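
/* Return an existing LPM tree matching the protocol and prefix usage, taking
 * a reference on it, or create a new tree if no such tree exists.
 */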
645 static struct mlxsw_sp_lpm_tree *
646 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
647 struct mlxsw_sp_prefix_usage *prefix_usage,
648 enum mlxsw_sp_l3proto proto)
650 const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
651 struct mlxsw_sp_lpm_tree *lpm_tree;
654 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
655 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
656 if (lpm_tree->ref_count != 0 &&
657 lpm_tree->proto == proto &&
658 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
660 mlxsw_sp_lpm_tree_hold(lpm_tree);
664 return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
667 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
669 lpm_tree->ref_count++;
672 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
673 struct mlxsw_sp_lpm_tree *lpm_tree)
675 const struct mlxsw_sp_router_ll_ops *ll_ops =
676 mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
678 if (--lpm_tree->ref_count == 0)
679 mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
682 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
684 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
686 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
687 struct mlxsw_sp_lpm_tree *lpm_tree;
691 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
694 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
695 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
696 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
697 sizeof(struct mlxsw_sp_lpm_tree),
699 if (!mlxsw_sp->router->lpm.trees)
702 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
703 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
704 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
707 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
708 MLXSW_SP_L3_PROTO_IPV4);
709 if (IS_ERR(lpm_tree)) {
710 err = PTR_ERR(lpm_tree);
711 goto err_ipv4_tree_get;
713 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
715 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
716 MLXSW_SP_L3_PROTO_IPV6);
717 if (IS_ERR(lpm_tree)) {
718 err = PTR_ERR(lpm_tree);
719 goto err_ipv6_tree_get;
721 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
726 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
727 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
729 kfree(mlxsw_sp->router->lpm.trees);
733 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
735 struct mlxsw_sp_lpm_tree *lpm_tree;
737 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
738 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
740 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
741 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
743 kfree(mlxsw_sp->router->lpm.trees);
746 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
748 return !!vr->fib4 || !!vr->fib6 ||
749 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
750 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
753 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
755 struct mlxsw_sp_vr *vr;
758 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
759 vr = &mlxsw_sp->router->vrs[i];
760 if (!mlxsw_sp_vr_is_used(vr))
766 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
767 const struct mlxsw_sp_fib *fib, u8 tree_id)
769 char xraltb_pl[MLXSW_REG_XRALTB_LEN];
771 mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
772 (enum mlxsw_reg_ralxx_protocol) fib->proto,
774 return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
777 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
778 const struct mlxsw_sp_fib *fib)
780 char xraltb_pl[MLXSW_REG_XRALTB_LEN];
782 /* Bind to tree 0 which is default */
783 mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
784 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
785 return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}
796 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
799 struct mlxsw_sp_vr *vr;
802 tb_id = mlxsw_sp_fix_tb_id(tb_id);
804 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
805 vr = &mlxsw_sp->router->vrs[i];
806 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
812 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
815 struct mlxsw_sp_vr *vr;
818 mutex_lock(&mlxsw_sp->router->lock);
819 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
826 mutex_unlock(&mlxsw_sp->router->lock);
830 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
831 enum mlxsw_sp_l3proto proto)
834 case MLXSW_SP_L3_PROTO_IPV4:
836 case MLXSW_SP_L3_PROTO_IPV6:
842 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
844 struct netlink_ext_ack *extack)
846 struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
847 struct mlxsw_sp_fib *fib4;
848 struct mlxsw_sp_fib *fib6;
849 struct mlxsw_sp_vr *vr;
852 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
854 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
855 return ERR_PTR(-EBUSY);
857 fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
859 return ERR_CAST(fib4);
860 fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
863 goto err_fib6_create;
865 mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
866 MLXSW_SP_L3_PROTO_IPV4);
867 if (IS_ERR(mr4_table)) {
868 err = PTR_ERR(mr4_table);
869 goto err_mr4_table_create;
871 mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
872 MLXSW_SP_L3_PROTO_IPV6);
873 if (IS_ERR(mr6_table)) {
874 err = PTR_ERR(mr6_table);
875 goto err_mr6_table_create;
880 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
881 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
885 err_mr6_table_create:
886 mlxsw_sp_mr_table_destroy(mr4_table);
887 err_mr4_table_create:
888 mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
890 mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
894 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
895 struct mlxsw_sp_vr *vr)
897 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
898 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
899 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
900 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}
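
/* Look up the virtual router bound to the given kernel table ID, creating it
 * on first use. Local, main and default tables all map to the same VR.
 */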
907 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
908 struct netlink_ext_ack *extack)
910 struct mlxsw_sp_vr *vr;
912 tb_id = mlxsw_sp_fix_tb_id(tb_id);
913 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
915 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
919 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
921 if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
922 list_empty(&vr->fib6->node_list) &&
923 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
924 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
925 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
929 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
930 enum mlxsw_sp_l3proto proto, u8 tree_id)
932 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
934 if (!mlxsw_sp_vr_is_used(vr))
936 if (fib->lpm_tree->id == tree_id)
941 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
942 struct mlxsw_sp_fib *fib,
943 struct mlxsw_sp_lpm_tree *new_tree)
945 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
948 fib->lpm_tree = new_tree;
949 mlxsw_sp_lpm_tree_hold(new_tree);
950 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
953 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
957 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
958 fib->lpm_tree = old_tree;
962 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
963 struct mlxsw_sp_fib *fib,
964 struct mlxsw_sp_lpm_tree *new_tree)
966 enum mlxsw_sp_l3proto proto = fib->proto;
967 struct mlxsw_sp_lpm_tree *old_tree;
968 u8 old_id, new_id = new_tree->id;
969 struct mlxsw_sp_vr *vr;
972 old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
973 old_id = old_tree->id;
975 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
976 vr = &mlxsw_sp->router->vrs[i];
977 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
979 err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
980 mlxsw_sp_vr_fib(vr, proto),
983 goto err_tree_replace;
986 memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
987 sizeof(new_tree->prefix_ref_count));
988 mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
989 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
994 for (i--; i >= 0; i--) {
995 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
997 mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
998 mlxsw_sp_vr_fib(vr, proto),
1004 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
1006 struct mlxsw_sp_vr *vr;
1010 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
1013 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
1014 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
1016 if (!mlxsw_sp->router->vrs)
1019 for (i = 0; i < max_vrs; i++) {
1020 vr = &mlxsw_sp->router->vrs[i];
1027 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}
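
/* Returns the underlay device of the tunnel without taking a reference, so
 * the caller must hold the RCU read lock.
 */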
1043 static struct net_device *
1044 __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
1046 struct ip_tunnel *tun = netdev_priv(ol_dev);
1047 struct net *net = dev_net(ol_dev);
	return dev_get_by_index_rcu(net, tun->parms.link);
}

u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}
1068 static struct mlxsw_sp_rif *
1069 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1070 const struct mlxsw_sp_rif_params *params,
1071 struct netlink_ext_ack *extack);
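
/* Each offloaded IPIP tunnel is anchored by a loopback RIF derived from its
 * overlay device. Build the loopback configuration for the tunnel type and
 * create the RIF.
 */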
1073 static struct mlxsw_sp_rif_ipip_lb *
1074 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1075 enum mlxsw_sp_ipip_type ipipt,
1076 struct net_device *ol_dev,
1077 struct netlink_ext_ack *extack)
1079 struct mlxsw_sp_rif_params_ipip_lb lb_params;
1080 const struct mlxsw_sp_ipip_ops *ipip_ops;
1081 struct mlxsw_sp_rif *rif;
1083 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1084 lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1085 .common.dev = ol_dev,
1086 .common.lag = false,
1087 .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1090 rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1092 return ERR_CAST(rif);
1093 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1096 static struct mlxsw_sp_ipip_entry *
1097 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1098 enum mlxsw_sp_ipip_type ipipt,
1099 struct net_device *ol_dev)
1101 const struct mlxsw_sp_ipip_ops *ipip_ops;
1102 struct mlxsw_sp_ipip_entry *ipip_entry;
1103 struct mlxsw_sp_ipip_entry *ret = NULL;
1105 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1106 ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1108 return ERR_PTR(-ENOMEM);
1110 ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1112 if (IS_ERR(ipip_entry->ol_lb)) {
1113 ret = ERR_CAST(ipip_entry->ol_lb);
1114 goto err_ol_ipip_lb_create;
1117 ipip_entry->ipipt = ipipt;
1118 ipip_entry->ol_dev = ol_dev;
1120 switch (ipip_ops->ul_proto) {
1121 case MLXSW_SP_L3_PROTO_IPV4:
1122 ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
1124 case MLXSW_SP_L3_PROTO_IPV6:
1131 err_ol_ipip_lb_create:
1137 mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
1139 mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1144 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1145 const enum mlxsw_sp_l3proto ul_proto,
1146 union mlxsw_sp_l3addr saddr,
1148 struct mlxsw_sp_ipip_entry *ipip_entry)
1150 u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1151 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1152 union mlxsw_sp_l3addr tun_saddr;
1154 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1157 tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1158 return tun_ul_tb_id == ul_tb_id &&
1159 mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1163 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1164 struct mlxsw_sp_fib_entry *fib_entry,
1165 struct mlxsw_sp_ipip_entry *ipip_entry)
1170 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1175 ipip_entry->decap_fib_entry = fib_entry;
1176 fib_entry->decap.ipip_entry = ipip_entry;
1177 fib_entry->decap.tunnel_index = tunnel_index;
1181 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1182 struct mlxsw_sp_fib_entry *fib_entry)
1184 /* Unlink this node from the IPIP entry that it's the decap entry of. */
1185 fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1186 fib_entry->decap.ipip_entry = NULL;
1187 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1188 1, fib_entry->decap.tunnel_index);
1191 static struct mlxsw_sp_fib_node *
1192 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1193 size_t addr_len, unsigned char prefix_len);
1194 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1195 struct mlxsw_sp_fib_entry *fib_entry);
1198 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1199 struct mlxsw_sp_ipip_entry *ipip_entry)
1201 struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1203 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1204 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1206 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1210 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1211 struct mlxsw_sp_ipip_entry *ipip_entry,
1212 struct mlxsw_sp_fib_entry *decap_fib_entry)
1214 if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1217 decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1219 if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1220 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1223 static struct mlxsw_sp_fib_entry *
1224 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1225 enum mlxsw_sp_l3proto proto,
1226 const union mlxsw_sp_l3addr *addr,
1227 enum mlxsw_sp_fib_entry_type type)
1229 struct mlxsw_sp_fib_node *fib_node;
1230 unsigned char addr_prefix_len;
1231 struct mlxsw_sp_fib *fib;
1232 struct mlxsw_sp_vr *vr;
1237 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1240 fib = mlxsw_sp_vr_fib(vr, proto);
1243 case MLXSW_SP_L3_PROTO_IPV4:
1244 addr4 = be32_to_cpu(addr->addr4);
1247 addr_prefix_len = 32;
1249 case MLXSW_SP_L3_PROTO_IPV6:
1255 fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1257 if (!fib_node || fib_node->fib_entry->type != type)
1260 return fib_node->fib_entry;
1263 /* Given an IPIP entry, find the corresponding decap route. */
1264 static struct mlxsw_sp_fib_entry *
1265 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1266 struct mlxsw_sp_ipip_entry *ipip_entry)
1268 static struct mlxsw_sp_fib_node *fib_node;
1269 const struct mlxsw_sp_ipip_ops *ipip_ops;
1270 unsigned char saddr_prefix_len;
1271 union mlxsw_sp_l3addr saddr;
1272 struct mlxsw_sp_fib *ul_fib;
1273 struct mlxsw_sp_vr *ul_vr;
1279 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1281 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1282 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1286 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1287 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1288 ipip_entry->ol_dev);
1290 switch (ipip_ops->ul_proto) {
1291 case MLXSW_SP_L3_PROTO_IPV4:
1292 saddr4 = be32_to_cpu(saddr.addr4);
1295 saddr_prefix_len = 32;
1302 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1305 fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1308 return fib_node->fib_entry;
1311 static struct mlxsw_sp_ipip_entry *
1312 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1313 enum mlxsw_sp_ipip_type ipipt,
1314 struct net_device *ol_dev)
1316 struct mlxsw_sp_ipip_entry *ipip_entry;
1318 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1319 if (IS_ERR(ipip_entry))
1322 list_add_tail(&ipip_entry->ipip_list_node,
1323 &mlxsw_sp->router->ipip_list);
1329 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1330 struct mlxsw_sp_ipip_entry *ipip_entry)
1332 list_del(&ipip_entry->ipip_list_node);
1333 mlxsw_sp_ipip_entry_dealloc(ipip_entry);
1337 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1338 const struct net_device *ul_dev,
1339 enum mlxsw_sp_l3proto ul_proto,
1340 union mlxsw_sp_l3addr ul_dip,
1341 struct mlxsw_sp_ipip_entry *ipip_entry)
1343 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1344 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1346 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1349 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1350 ul_tb_id, ipip_entry);
1353 /* Given decap parameters, find the corresponding IPIP entry. */
1354 static struct mlxsw_sp_ipip_entry *
1355 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1356 const struct net_device *ul_dev,
1357 enum mlxsw_sp_l3proto ul_proto,
1358 union mlxsw_sp_l3addr ul_dip)
1360 struct mlxsw_sp_ipip_entry *ipip_entry;
1362 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1364 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1372 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1373 const struct net_device *dev,
1374 enum mlxsw_sp_ipip_type *p_type)
1376 struct mlxsw_sp_router *router = mlxsw_sp->router;
1377 const struct mlxsw_sp_ipip_ops *ipip_ops;
1378 enum mlxsw_sp_ipip_type ipipt;
1380 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1381 ipip_ops = router->ipip_ops_arr[ipipt];
1382 if (dev->type == ipip_ops->dev_type) {
1391 bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1392 const struct net_device *dev)
1394 return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1397 static struct mlxsw_sp_ipip_entry *
1398 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1399 const struct net_device *ol_dev)
1401 struct mlxsw_sp_ipip_entry *ipip_entry;
1403 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1405 if (ipip_entry->ol_dev == ol_dev)
1411 static struct mlxsw_sp_ipip_entry *
1412 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1413 const struct net_device *ul_dev,
1414 struct mlxsw_sp_ipip_entry *start)
1416 struct mlxsw_sp_ipip_entry *ipip_entry;
1418 ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1420 list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1422 struct net_device *ol_dev = ipip_entry->ol_dev;
1423 struct net_device *ipip_ul_dev;
1426 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1429 if (ipip_ul_dev == ul_dev)
1436 bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1437 const struct net_device *dev)
1441 mutex_lock(&mlxsw_sp->router->lock);
1442 is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1443 mutex_unlock(&mlxsw_sp->router->lock);
1448 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1449 const struct net_device *ol_dev,
1450 enum mlxsw_sp_ipip_type ipipt)
1452 const struct mlxsw_sp_ipip_ops *ops
1453 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1455 /* For deciding whether decap should be offloaded, we don't care about
1456 * overlay protocol, so ask whether either one is supported.
1458 return ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV4) ||
1459 ops->can_offload(mlxsw_sp, ol_dev, MLXSW_SP_L3_PROTO_IPV6);
1462 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1463 struct net_device *ol_dev)
1465 enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1466 struct mlxsw_sp_ipip_entry *ipip_entry;
1467 enum mlxsw_sp_l3proto ul_proto;
1468 union mlxsw_sp_l3addr saddr;
1471 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1472 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1473 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1474 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1475 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1476 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1479 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1481 if (IS_ERR(ipip_entry))
1482 return PTR_ERR(ipip_entry);
1489 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1490 struct net_device *ol_dev)
1492 struct mlxsw_sp_ipip_entry *ipip_entry;
1494 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1496 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1500 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1501 struct mlxsw_sp_ipip_entry *ipip_entry)
1503 struct mlxsw_sp_fib_entry *decap_fib_entry;
1505 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1506 if (decap_fib_entry)
1507 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1512 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1513 u16 ul_rif_id, bool enable)
1515 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1516 struct mlxsw_sp_rif *rif = &lb_rif->common;
1517 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1518 char ritr_pl[MLXSW_REG_RITR_LEN];
1521 switch (lb_cf.ul_protocol) {
1522 case MLXSW_SP_L3_PROTO_IPV4:
1523 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1524 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1525 rif->rif_index, rif->vr_id, rif->dev->mtu);
1526 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1527 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1528 ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
1531 case MLXSW_SP_L3_PROTO_IPV6:
1532 return -EAFNOSUPPORT;
1535 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
1538 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1539 struct net_device *ol_dev)
1541 struct mlxsw_sp_ipip_entry *ipip_entry;
1542 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1545 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1547 lb_rif = ipip_entry->ol_lb;
1548 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1549 lb_rif->ul_rif_id, true);
1552 lb_rif->common.mtu = ol_dev->mtu;
1559 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1560 struct net_device *ol_dev)
1562 struct mlxsw_sp_ipip_entry *ipip_entry;
1564 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1566 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1570 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1571 struct mlxsw_sp_ipip_entry *ipip_entry)
1573 if (ipip_entry->decap_fib_entry)
1574 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1577 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1578 struct net_device *ol_dev)
1580 struct mlxsw_sp_ipip_entry *ipip_entry;
1582 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1584 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1587 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1588 struct mlxsw_sp_rif *old_rif,
1589 struct mlxsw_sp_rif *new_rif);
1591 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1592 struct mlxsw_sp_ipip_entry *ipip_entry,
1594 struct netlink_ext_ack *extack)
1596 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1597 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1599 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1603 if (IS_ERR(new_lb_rif))
1604 return PTR_ERR(new_lb_rif);
1605 ipip_entry->ol_lb = new_lb_rif;
1608 mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1609 &new_lb_rif->common);
1611 mlxsw_sp_rif_destroy(&old_lb_rif->common);
1616 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1617 struct mlxsw_sp_rif *rif);
/**
 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
 * @mlxsw_sp: mlxsw_sp.
 * @ipip_entry: IPIP entry.
 * @recreate_loopback: Recreates the associated loopback RIF.
 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 * @extack: extack.
 *
 * Return: Non-zero value on failure.
 */
1632 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1633 struct mlxsw_sp_ipip_entry *ipip_entry,
1634 bool recreate_loopback,
1636 bool update_nexthops,
1637 struct netlink_ext_ack *extack)
1641 /* RIFs can't be edited, so to update loopback, we need to destroy and
1642 * recreate it. That creates a window of opportunity where RALUE and
1643 * RATR registers end up referencing a RIF that's already gone. RATRs
1644 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1645 * of RALUE, demote the decap route back.
1647 if (ipip_entry->decap_fib_entry)
1648 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1650 if (recreate_loopback) {
1651 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1652 keep_encap, extack);
1655 } else if (update_nexthops) {
1656 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1657 &ipip_entry->ol_lb->common);
1660 if (ipip_entry->ol_dev->flags & IFF_UP)
1661 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1666 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1667 struct net_device *ol_dev,
1668 struct netlink_ext_ack *extack)
1670 struct mlxsw_sp_ipip_entry *ipip_entry =
1671 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1676 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1677 true, false, false, extack);
1681 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1682 struct mlxsw_sp_ipip_entry *ipip_entry,
1683 struct net_device *ul_dev,
1685 struct netlink_ext_ack *extack)
1687 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1688 enum mlxsw_sp_l3proto ul_proto;
1689 union mlxsw_sp_l3addr saddr;
1691 /* Moving underlay to a different VRF might cause local address
1692 * conflict, and the conflicting tunnels need to be demoted.
1694 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1695 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1696 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1699 *demote_this = true;
1703 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1704 true, true, false, extack);
1708 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1709 struct mlxsw_sp_ipip_entry *ipip_entry,
1710 struct net_device *ul_dev)
1712 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1713 false, false, true, NULL);
1717 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1718 struct mlxsw_sp_ipip_entry *ipip_entry,
1719 struct net_device *ul_dev)
1721 /* A down underlay device causes encapsulated packets to not be
1722 * forwarded, but decap still works. So refresh next hops without
1723 * touching anything else.
1725 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1726 false, false, true, NULL);
1730 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1731 struct net_device *ol_dev,
1732 struct netlink_ext_ack *extack)
1734 const struct mlxsw_sp_ipip_ops *ipip_ops;
1735 struct mlxsw_sp_ipip_entry *ipip_entry;
	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;
1746 /* A change might make a tunnel not eligible for offloading. */
1747 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1748 ipip_entry->ipipt)) {
1749 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1753 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1754 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1758 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1759 struct mlxsw_sp_ipip_entry *ipip_entry)
1761 struct net_device *ol_dev = ipip_entry->ol_dev;
1763 if (ol_dev->flags & IFF_UP)
1764 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}

/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in via the argument
 * `except'.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
1781 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1783 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1785 if (ipip_entry != except &&
1786 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1787 ul_tb_id, ipip_entry)) {
1788 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1796 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1797 struct net_device *ul_dev)
1799 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1801 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1803 struct net_device *ol_dev = ipip_entry->ol_dev;
1804 struct net_device *ipip_ul_dev;
1807 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1809 if (ipip_ul_dev == ul_dev)
1810 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1814 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1815 struct net_device *ol_dev,
1816 unsigned long event,
1817 struct netdev_notifier_info *info)
1819 struct netdev_notifier_changeupper_info *chup;
1820 struct netlink_ext_ack *extack;
1823 mutex_lock(&mlxsw_sp->router->lock);
1825 case NETDEV_REGISTER:
1826 err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1828 case NETDEV_UNREGISTER:
1829 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1832 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1835 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1837 case NETDEV_CHANGEUPPER:
1838 chup = container_of(info, typeof(*chup), info);
1839 extack = info->extack;
1840 if (netif_is_l3_master(chup->upper_dev))
1841 err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1846 extack = info->extack;
1847 err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1850 case NETDEV_CHANGEMTU:
1851 err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1854 mutex_unlock(&mlxsw_sp->router->lock);
1859 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1860 struct mlxsw_sp_ipip_entry *ipip_entry,
1861 struct net_device *ul_dev,
1863 unsigned long event,
1864 struct netdev_notifier_info *info)
1866 struct netdev_notifier_changeupper_info *chup;
1867 struct netlink_ext_ack *extack;
1870 case NETDEV_CHANGEUPPER:
1871 chup = container_of(info, typeof(*chup), info);
1872 extack = info->extack;
1873 if (netif_is_l3_master(chup->upper_dev))
1874 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1882 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1885 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1893 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1894 struct net_device *ul_dev,
1895 unsigned long event,
1896 struct netdev_notifier_info *info)
1898 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1901 mutex_lock(&mlxsw_sp->router->lock);
1902 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1905 struct mlxsw_sp_ipip_entry *prev;
1906 bool demote_this = false;
1908 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1909 ul_dev, &demote_this,
1912 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1918 if (list_is_first(&ipip_entry->ipip_list_node,
1919 &mlxsw_sp->router->ipip_list))
1922 /* This can't be cached from previous iteration,
1923 * because that entry could be gone now.
1925 prev = list_prev_entry(ipip_entry,
1927 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1931 mutex_unlock(&mlxsw_sp->router->lock);
1936 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1937 enum mlxsw_sp_l3proto ul_proto,
1938 const union mlxsw_sp_l3addr *ul_sip,
1941 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1942 struct mlxsw_sp_router *router = mlxsw_sp->router;
1943 struct mlxsw_sp_fib_entry *fib_entry;
1946 mutex_lock(&mlxsw_sp->router->lock);
1948 if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
1953 router->nve_decap_config.ul_tb_id = ul_tb_id;
1954 router->nve_decap_config.tunnel_index = tunnel_index;
1955 router->nve_decap_config.ul_proto = ul_proto;
1956 router->nve_decap_config.ul_sip = *ul_sip;
1957 router->nve_decap_config.valid = true;
1959 /* It is valid to create a tunnel with a local IP and only later
1960 * assign this IP address to a local interface
1962 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1968 fib_entry->decap.tunnel_index = tunnel_index;
1969 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1971 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1973 goto err_fib_entry_update;
1977 err_fib_entry_update:
1978 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1979 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1981 mutex_unlock(&mlxsw_sp->router->lock);
1985 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1986 enum mlxsw_sp_l3proto ul_proto,
1987 const union mlxsw_sp_l3addr *ul_sip)
1989 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1990 struct mlxsw_sp_router *router = mlxsw_sp->router;
1991 struct mlxsw_sp_fib_entry *fib_entry;
1993 mutex_lock(&mlxsw_sp->router->lock);
1995 if (WARN_ON_ONCE(!router->nve_decap_config.valid))
1998 router->nve_decap_config.valid = false;
2000 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2006 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2007 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2009 mutex_unlock(&mlxsw_sp->router->lock);
2012 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2014 enum mlxsw_sp_l3proto ul_proto,
2015 const union mlxsw_sp_l3addr *ul_sip)
2017 struct mlxsw_sp_router *router = mlxsw_sp->router;
2019 return router->nve_decap_config.valid &&
2020 router->nve_decap_config.ul_tb_id == ul_tb_id &&
2021 router->nve_decap_config.ul_proto == ul_proto &&
2022 !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2026 struct mlxsw_sp_neigh_key {
2027 struct neighbour *n;
2030 struct mlxsw_sp_neigh_entry {
2031 struct list_head rif_list_node;
2032 struct rhash_head ht_node;
2033 struct mlxsw_sp_neigh_key key;
2036 unsigned char ha[ETH_ALEN];
2037 struct list_head nexthop_list; /* list of nexthops using
2040 struct list_head nexthop_neighs_list_node;
2041 unsigned int counter_index;
static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
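
/* Iterate over the neighbour entries associated with a RIF. Pass NULL to get
 * the first entry; NULL is returned once the last entry has been reached.
 */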
2051 struct mlxsw_sp_neigh_entry *
2052 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2053 struct mlxsw_sp_neigh_entry *neigh_entry)
2056 if (list_empty(&rif->neigh_list))
2059 return list_first_entry(&rif->neigh_list,
2060 typeof(*neigh_entry),
2063 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2065 return list_next_entry(neigh_entry, rif_list_node);
2068 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2070 return neigh_entry->key.n->tbl->family;
2074 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2076 return neigh_entry->ha;
2079 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2081 struct neighbour *n;
2083 n = neigh_entry->key.n;
2084 return ntohl(*((__be32 *) n->primary_key));
2088 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2090 struct neighbour *n;
2092 n = neigh_entry->key.n;
2093 return (struct in6_addr *) &n->primary_key;
2096 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2097 struct mlxsw_sp_neigh_entry *neigh_entry,
2100 if (!neigh_entry->counter_valid)
2103 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2107 static struct mlxsw_sp_neigh_entry *
2108 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2111 struct mlxsw_sp_neigh_entry *neigh_entry;
2113 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2117 neigh_entry->key.n = n;
2118 neigh_entry->rif = rif;
2119 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2124 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2130 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2131 struct mlxsw_sp_neigh_entry *neigh_entry)
2133 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2134 &neigh_entry->ht_node,
2135 mlxsw_sp_neigh_ht_params);
2139 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2140 struct mlxsw_sp_neigh_entry *neigh_entry)
2142 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2143 &neigh_entry->ht_node,
2144 mlxsw_sp_neigh_ht_params);
2148 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2149 struct mlxsw_sp_neigh_entry *neigh_entry)
2151 struct devlink *devlink;
2152 const char *table_name;
2154 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2156 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2159 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2166 devlink = priv_to_devlink(mlxsw_sp->core);
2167 return devlink_dpipe_table_counter_enabled(devlink, table_name);
2171 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2172 struct mlxsw_sp_neigh_entry *neigh_entry)
2174 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2177 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2180 neigh_entry->counter_valid = true;
static void
mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp,
				   neigh_entry->counter_index);
	neigh_entry->counter_valid = false;
}
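
/* A neighbour entry can only be created if the egress device already has a
 * RIF. Counter allocation is best-effort and its failure is not fatal.
 */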
2194 static struct mlxsw_sp_neigh_entry *
2195 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2197 struct mlxsw_sp_neigh_entry *neigh_entry;
2198 struct mlxsw_sp_rif *rif;
2201 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2203 return ERR_PTR(-EINVAL);
2205 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2207 return ERR_PTR(-ENOMEM);
2209 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2211 goto err_neigh_entry_insert;
2213 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2214 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2218 err_neigh_entry_insert:
2219 mlxsw_sp_neigh_entry_free(neigh_entry);
2220 return ERR_PTR(err);
2224 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2225 struct mlxsw_sp_neigh_entry *neigh_entry)
2227 list_del(&neigh_entry->rif_list_node);
2228 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2229 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2230 mlxsw_sp_neigh_entry_free(neigh_entry);
2233 static struct mlxsw_sp_neigh_entry *
2234 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2236 struct mlxsw_sp_neigh_key key;
2239 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2240 &key, mlxsw_sp_neigh_ht_params);
static void
mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned long interval;

#if IS_ENABLED(CONFIG_IPV6)
	interval = min_t(unsigned long,
			 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
			 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
#else
	interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
#endif
	mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
}
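
/* For each active entry reported by the device, prod the corresponding kernel
 * neighbour so that it is not aged out while the hardware is still using it.
 */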
2258 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2262 struct net_device *dev;
2263 struct neighbour *n;
2268 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2270 if (!mlxsw_sp->router->rifs[rif]) {
2271 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2276 dev = mlxsw_sp->router->rifs[rif]->dev;
2277 n = neigh_lookup(&arp_tbl, &dipn, dev);
2281 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2282 neigh_event_send(n, NULL);
2286 #if IS_ENABLED(CONFIG_IPV6)
2287 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2291 struct net_device *dev;
2292 struct neighbour *n;
2293 struct in6_addr dip;
2296 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2299 if (!mlxsw_sp->router->rifs[rif]) {
2300 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2304 dev = mlxsw_sp->router->rifs[rif]->dev;
2305 n = neigh_lookup(&nd_tbl, &dip, dev);
2309 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2310 neigh_event_send(n, NULL);
2314 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2321 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2328 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2330 /* Hardware starts counting at 0, so add 1. */
2331 num_entries++;
2333 /* Each record consists of several neighbour entries. */
2334 for (i = 0; i < num_entries; i++) {
2337 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2338 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2344 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2348 /* One record contains one entry. */
2349 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2353 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2354 char *rauhtd_pl, int rec_index)
2356 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2357 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2358 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2361 case MLXSW_REG_RAUHTD_TYPE_IPV6:
2362 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2368 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2370 u8 num_rec, last_rec_index, num_entries;
2372 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2373 last_rec_index = num_rec - 1;
2375 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2377 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2378 MLXSW_REG_RAUHTD_TYPE_IPV6)
2381 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2383 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
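/* The check above treats a dump as full only when the register returned the
 * maximum number of records and the last record cannot hold further entries:
 * an IPv6 record always carries a single entry, while an IPv4 record is full
 * once its entry count reaches MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC. In that
 * case the caller below issues another RAUHTD query.
 */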
2389 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2391 enum mlxsw_reg_rauhtd_type type)
2396 /* Ensure the RIF we read from the device does not change mid-dump. */
2397 mutex_lock(&mlxsw_sp->router->lock);
2399 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2400 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2403 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2406 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2407 for (i = 0; i < num_rec; i++)
2408 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2410 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2411 mutex_unlock(&mlxsw_sp->router->lock);
2416 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2418 enum mlxsw_reg_rauhtd_type type;
2422 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2426 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2427 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2431 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2432 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2438 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2440 struct mlxsw_sp_neigh_entry *neigh_entry;
2442 mutex_lock(&mlxsw_sp->router->lock);
2443 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2444 nexthop_neighs_list_node)
2445 /* If this neigh has nexthops, make the kernel think it is
2446 * active regardless of the traffic.
2447 */
2448 neigh_event_send(neigh_entry->key.n, NULL);
2449 mutex_unlock(&mlxsw_sp->router->lock);
2453 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2455 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2457 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2458 msecs_to_jiffies(interval));
2461 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2463 struct mlxsw_sp_router *router;
2466 router = container_of(work, struct mlxsw_sp_router,
2467 neighs_update.dw.work);
2468 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2470 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2472 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2474 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2477 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2479 struct mlxsw_sp_neigh_entry *neigh_entry;
2480 struct mlxsw_sp_router *router;
2482 router = container_of(work, struct mlxsw_sp_router,
2483 nexthop_probe_dw.work);
2484 /* Iterate over the nexthop neighbours and send an ARP request to
2485 * those that are unresolved. This solves the chicken-and-egg problem
2486 * where a nexthop is not offloaded until its neighbour is resolved,
2487 * but the neighbour is never resolved as long as traffic flows in
2488 * hardware via a different nexthop.
2489 */
2490 mutex_lock(&router->lock);
2491 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2492 nexthop_neighs_list_node)
2493 if (!neigh_entry->connected)
2494 neigh_event_send(neigh_entry->key.n, NULL);
2495 mutex_unlock(&router->lock);
2497 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2498 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2502 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2503 struct mlxsw_sp_neigh_entry *neigh_entry,
2504 bool removing, bool dead);
2506 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2508 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2509 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2513 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2514 struct mlxsw_sp_neigh_entry *neigh_entry,
2515 enum mlxsw_reg_rauht_op op)
2517 struct neighbour *n = neigh_entry->key.n;
2518 u32 dip = ntohl(*((__be32 *) n->primary_key));
2519 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2521 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2523 if (neigh_entry->counter_valid)
2524 mlxsw_reg_rauht_pack_counter(rauht_pl,
2525 neigh_entry->counter_index);
2526 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2530 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2531 struct mlxsw_sp_neigh_entry *neigh_entry,
2532 enum mlxsw_reg_rauht_op op)
2534 struct neighbour *n = neigh_entry->key.n;
2535 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2536 const char *dip = n->primary_key;
2538 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2540 if (neigh_entry->counter_valid)
2541 mlxsw_reg_rauht_pack_counter(rauht_pl,
2542 neigh_entry->counter_index);
2543 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2546 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2548 struct neighbour *n = neigh_entry->key.n;
2550 /* Packets with a link-local destination address are trapped
2551 * after LPM lookup and never reach the neighbour table, so
2552 * there is no need to program such neighbours to the device.
2553 */
2554 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2555 IPV6_ADDR_LINKLOCAL)
2561 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2562 struct mlxsw_sp_neigh_entry *neigh_entry,
2565 enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2568 if (!adding && !neigh_entry->connected)
2570 neigh_entry->connected = adding;
2571 if (neigh_entry->key.n->tbl->family == AF_INET) {
2572 err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2576 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2577 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2579 err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2589 neigh_entry->key.n->flags |= NTF_OFFLOADED;
2591 neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2595 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2596 struct mlxsw_sp_neigh_entry *neigh_entry,
2600 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2602 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2603 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2606 struct mlxsw_sp_netevent_work {
2607 struct work_struct work;
2608 struct mlxsw_sp *mlxsw_sp;
2609 struct neighbour *n;
2612 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2614 struct mlxsw_sp_netevent_work *net_work =
2615 container_of(work, struct mlxsw_sp_netevent_work, work);
2616 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2617 struct mlxsw_sp_neigh_entry *neigh_entry;
2618 struct neighbour *n = net_work->n;
2619 unsigned char ha[ETH_ALEN];
2620 bool entry_connected;
2623 /* If these parameters are changed after we release the lock,
2624 * then we are guaranteed to receive another event letting us
2625 * know about it.
2626 */
2627 read_lock_bh(&n->lock);
2628 memcpy(ha, n->ha, ETH_ALEN);
2629 nud_state = n->nud_state;
2631 read_unlock_bh(&n->lock);
2633 mutex_lock(&mlxsw_sp->router->lock);
2634 mlxsw_sp_span_respin(mlxsw_sp);
2636 entry_connected = nud_state & NUD_VALID && !dead;
2637 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2638 if (!entry_connected && !neigh_entry)
2641 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2642 if (IS_ERR(neigh_entry))
2646 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2647 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2648 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2651 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2652 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2655 mutex_unlock(&mlxsw_sp->router->lock);
2660 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2662 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2664 struct mlxsw_sp_netevent_work *net_work =
2665 container_of(work, struct mlxsw_sp_netevent_work, work);
2666 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2668 mlxsw_sp_mp_hash_init(mlxsw_sp);
2672 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2674 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2676 struct mlxsw_sp_netevent_work *net_work =
2677 container_of(work, struct mlxsw_sp_netevent_work, work);
2678 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2680 __mlxsw_sp_router_init(mlxsw_sp);
2684 static int mlxsw_sp_router_schedule_work(struct net *net,
2685 struct notifier_block *nb,
2686 void (*cb)(struct work_struct *))
2688 struct mlxsw_sp_netevent_work *net_work;
2689 struct mlxsw_sp_router *router;
2691 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2692 if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2695 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2699 INIT_WORK(&net_work->work, cb);
2700 net_work->mlxsw_sp = router->mlxsw_sp;
2701 mlxsw_core_schedule_work(&net_work->work);
2705 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2706 unsigned long event, void *ptr)
2708 struct mlxsw_sp_netevent_work *net_work;
2709 struct mlxsw_sp_port *mlxsw_sp_port;
2710 struct mlxsw_sp *mlxsw_sp;
2711 unsigned long interval;
2712 struct neigh_parms *p;
2713 struct neighbour *n;
2716 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2719 /* We don't care about changes in the default table. */
2720 if (!p->dev || (p->tbl->family != AF_INET &&
2721 p->tbl->family != AF_INET6))
2724 /* We are in atomic context and can't take RTNL mutex,
2725 * so use RCU variant to walk the device chain.
2726 */
2727 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2731 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2732 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2733 mlxsw_sp->router->neighs_update.interval = interval;
2735 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2737 case NETEVENT_NEIGH_UPDATE:
2740 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2743 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2747 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2749 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2753 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2754 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2757 /* Take a reference to ensure the neighbour won't be
2758 * destructed until we drop the reference in delayed
2759 * work.
2760 */
2761 neigh_clone(n);
2762 mlxsw_core_schedule_work(&net_work->work);
2763 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2765 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2766 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2767 return mlxsw_sp_router_schedule_work(ptr, nb,
2768 mlxsw_sp_router_mp_hash_event_work);
2770 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2771 return mlxsw_sp_router_schedule_work(ptr, nb,
2772 mlxsw_sp_router_update_priority_work);
2778 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2782 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2783 &mlxsw_sp_neigh_ht_params);
2787 /* Initialize the polling interval according to the default
2788 * table.
2789 */
2790 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2792 /* Create the delayed works for the activity_update */
2793 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2794 mlxsw_sp_router_neighs_update_work);
2795 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2796 mlxsw_sp_router_probe_unresolved_nexthops);
2797 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2798 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2802 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2804 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2805 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2806 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2809 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2810 struct mlxsw_sp_rif *rif)
2812 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2814 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2816 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2817 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2821 enum mlxsw_sp_nexthop_type {
2822 MLXSW_SP_NEXTHOP_TYPE_ETH,
2823 MLXSW_SP_NEXTHOP_TYPE_IPIP,
2826 struct mlxsw_sp_nexthop_key {
2827 struct fib_nh *fib_nh;
2830 struct mlxsw_sp_nexthop {
2831 struct list_head neigh_list_node; /* member of neigh entry list */
2832 struct list_head rif_list_node;
2833 struct list_head router_list_node;
2834 struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
2835 * this nexthop belongs to
2836 */
2837 struct rhash_head ht_node;
2838 struct mlxsw_sp_nexthop_key key;
2839 unsigned char gw_addr[sizeof(struct in6_addr)];
2843 int num_adj_entries;
2844 struct mlxsw_sp_rif *rif;
2845 u8 should_offload:1, /* set indicates this neigh is connected and
2846 * should be put to KVD linear area of this group.
2847 */
2848 offloaded:1, /* set in case the neigh is actually put into
2849 * KVD linear area of this group.
2850 */
2851 update:1; /* set indicates that MAC of this neigh should be
2852 * updated in HW
2853 */
2854 enum mlxsw_sp_nexthop_type type;
2856 struct mlxsw_sp_neigh_entry *neigh_entry;
2857 struct mlxsw_sp_ipip_entry *ipip_entry;
2859 unsigned int counter_index;
2863 enum mlxsw_sp_nexthop_group_type {
2864 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
2865 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
2868 struct mlxsw_sp_nexthop_group {
2870 struct rhash_head ht_node;
2871 struct list_head fib_list; /* list of fib entries that use this group */
2872 struct neigh_table *neigh_tbl;
2873 enum mlxsw_sp_nexthop_group_type type;
2874 u8 adj_index_valid:1,
2875 gateway:1; /* routes using the group use a gateway */
2879 int sum_norm_weight;
2880 struct mlxsw_sp_nexthop nexthops[0];
2881 #define nh_rif nexthops[0].rif
2884 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2885 struct mlxsw_sp_nexthop *nh)
2887 struct devlink *devlink;
2889 devlink = priv_to_devlink(mlxsw_sp->core);
2890 if (!devlink_dpipe_table_counter_enabled(devlink,
2891 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2894 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2897 nh->counter_valid = true;
2900 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2901 struct mlxsw_sp_nexthop *nh)
2903 if (!nh->counter_valid)
2905 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2906 nh->counter_valid = false;
2909 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2910 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2912 if (!nh->counter_valid)
2915 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2919 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2920 struct mlxsw_sp_nexthop *nh)
2923 if (list_empty(&router->nexthop_list))
2926 return list_first_entry(&router->nexthop_list,
2927 typeof(*nh), router_list_node);
2929 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2931 return list_next_entry(nh, router_list_node);
2934 bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2936 return nh->offloaded;
2939 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2943 return nh->neigh_entry->ha;
2946 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
2947 u32 *p_adj_size, u32 *p_adj_hash_index)
2949 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2950 u32 adj_hash_index = 0;
2953 if (!nh->offloaded || !nh_grp->adj_index_valid)
2956 *p_adj_index = nh_grp->adj_index;
2957 *p_adj_size = nh_grp->ecmp_size;
2959 for (i = 0; i < nh_grp->count; i++) {
2960 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2962 if (nh_iter == nh)
2963 break;
2964 if (nh_iter->offloaded)
2965 adj_hash_index += nh_iter->num_adj_entries;
2968 *p_adj_hash_index = adj_hash_index;
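/* The loop above sums the adjacency entries of the offloaded nexthops that
 * precede this one in the group. For example, if those occupy 2 and 3
 * entries respectively, the reported adjacency hash index is 5, i.e. this
 * nexthop's offset from the group's base adjacency index.
 */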
2972 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2977 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2979 struct mlxsw_sp_nexthop_group *nh_grp = nh->nh_grp;
2982 for (i = 0; i < nh_grp->count; i++) {
2983 struct mlxsw_sp_nexthop *nh_iter = &nh_grp->nexthops[i];
2985 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2991 static struct fib_info *
2992 mlxsw_sp_nexthop4_group_fi(const struct mlxsw_sp_nexthop_group *nh_grp)
2994 return nh_grp->priv;
2997 struct mlxsw_sp_nexthop_group_cmp_arg {
2998 enum mlxsw_sp_l3proto proto;
3000 struct fib_info *fi;
3001 struct mlxsw_sp_fib6_entry *fib6_entry;
3006 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3007 const struct in6_addr *gw, int ifindex,
3012 for (i = 0; i < nh_grp->count; i++) {
3013 const struct mlxsw_sp_nexthop *nh;
3015 nh = &nh_grp->nexthops[i];
3016 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3017 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3025 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3026 const struct mlxsw_sp_fib6_entry *fib6_entry)
3028 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3030 if (nh_grp->count != fib6_entry->nrt6)
3033 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3034 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3035 struct in6_addr *gw;
3036 int ifindex, weight;
3038 ifindex = fib6_nh->fib_nh_dev->ifindex;
3039 weight = fib6_nh->fib_nh_weight;
3040 gw = &fib6_nh->fib_nh_gw6;
3041 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3050 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3052 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3053 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3055 switch (cmp_arg->proto) {
3056 case MLXSW_SP_L3_PROTO_IPV4:
3057 if (nh_grp->type != MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4)
3059 return cmp_arg->fi != mlxsw_sp_nexthop4_group_fi(nh_grp);
3060 case MLXSW_SP_L3_PROTO_IPV6:
3061 if (nh_grp->type != MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6)
3063 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3064 cmp_arg->fib6_entry);
3071 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3073 const struct mlxsw_sp_nexthop_group *nh_grp = data;
3074 const struct mlxsw_sp_nexthop *nh;
3075 struct fib_info *fi;
3079 switch (nh_grp->type) {
3080 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3081 fi = mlxsw_sp_nexthop4_group_fi(nh_grp);
3082 return jhash(&fi, sizeof(fi), seed);
3083 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3084 val = nh_grp->count;
3085 for (i = 0; i < nh_grp->count; i++) {
3086 nh = &nh_grp->nexthops[i];
3087 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3088 val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3090 return jhash(&val, sizeof(val), seed);
3098 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3100 unsigned int val = fib6_entry->nrt6;
3101 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3103 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3104 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3105 struct net_device *dev = fib6_nh->fib_nh_dev;
3106 struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3108 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3109 val ^= jhash(gw, sizeof(*gw), seed);
3112 return jhash(&val, sizeof(val), seed);
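/* Because XOR is commutative, the value computed above does not depend on
 * the order of the sibling routes in the entry, so equivalent IPv6
 * multipath routes hash to the same value regardless of how their nexthops
 * are listed.
 */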
3116 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3118 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3120 switch (cmp_arg->proto) {
3121 case MLXSW_SP_L3_PROTO_IPV4:
3122 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3123 case MLXSW_SP_L3_PROTO_IPV6:
3124 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3131 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3132 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3133 .hashfn = mlxsw_sp_nexthop_group_hash,
3134 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
3135 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
3138 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3139 struct mlxsw_sp_nexthop_group *nh_grp)
3141 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3145 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3147 mlxsw_sp_nexthop_group_ht_params);
3150 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3151 struct mlxsw_sp_nexthop_group *nh_grp)
3153 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3157 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3159 mlxsw_sp_nexthop_group_ht_params);
3162 static struct mlxsw_sp_nexthop_group *
3163 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3164 struct fib_info *fi)
3166 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3168 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV4;
3170 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3172 mlxsw_sp_nexthop_group_ht_params);
3175 static struct mlxsw_sp_nexthop_group *
3176 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3177 struct mlxsw_sp_fib6_entry *fib6_entry)
3179 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3181 cmp_arg.proto = MLXSW_SP_L3_PROTO_IPV6;
3182 cmp_arg.fib6_entry = fib6_entry;
3183 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3185 mlxsw_sp_nexthop_group_ht_params);
3188 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3189 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3190 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3191 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3194 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3195 struct mlxsw_sp_nexthop *nh)
3197 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3198 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3201 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3202 struct mlxsw_sp_nexthop *nh)
3204 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3205 mlxsw_sp_nexthop_ht_params);
3208 static struct mlxsw_sp_nexthop *
3209 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3210 struct mlxsw_sp_nexthop_key key)
3212 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3213 mlxsw_sp_nexthop_ht_params);
3216 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3217 const struct mlxsw_sp_fib *fib,
3218 u32 adj_index, u16 ecmp_size,
3222 char raleu_pl[MLXSW_REG_RALEU_LEN];
3224 mlxsw_reg_raleu_pack(raleu_pl,
3225 (enum mlxsw_reg_ralxx_protocol) fib->proto,
3226 fib->vr->id, adj_index, ecmp_size, new_adj_index,
3228 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3231 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3232 struct mlxsw_sp_nexthop_group *nh_grp,
3233 u32 old_adj_index, u16 old_ecmp_size)
3235 struct mlxsw_sp_fib_entry *fib_entry;
3236 struct mlxsw_sp_fib *fib = NULL;
3239 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3240 if (fib == fib_entry->fib_node->fib)
3242 fib = fib_entry->fib_node->fib;
3243 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
3254 static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3255 struct mlxsw_sp_nexthop *nh)
3257 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3258 char ratr_pl[MLXSW_REG_RATR_LEN];
3260 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
3261 true, MLXSW_REG_RATR_TYPE_ETHERNET,
3262 adj_index, neigh_entry->rif);
3263 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3264 if (nh->counter_valid)
3265 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3267 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3269 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3272 int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3273 struct mlxsw_sp_nexthop *nh)
3277 for (i = 0; i < nh->num_adj_entries; i++) {
3280 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3288 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3290 struct mlxsw_sp_nexthop *nh)
3292 const struct mlxsw_sp_ipip_ops *ipip_ops;
3294 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3295 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3298 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3300 struct mlxsw_sp_nexthop *nh)
3304 for (i = 0; i < nh->num_adj_entries; i++) {
3307 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3317 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3318 struct mlxsw_sp_nexthop_group *nh_grp,
3321 u32 adj_index = nh_grp->adj_index; /* base */
3322 struct mlxsw_sp_nexthop *nh;
3325 for (i = 0; i < nh_grp->count; i++) {
3326 nh = &nh_grp->nexthops[i];
3328 if (!nh->should_offload) {
3333 if (nh->update || reallocate) {
3337 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3338 err = mlxsw_sp_nexthop_update
3339 (mlxsw_sp, adj_index, nh);
3341 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3342 err = mlxsw_sp_nexthop_ipip_update
3343 (mlxsw_sp, adj_index, nh);
3351 adj_index += nh->num_adj_entries;
3357 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3358 struct mlxsw_sp_nexthop_group *nh_grp)
3360 struct mlxsw_sp_fib_entry *fib_entry;
3363 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3364 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3371 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3373 /* Valid sizes for an adjacency group are:
3374 * 1-64, 512, 1024, 2048 and 4096.
3375 */
3376 if (*p_adj_grp_size <= 64)
3377 return;
3378 else if (*p_adj_grp_size <= 512)
3379 *p_adj_grp_size = 512;
3380 else if (*p_adj_grp_size <= 1024)
3381 *p_adj_grp_size = 1024;
3382 else if (*p_adj_grp_size <= 2048)
3383 *p_adj_grp_size = 2048;
3385 *p_adj_grp_size = 4096;
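/* For example, the rounding above leaves a requested size of 3 unchanged
 * (all sizes 1-64 are valid), rounds 70 up to 512 and 1500 up to 2048.
 */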
3388 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3389 unsigned int alloc_size)
3391 if (alloc_size >= 4096)
3392 *p_adj_grp_size = 4096;
3393 else if (alloc_size >= 2048)
3394 *p_adj_grp_size = 2048;
3395 else if (alloc_size >= 1024)
3396 *p_adj_grp_size = 1024;
3397 else if (alloc_size >= 512)
3398 *p_adj_grp_size = 512;
3401 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3402 u16 *p_adj_grp_size)
3404 unsigned int alloc_size;
3407 /* Round up the requested group size to the next size supported
3408 * by the device and make sure the request can be satisfied.
3409 */
3410 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3411 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3412 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3413 *p_adj_grp_size, &alloc_size);
3416 /* It is possible the allocation results in more allocated
3417 * entries than requested. Try to use as many of them as
3418 * possible.
3419 */
3420 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
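/* For illustration only: a requested ECMP size of 70 is first rounded up to
 * 512; if the KVDL query were to report that the actual allocation would be,
 * say, 1024 entries, the group size is bumped to 1024 so the whole
 * allocation is used. The exact alloc_size depends on the KVDL allocator
 * and the value used here is an assumption.
 */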
3426 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group *nh_grp)
3428 int i, g = 0, sum_norm_weight = 0;
3429 struct mlxsw_sp_nexthop *nh;
3431 for (i = 0; i < nh_grp->count; i++) {
3432 nh = &nh_grp->nexthops[i];
3434 if (!nh->should_offload)
3437 g = gcd(nh->nh_weight, g);
3442 for (i = 0; i < nh_grp->count; i++) {
3443 nh = &nh_grp->nexthops[i];
3445 if (!nh->should_offload)
3447 nh->norm_nh_weight = nh->nh_weight / g;
3448 sum_norm_weight += nh->norm_nh_weight;
3451 nh_grp->sum_norm_weight = sum_norm_weight;
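/* The normalization above divides each weight by the group's GCD. For
 * example, two offloadable nexthops with weights 2 and 4 have a GCD of 2,
 * normalized weights of 1 and 2, and a sum_norm_weight of 3. Nexthops that
 * should not be offloaded are skipped.
 */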
3455 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group *nh_grp)
3457 int total = nh_grp->sum_norm_weight;
3458 u16 ecmp_size = nh_grp->ecmp_size;
3459 int i, weight = 0, lower_bound = 0;
3461 for (i = 0; i < nh_grp->count; i++) {
3462 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3465 if (!nh->should_offload)
3467 weight += nh->norm_nh_weight;
3468 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3469 nh->num_adj_entries = upper_bound - lower_bound;
3470 lower_bound = upper_bound;
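/* Continuing the example above with normalized weights 1 and 2 and an ECMP
 * size of 3: the first nexthop gets DIV_ROUND_CLOSEST(3 * 1, 3) = 1
 * adjacency entry and the second gets 3 - 1 = 2, preserving the original
 * 2:4 weight ratio across the adjacency group.
 */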
3474 static struct mlxsw_sp_nexthop *
3475 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3476 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3479 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3480 struct mlxsw_sp_nexthop_group *nh_grp)
3484 for (i = 0; i < nh_grp->count; i++) {
3485 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
3488 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3490 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3495 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3496 struct mlxsw_sp_fib6_entry *fib6_entry)
3498 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3500 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3501 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3502 struct mlxsw_sp_nexthop *nh;
3504 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3505 if (nh && nh->offloaded)
3506 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3508 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3513 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3514 struct mlxsw_sp_nexthop_group *nh_grp)
3516 struct mlxsw_sp_fib6_entry *fib6_entry;
3518 /* Unfortunately, in IPv6 the route and the nexthop are described by
3519 * the same struct, so we need to iterate over all the routes using the
3520 * nexthop group and set / clear the offload indication for them.
3521 */
3522 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3523 common.nexthop_group_node)
3524 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3528 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3529 struct mlxsw_sp_nexthop_group *nh_grp)
3531 switch (nh_grp->type) {
3532 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3533 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3535 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3536 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3542 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3543 struct mlxsw_sp_nexthop_group *nh_grp)
3545 u16 ecmp_size, old_ecmp_size;
3546 struct mlxsw_sp_nexthop *nh;
3547 bool offload_change = false;
3549 bool old_adj_index_valid;
3554 if (!nh_grp->gateway) {
3555 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3559 for (i = 0; i < nh_grp->count; i++) {
3560 nh = &nh_grp->nexthops[i];
3562 if (nh->should_offload != nh->offloaded) {
3563 offload_change = true;
3564 if (nh->should_offload)
3568 if (!offload_change) {
3569 /* Nothing was added or removed, so no need to reallocate. Just
3570 * update MAC on existing adjacency indexes.
3571 */
3572 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, false);
3574 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3579 mlxsw_sp_nexthop_group_normalize(nh_grp);
3580 if (!nh_grp->sum_norm_weight)
3581 /* No neigh of this group is connected so we just set
3582 * the trap and let everything flow through the kernel.
3583 */
3584 goto set_trap;
3586 ecmp_size = nh_grp->sum_norm_weight;
3587 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3589 /* No valid allocation size available. */
3592 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3593 ecmp_size, &adj_index);
3595 /* We ran out of KVD linear space, just set the
3596 * trap and let everything flow through the kernel.
3597 */
3598 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3601 old_adj_index_valid = nh_grp->adj_index_valid;
3602 old_adj_index = nh_grp->adj_index;
3603 old_ecmp_size = nh_grp->ecmp_size;
3604 nh_grp->adj_index_valid = 1;
3605 nh_grp->adj_index = adj_index;
3606 nh_grp->ecmp_size = ecmp_size;
3607 mlxsw_sp_nexthop_group_rebalance(nh_grp);
3608 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nh_grp, true);
3610 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3614 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3616 if (!old_adj_index_valid) {
3617 /* The trap was set for fib entries, so we have to call
3618 * fib entry update to unset it and use adjacency index.
3619 */
3620 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3622 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3628 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3629 old_adj_index, old_ecmp_size);
3630 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3631 old_ecmp_size, old_adj_index);
3633 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3640 old_adj_index_valid = nh_grp->adj_index_valid;
3641 nh_grp->adj_index_valid = 0;
3642 for (i = 0; i < nh_grp->count; i++) {
3643 nh = &nh_grp->nexthops[i];
3646 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3648 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3649 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3650 if (old_adj_index_valid)
3651 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3652 nh_grp->ecmp_size, nh_grp->adj_index);
3655 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3659 nh->should_offload = 1;
3661 nh->should_offload = 0;
3666 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
3667 struct mlxsw_sp_neigh_entry *neigh_entry)
3669 struct neighbour *n, *old_n = neigh_entry->key.n;
3670 struct mlxsw_sp_nexthop *nh;
3671 bool entry_connected;
3675 nh = list_first_entry(&neigh_entry->nexthop_list,
3676 struct mlxsw_sp_nexthop, neigh_list_node);
3678 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3680 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3684 neigh_event_send(n, NULL);
3687 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
3688 neigh_entry->key.n = n;
3689 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3691 goto err_neigh_entry_insert;
3693 read_lock_bh(&n->lock);
3694 nud_state = n->nud_state;
3696 read_unlock_bh(&n->lock);
3697 entry_connected = nud_state & NUD_VALID && !dead;
3699 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3701 neigh_release(old_n);
3703 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
3704 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3711 err_neigh_entry_insert:
3712 neigh_entry->key.n = old_n;
3713 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3719 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3720 struct mlxsw_sp_neigh_entry *neigh_entry,
3721 bool removing, bool dead)
3723 struct mlxsw_sp_nexthop *nh;
3725 if (list_empty(&neigh_entry->nexthop_list))
3731 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
3734 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
3738 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3740 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3741 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
3745 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
3746 struct mlxsw_sp_rif *rif)
3752 list_add(&nh->rif_list_node, &rif->nexthop_list);
3755 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3760 list_del(&nh->rif_list_node);
3764 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3765 struct mlxsw_sp_nexthop *nh)
3767 struct mlxsw_sp_neigh_entry *neigh_entry;
3768 struct neighbour *n;
3772 if (!nh->nh_grp->gateway || nh->neigh_entry)
3775 /* Take a reference on the neighbour here, ensuring that it is
3776 * not destructed before the nexthop entry is finished.
3777 * The reference is taken either in neigh_lookup() or
3778 * in neigh_create() in case n is not found.
3779 */
3780 n = neigh_lookup(nh->nh_grp->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3782 n = neigh_create(nh->nh_grp->neigh_tbl, &nh->gw_addr,
3786 neigh_event_send(n, NULL);
3788 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3790 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3791 if (IS_ERR(neigh_entry)) {
3793 goto err_neigh_entry_create;
3797 /* If that is the first nexthop connected to that neigh, add to
3798 * nexthop_neighs_list
3799 */
3800 if (list_empty(&neigh_entry->nexthop_list))
3801 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
3802 &mlxsw_sp->router->nexthop_neighs_list);
3804 nh->neigh_entry = neigh_entry;
3805 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3806 read_lock_bh(&n->lock);
3807 nud_state = n->nud_state;
3809 read_unlock_bh(&n->lock);
3810 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
3814 err_neigh_entry_create:
3819 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3820 struct mlxsw_sp_nexthop *nh)
3822 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3823 struct neighbour *n;
3827 n = neigh_entry->key.n;
3829 __mlxsw_sp_nexthop_neigh_update(nh, true);
3830 list_del(&nh->neigh_list_node);
3831 nh->neigh_entry = NULL;
3833 /* If that is the last nexthop connected to that neigh, remove from
3834 * nexthop_neighs_list
3835 */
3836 if (list_empty(&neigh_entry->nexthop_list))
3837 list_del(&neigh_entry->nexthop_neighs_list_node);
3839 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3840 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3845 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3847 struct net_device *ul_dev;
3851 ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3852 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
3858 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3859 struct mlxsw_sp_nexthop *nh,
3860 struct mlxsw_sp_ipip_entry *ipip_entry)
3864 if (!nh->nh_grp->gateway || nh->ipip_entry)
3867 nh->ipip_entry = ipip_entry;
3868 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
3869 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3870 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
3873 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3874 struct mlxsw_sp_nexthop *nh)
3876 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3881 __mlxsw_sp_nexthop_neigh_update(nh, true);
3882 nh->ipip_entry = NULL;
3885 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3886 const struct fib_nh *fib_nh,
3887 enum mlxsw_sp_ipip_type *p_ipipt)
3889 struct net_device *dev = fib_nh->fib_nh_dev;
3892 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3893 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
3896 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3897 struct mlxsw_sp_nexthop *nh)
3900 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3901 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3902 mlxsw_sp_nexthop_rif_fini(nh);
3904 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3905 mlxsw_sp_nexthop_rif_fini(nh);
3906 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3911 static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
3912 struct mlxsw_sp_nexthop *nh,
3913 struct fib_nh *fib_nh)
3915 const struct mlxsw_sp_ipip_ops *ipip_ops;
3916 struct net_device *dev = fib_nh->fib_nh_dev;
3917 struct mlxsw_sp_ipip_entry *ipip_entry;
3918 struct mlxsw_sp_rif *rif;
3921 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3923 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3924 if (ipip_ops->can_offload(mlxsw_sp, dev,
3925 MLXSW_SP_L3_PROTO_IPV4)) {
3926 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3927 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3932 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3933 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3937 mlxsw_sp_nexthop_rif_init(nh, rif);
3938 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3940 goto err_neigh_init;
3945 mlxsw_sp_nexthop_rif_fini(nh);
3949 static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3950 struct mlxsw_sp_nexthop *nh)
3952 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3955 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3956 struct mlxsw_sp_nexthop_group *nh_grp,
3957 struct mlxsw_sp_nexthop *nh,
3958 struct fib_nh *fib_nh)
3960 struct net_device *dev = fib_nh->fib_nh_dev;
3961 struct in_device *in_dev;
3964 nh->nh_grp = nh_grp;
3965 nh->key.fib_nh = fib_nh;
3966 #ifdef CONFIG_IP_ROUTE_MULTIPATH
3967 nh->nh_weight = fib_nh->fib_nh_weight;
3971 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
3972 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3976 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
3977 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3983 in_dev = __in_dev_get_rcu(dev);
3984 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3985 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
3991 err = mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
3993 goto err_nexthop_neigh_init;
3997 err_nexthop_neigh_init:
3998 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4002 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4003 struct mlxsw_sp_nexthop *nh)
4005 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
4006 list_del(&nh->router_list_node);
4007 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4008 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4011 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4012 unsigned long event, struct fib_nh *fib_nh)
4014 struct mlxsw_sp_nexthop_key key;
4015 struct mlxsw_sp_nexthop *nh;
4017 if (mlxsw_sp->router->aborted)
4020 key.fib_nh = fib_nh;
4021 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4026 case FIB_EVENT_NH_ADD:
4027 mlxsw_sp_nexthop4_type_init(mlxsw_sp, nh, fib_nh);
4029 case FIB_EVENT_NH_DEL:
4030 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
4034 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
4037 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4038 struct mlxsw_sp_rif *rif)
4040 struct mlxsw_sp_nexthop *nh;
4043 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4045 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4048 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4049 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4056 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4057 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
4061 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4062 struct mlxsw_sp_rif *old_rif,
4063 struct mlxsw_sp_rif *new_rif)
4065 struct mlxsw_sp_nexthop *nh;
4067 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4068 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4070 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4073 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4074 struct mlxsw_sp_rif *rif)
4076 struct mlxsw_sp_nexthop *nh, *tmp;
4078 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4079 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4080 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
4084 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4085 struct fib_info *fi)
4087 const struct fib_nh *nh = fib_info_nh(fi, 0);
4089 return nh->fib_nh_scope == RT_SCOPE_LINK ||
4090 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
4093 static struct mlxsw_sp_nexthop_group *
4094 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
4096 unsigned int nhs = fib_info_num_path(fi);
4097 struct mlxsw_sp_nexthop_group *nh_grp;
4098 struct mlxsw_sp_nexthop *nh;
4099 struct fib_nh *fib_nh;
4103 nh_grp = kzalloc(struct_size(nh_grp, nexthops, nhs), GFP_KERNEL);
4105 return ERR_PTR(-ENOMEM);
4107 INIT_LIST_HEAD(&nh_grp->fib_list);
4108 nh_grp->neigh_tbl = &arp_tbl;
4109 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
4111 nh_grp->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, fi);
4112 nh_grp->count = nhs;
4114 for (i = 0; i < nh_grp->count; i++) {
4115 nh = &nh_grp->nexthops[i];
4116 fib_nh = fib_info_nh(fi, i);
4117 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
4119 goto err_nexthop4_init;
4121 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4123 goto err_nexthop_group_insert;
4124 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4127 err_nexthop_group_insert:
4129 for (i--; i >= 0; i--) {
4130 nh = &nh_grp->nexthops[i];
4131 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4135 return ERR_PTR(err);
4139 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
4140 struct mlxsw_sp_nexthop_group *nh_grp)
4142 struct mlxsw_sp_nexthop *nh;
4145 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
4146 for (i = 0; i < nh_grp->count; i++) {
4147 nh = &nh_grp->nexthops[i];
4148 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4150 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4151 WARN_ON_ONCE(nh_grp->adj_index_valid);
4152 fib_info_put(mlxsw_sp_nexthop4_group_fi(nh_grp));
4156 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
4157 struct mlxsw_sp_fib_entry *fib_entry,
4158 struct fib_info *fi)
4160 struct mlxsw_sp_nexthop_group *nh_grp;
4162 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
4164 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
4166 return PTR_ERR(nh_grp);
4168 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
4169 fib_entry->nh_group = nh_grp;
4173 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
4174 struct mlxsw_sp_fib_entry *fib_entry)
4176 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4178 list_del(&fib_entry->nexthop_group_node);
4179 if (!list_empty(&nh_grp->fib_list))
4181 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
4185 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4187 struct mlxsw_sp_fib4_entry *fib4_entry;
4189 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4191 return !fib4_entry->tos;
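/* The check above means only the TOS 0 alias of an IPv4 prefix is
 * considered for offload; entries with a non-zero TOS are not programmed
 * with a forwarding action (see the trap / discard handling in the fib
 * entry op helpers below).
 */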
4195 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4197 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4199 switch (fib_entry->fib_node->fib->proto) {
4200 case MLXSW_SP_L3_PROTO_IPV4:
4201 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
4204 case MLXSW_SP_L3_PROTO_IPV6:
4208 switch (fib_entry->type) {
4209 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4210 return !!nh_group->adj_index_valid;
4211 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4212 return !!nh_group->nh_rif;
4213 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4214 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4215 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4222 static struct mlxsw_sp_nexthop *
4223 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
4224 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4228 for (i = 0; i < nh_grp->count; i++) {
4229 struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
4230 struct fib6_info *rt = mlxsw_sp_rt6->rt;
4232 if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
4233 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
4234 &rt->fib6_nh->fib_nh_gw6))
4243 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4244 struct mlxsw_sp_fib_entry *fib_entry)
4246 struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
4247 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4248 int dst_len = fib_entry->fib_node->key.prefix_len;
4249 struct mlxsw_sp_fib4_entry *fib4_entry;
4250 struct fib_rt_info fri;
4251 bool should_offload;
4253 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4254 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4257 fri.tb_id = fib4_entry->tb_id;
4258 fri.dst = cpu_to_be32(*p_dst);
4259 fri.dst_len = dst_len;
4260 fri.tos = fib4_entry->tos;
4261 fri.type = fib4_entry->type;
4262 fri.offload = should_offload;
4263 fri.trap = !should_offload;
4264 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4268 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4269 struct mlxsw_sp_fib_entry *fib_entry)
4271 struct fib_info *fi = mlxsw_sp_nexthop4_group_fi(fib_entry->nh_group);
4272 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4273 int dst_len = fib_entry->fib_node->key.prefix_len;
4274 struct mlxsw_sp_fib4_entry *fib4_entry;
4275 struct fib_rt_info fri;
4277 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4280 fri.tb_id = fib4_entry->tb_id;
4281 fri.dst = cpu_to_be32(*p_dst);
4282 fri.dst_len = dst_len;
4283 fri.tos = fib4_entry->tos;
4284 fri.type = fib4_entry->type;
4285 fri.offload = false;
4287 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4291 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4292 struct mlxsw_sp_fib_entry *fib_entry)
4294 struct mlxsw_sp_fib6_entry *fib6_entry;
4295 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4296 bool should_offload;
4298 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4300 /* In IPv6 a multipath route is represented using multiple routes, so
4301 * we need to set the flags on all of them.
4302 */
4303 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4305 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4306 fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
4311 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4312 struct mlxsw_sp_fib_entry *fib_entry)
4314 struct mlxsw_sp_fib6_entry *fib6_entry;
4315 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4317 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4319 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4320 fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
4324 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4325 struct mlxsw_sp_fib_entry *fib_entry)
4327 switch (fib_entry->fib_node->fib->proto) {
4328 case MLXSW_SP_L3_PROTO_IPV4:
4329 mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
4331 case MLXSW_SP_L3_PROTO_IPV6:
4332 mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
4338 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4339 struct mlxsw_sp_fib_entry *fib_entry)
4341 switch (fib_entry->fib_node->fib->proto) {
4342 case MLXSW_SP_L3_PROTO_IPV4:
4343 mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4345 case MLXSW_SP_L3_PROTO_IPV6:
4346 mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4352 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
4353 struct mlxsw_sp_fib_entry *fib_entry,
4354 enum mlxsw_sp_fib_entry_op op)
4357 case MLXSW_SP_FIB_ENTRY_OP_WRITE:
4358 case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
4359 mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
4361 case MLXSW_SP_FIB_ENTRY_OP_DELETE:
4362 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4369 struct mlxsw_sp_fib_entry_op_ctx_basic {
4370 char ralue_pl[MLXSW_REG_RALUE_LEN];
4374 mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4375 enum mlxsw_sp_l3proto proto,
4376 enum mlxsw_sp_fib_entry_op op,
4377 u16 virtual_router, u8 prefix_len,
4378 unsigned char *addr,
4379 struct mlxsw_sp_fib_entry_priv *priv)
4381 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4382 enum mlxsw_reg_ralxx_protocol ralxx_proto;
4383 char *ralue_pl = op_ctx_basic->ralue_pl;
4384 enum mlxsw_reg_ralue_op ralue_op;
4386 ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
4389 case MLXSW_SP_FIB_ENTRY_OP_WRITE:
4390 case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
4391 ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
4393 case MLXSW_SP_FIB_ENTRY_OP_DELETE:
4394 ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4402 case MLXSW_SP_L3_PROTO_IPV4:
4403 mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
4404 virtual_router, prefix_len, (u32 *) addr);
4406 case MLXSW_SP_L3_PROTO_IPV6:
4407 mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
4408 virtual_router, prefix_len, addr);
4414 mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4415 enum mlxsw_reg_ralue_trap_action trap_action,
4416 u16 trap_id, u32 adjacency_index, u16 ecmp_size)
4418 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4420 mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
4421 trap_id, adjacency_index, ecmp_size);
4425 mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4426 enum mlxsw_reg_ralue_trap_action trap_action,
4427 u16 trap_id, u16 local_erif)
4429 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4431 mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
4432 trap_id, local_erif);
4436 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
4438 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4440 mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
4444 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4447 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4449 mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
4453 mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
4454 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4455 bool *postponed_for_bulk)
4457 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4459 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
4460 op_ctx_basic->ralue_pl);
4464 mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
4469 static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4470 struct mlxsw_sp_fib_entry *fib_entry,
4471 enum mlxsw_sp_fib_entry_op op)
4473 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
4475 mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
4476 fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
4477 fib_entry->fib_node->key.prefix_len,
4478 fib_entry->fib_node->key.addr,
4482 int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
4483 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4484 const struct mlxsw_sp_router_ll_ops *ll_ops)
4486 bool postponed_for_bulk = false;
4489 err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
4490 if (!postponed_for_bulk)
4491 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
4495 static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
4497 enum mlxsw_reg_ratr_trap_action trap_action;
4498 char ratr_pl[MLXSW_REG_RATR_LEN];
4501 if (mlxsw_sp->router->adj_discard_index_valid)
4504 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4505 &mlxsw_sp->router->adj_discard_index);
4509 trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
4510 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4511 MLXSW_REG_RATR_TYPE_ETHERNET,
4512 mlxsw_sp->router->adj_discard_index, rif_index);
4513 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4514 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4516 goto err_ratr_write;
4518 mlxsw_sp->router->adj_discard_index_valid = true;
4523 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4524 mlxsw_sp->router->adj_discard_index);
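/* The discard adjacency entry written above is allocated once (note the
 * adj_discard_index_valid check) and reused by mlxsw_sp_fib_entry_op_remote()
 * below when a gateway route's nexthop group has a RIF but no valid
 * adjacency index, so such traffic is discarded in hardware rather than
 * trapped to the kernel.
 */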
4528 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
4529 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4530 struct mlxsw_sp_fib_entry *fib_entry,
4531 enum mlxsw_sp_fib_entry_op op)
4533 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4534 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4535 enum mlxsw_reg_ralue_trap_action trap_action;
4537 u32 adjacency_index = 0;
4541 /* In case the nexthop group adjacency index is valid, use it
4542 * with the provided ECMP size. Otherwise, set up a trap and pass
4543 * the traffic to the kernel.
4544 */
4545 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4546 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4547 adjacency_index = fib_entry->nh_group->adj_index;
4548 ecmp_size = fib_entry->nh_group->ecmp_size;
4549 } else if (!nh_group->adj_index_valid && nh_group->count &&
4551 err = mlxsw_sp_adj_discard_write(mlxsw_sp,
4552 nh_group->nh_rif->rif_index);
4555 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4556 adjacency_index = mlxsw_sp->router->adj_discard_index;
4559 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4560 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4563 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4564 ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
4565 adjacency_index, ecmp_size);
4566 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
4569 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4570 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4571 struct mlxsw_sp_fib_entry *fib_entry,
4572 enum mlxsw_sp_fib_entry_op op)
4574 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4575 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nh_rif;
4576 enum mlxsw_reg_ralue_trap_action trap_action;
4580 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4581 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4582 rif_index = rif->rif_index;
4584 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4585 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4588 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4589 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
4590 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
4593 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4594 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4595 struct mlxsw_sp_fib_entry *fib_entry,
4596 enum mlxsw_sp_fib_entry_op op)
4598 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4600 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4601 ll_ops->fib_entry_act_ip2me_pack(op_ctx);
4602 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
4605 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
4606 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4607 struct mlxsw_sp_fib_entry *fib_entry,
4608 enum mlxsw_sp_fib_entry_op op)
4610 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4611 enum mlxsw_reg_ralue_trap_action trap_action;
4613 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
4614 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4615 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
4616 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
4620 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
4621 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4622 struct mlxsw_sp_fib_entry *fib_entry,
4623 enum mlxsw_sp_fib_entry_op op)
4625 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4626 enum mlxsw_reg_ralue_trap_action trap_action;
4629 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4630 trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
4632 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4633 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
4634 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
4638 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4639 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4640 struct mlxsw_sp_fib_entry *fib_entry,
4641 enum mlxsw_sp_fib_entry_op op)
4643 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4644 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4645 const struct mlxsw_sp_ipip_ops *ipip_ops;
4647 if (WARN_ON(!ipip_entry))
4650 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4651 return ipip_ops->fib_entry_op(mlxsw_sp, ll_ops, op_ctx, ipip_entry, op,
4652 fib_entry->decap.tunnel_index, fib_entry->priv);
4655 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
4656 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4657 struct mlxsw_sp_fib_entry *fib_entry,
4658 enum mlxsw_sp_fib_entry_op op)
4660 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4662 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4663 ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
4664 fib_entry->decap.tunnel_index);
4665 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
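/* Dispatch the requested operation according to the FIB entry type. Each
 * handler packs the matching RALUE action (remote, local, trap, decap)
 * and commits it through the low-level ops of the owning FIB.
 */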
4668 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4669 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4670 struct mlxsw_sp_fib_entry *fib_entry,
4671 enum mlxsw_sp_fib_entry_op op)
4673 switch (fib_entry->type) {
4674 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4675 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
4676 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4677 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
4678 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
4679 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
4680 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4681 return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
4682 case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
4683 return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
4684 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4685 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
4686 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4687 return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
4692 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4693 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4694 struct mlxsw_sp_fib_entry *fib_entry,
4695 enum mlxsw_sp_fib_entry_op op)
4697 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
4702 mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
4707 static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4708 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4709 struct mlxsw_sp_fib_entry *fib_entry,
4712 return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
4713 is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
4714 MLXSW_SP_FIB_ENTRY_OP_UPDATE);
4717 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4718 struct mlxsw_sp_fib_entry *fib_entry)
4720 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
4722 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
4723 return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
4726 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4727 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4728 struct mlxsw_sp_fib_entry *fib_entry)
4730 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4732 if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
4734 return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
4735 MLXSW_SP_FIB_ENTRY_OP_DELETE);
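/* Map the kernel route type to a device FIB entry type: routes directed at
 * the host are trapped, blackhole routes are discarded, unreachable routes
 * are trapped with a lower priority, and unicast routes become remote
 * (gatewayed) or local entries. Routes whose destination matches an
 * IP-in-IP or NVE tunnel decap address are programmed as decap entries.
 */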
4739 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4740 const struct fib_entry_notifier_info *fen_info,
4741 struct mlxsw_sp_fib_entry *fib_entry)
4743 struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
4744 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4745 struct mlxsw_sp_router *router = mlxsw_sp->router;
4746 u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
4747 struct mlxsw_sp_ipip_entry *ipip_entry;
4748 struct fib_info *fi = fen_info->fi;
4750 switch (fen_info->type) {
4752 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4753 MLXSW_SP_L3_PROTO_IPV4, dip);
4754 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
4755 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4756 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4760 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
4761 MLXSW_SP_L3_PROTO_IPV4,
4765 tunnel_index = router->nve_decap_config.tunnel_index;
4766 fib_entry->decap.tunnel_index = tunnel_index;
4767 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
4772 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4775 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
4777 case RTN_UNREACHABLE:
4779 /* Packets hitting these routes need to be trapped, but
4780	 * this can be done with a lower priority than packets directed
4781 * at the host, so use action type local instead of trap.
4783 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
4786 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
4787 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4789 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4797 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
4798 struct mlxsw_sp_fib_entry *fib_entry)
4800 switch (fib_entry->type) {
4801 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4802 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
4809 static struct mlxsw_sp_fib4_entry *
4810 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4811 struct mlxsw_sp_fib_node *fib_node,
4812 const struct fib_entry_notifier_info *fen_info)
4814 struct mlxsw_sp_fib4_entry *fib4_entry;
4815 struct mlxsw_sp_fib_entry *fib_entry;
4818 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4820 return ERR_PTR(-ENOMEM);
4821 fib_entry = &fib4_entry->common;
4823 fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
4824 if (IS_ERR(fib_entry->priv)) {
4825 err = PTR_ERR(fib_entry->priv);
4826 goto err_fib_entry_priv_create;
4829 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4831 goto err_fib4_entry_type_set;
4833 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
4835 goto err_nexthop4_group_get;
4837 fib4_entry->prio = fen_info->fi->fib_priority;
4838 fib4_entry->tb_id = fen_info->tb_id;
4839 fib4_entry->type = fen_info->type;
4840 fib4_entry->tos = fen_info->tos;
4842 fib_entry->fib_node = fib_node;
4846 err_nexthop4_group_get:
4847 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
4848 err_fib4_entry_type_set:
4849 mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
4850 err_fib_entry_priv_create:
4852 return ERR_PTR(err);
4855 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4856 struct mlxsw_sp_fib4_entry *fib4_entry)
4858 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
4859 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
4860 mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
4864 static struct mlxsw_sp_fib4_entry *
4865 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4866 const struct fib_entry_notifier_info *fen_info)
4868 struct mlxsw_sp_fib4_entry *fib4_entry;
4869 struct mlxsw_sp_fib_node *fib_node;
4870 struct mlxsw_sp_fib *fib;
4871 struct mlxsw_sp_vr *vr;
4873 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4876 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4878 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4879 sizeof(fen_info->dst),
4884 fib4_entry = container_of(fib_node->fib_entry,
4885 struct mlxsw_sp_fib4_entry, common);
4886 if (fib4_entry->tb_id == fen_info->tb_id &&
4887 fib4_entry->tos == fen_info->tos &&
4888 fib4_entry->type == fen_info->type &&
4889 mlxsw_sp_nexthop4_group_fi(fib4_entry->common.nh_group) ==
4896 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4897 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4898 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4899 .key_len = sizeof(struct mlxsw_sp_fib_key),
4900 .automatic_shrinking = true,
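/* FIB nodes are kept in a per-FIB rhashtable keyed by destination prefix
 * and prefix length; each node holds at most one FIB entry.
 */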
4903 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4904 struct mlxsw_sp_fib_node *fib_node)
4906 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4907 mlxsw_sp_fib_ht_params);
4910 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4911 struct mlxsw_sp_fib_node *fib_node)
4913 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4914 mlxsw_sp_fib_ht_params);
4917 static struct mlxsw_sp_fib_node *
4918 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4919 size_t addr_len, unsigned char prefix_len)
4921 struct mlxsw_sp_fib_key key;
4923 memset(&key, 0, sizeof(key));
4924 memcpy(key.addr, addr, addr_len);
4925 key.prefix_len = prefix_len;
4926 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4929 static struct mlxsw_sp_fib_node *
4930 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
4931 size_t addr_len, unsigned char prefix_len)
4933 struct mlxsw_sp_fib_node *fib_node;
4935 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4939 list_add(&fib_node->list, &fib->node_list);
4940 memcpy(fib_node->key.addr, addr, addr_len);
4941 fib_node->key.prefix_len = prefix_len;
4946 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4948 list_del(&fib_node->list);
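/* Adding a node with a previously unused prefix length may require a
 * different LPM tree: request one that also covers the new prefix length
 * and rebind the protocol's virtual routers to it.
 */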
4952 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4953 struct mlxsw_sp_fib_node *fib_node)
4955 struct mlxsw_sp_prefix_usage req_prefix_usage;
4956 struct mlxsw_sp_fib *fib = fib_node->fib;
4957 struct mlxsw_sp_lpm_tree *lpm_tree;
4960 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4961 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4964 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4965 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4966 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
4968 if (IS_ERR(lpm_tree))
4969 return PTR_ERR(lpm_tree);
4971 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
4973 goto err_lpm_tree_replace;
4976 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
4979 err_lpm_tree_replace:
4980 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
4984 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
4985 struct mlxsw_sp_fib_node *fib_node)
4987 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
4988 struct mlxsw_sp_prefix_usage req_prefix_usage;
4989 struct mlxsw_sp_fib *fib = fib_node->fib;
4992 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4994 /* Try to construct a new LPM tree from the current prefix usage
4995 * minus the unused one. If we fail, continue using the old one.
4997 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4998 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
4999 fib_node->key.prefix_len);
5000 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
5002 if (IS_ERR(lpm_tree))
5005 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
5007 goto err_lpm_tree_replace;
5011 err_lpm_tree_replace:
5012 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
5015 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
5016 struct mlxsw_sp_fib_node *fib_node,
5017 struct mlxsw_sp_fib *fib)
5021 err = mlxsw_sp_fib_node_insert(fib, fib_node);
5024 fib_node->fib = fib;
5026 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
5028 goto err_fib_lpm_tree_link;
5032 err_fib_lpm_tree_link:
5033 fib_node->fib = NULL;
5034 mlxsw_sp_fib_node_remove(fib, fib_node);
5038 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
5039 struct mlxsw_sp_fib_node *fib_node)
5041 struct mlxsw_sp_fib *fib = fib_node->fib;
5043 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
5044 fib_node->fib = NULL;
5045 mlxsw_sp_fib_node_remove(fib, fib_node);
5048 static struct mlxsw_sp_fib_node *
5049 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
5050 size_t addr_len, unsigned char prefix_len,
5051 enum mlxsw_sp_l3proto proto)
5053 struct mlxsw_sp_fib_node *fib_node;
5054 struct mlxsw_sp_fib *fib;
5055 struct mlxsw_sp_vr *vr;
5058 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
5060 return ERR_CAST(vr);
5061 fib = mlxsw_sp_vr_fib(vr, proto);
5063 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
5067 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
5070 goto err_fib_node_create;
5073 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
5075 goto err_fib_node_init;
5080 mlxsw_sp_fib_node_destroy(fib_node);
5081 err_fib_node_create:
5082 mlxsw_sp_vr_put(mlxsw_sp, vr);
5083 return ERR_PTR(err);
5086 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
5087 struct mlxsw_sp_fib_node *fib_node)
5089 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
5091 if (fib_node->fib_entry)
5093 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
5094 mlxsw_sp_fib_node_destroy(fib_node);
5095 mlxsw_sp_vr_put(mlxsw_sp, vr);
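/* Linking an entry to its node programs it to the device (as a new write
 * or an update of an existing entry) and records it on the node; unlinking
 * deletes it from the device and clears the node's entry pointer.
 */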
5098 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
5099 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5100 struct mlxsw_sp_fib_entry *fib_entry)
5102 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
5103 bool is_new = !fib_node->fib_entry;
5106 fib_node->fib_entry = fib_entry;
5108 err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
5110 goto err_fib_entry_update;
5114 err_fib_entry_update:
5115 fib_node->fib_entry = NULL;
5119 static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5120 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5121 struct mlxsw_sp_fib_entry *fib_entry)
5123 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
5126 err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
5127 fib_node->fib_entry = NULL;
5131 static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5132 struct mlxsw_sp_fib_entry *fib_entry)
5134 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
5136 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
5137 __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
5140 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
5142 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
5143 struct mlxsw_sp_fib4_entry *fib4_replaced;
5145 if (!fib_node->fib_entry)
5148 fib4_replaced = container_of(fib_node->fib_entry,
5149 struct mlxsw_sp_fib4_entry, common);
5150 if (fib4_entry->tb_id == RT_TABLE_MAIN &&
5151 fib4_replaced->tb_id == RT_TABLE_LOCAL)
5158 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
5159 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5160 const struct fib_entry_notifier_info *fen_info)
5162 struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
5163 struct mlxsw_sp_fib_entry *replaced;
5164 struct mlxsw_sp_fib_node *fib_node;
5167 if (mlxsw_sp->router->aborted)
5170 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
5171 &fen_info->dst, sizeof(fen_info->dst),
5173 MLXSW_SP_L3_PROTO_IPV4);
5174 if (IS_ERR(fib_node)) {
5175 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
5176 return PTR_ERR(fib_node);
5179 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
5180 if (IS_ERR(fib4_entry)) {
5181 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
5182 err = PTR_ERR(fib4_entry);
5183 goto err_fib4_entry_create;
5186 if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
5187 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5188 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5192 replaced = fib_node->fib_entry;
5193 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
5195 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
5196 goto err_fib_node_entry_link;
5199 /* Nothing to replace */
5203 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
5204 fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
5206 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
5210 err_fib_node_entry_link:
5211 fib_node->fib_entry = replaced;
5212 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5213 err_fib4_entry_create:
5214 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5218 static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
5219 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5220 struct fib_entry_notifier_info *fen_info)
5222 struct mlxsw_sp_fib4_entry *fib4_entry;
5223 struct mlxsw_sp_fib_node *fib_node;
5226 if (mlxsw_sp->router->aborted)
5229 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
5232 fib_node = fib4_entry->common.fib_node;
5234 err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
5235 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5236 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5240 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
5242 /* Multicast routes aren't supported, so ignore them. Neighbour
5243 * Discovery packets are specifically trapped.
5245 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
5248 /* Cloned routes are irrelevant in the forwarding path. */
5249 if (rt->fib6_flags & RTF_CACHE)
5255 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
5257 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5259 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
5261 return ERR_PTR(-ENOMEM);
5263 /* In case of route replace, replaced route is deleted with
5264	 * no notification. Take reference to prevent accessing freed memory.
5267 mlxsw_sp_rt6->rt = rt;
5270 return mlxsw_sp_rt6;
5273 #if IS_ENABLED(CONFIG_IPV6)
5274 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5276 fib6_info_release(rt);
5279 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5284 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5286 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
5288 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
5289 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
5290 kfree(mlxsw_sp_rt6);
5293 static struct fib6_info *
5294 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
5296 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
5300 static struct mlxsw_sp_rt6 *
5301 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
5302 const struct fib6_info *rt)
5304 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5306 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
5307 if (mlxsw_sp_rt6->rt == rt)
5308 return mlxsw_sp_rt6;
5314 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
5315 const struct fib6_info *rt,
5316 enum mlxsw_sp_ipip_type *ret)
5318 return rt->fib6_nh->fib_nh_dev &&
5319 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
5322 static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
5323 struct mlxsw_sp_nexthop_group *nh_grp,
5324 struct mlxsw_sp_nexthop *nh,
5325 const struct fib6_info *rt)
5327 const struct mlxsw_sp_ipip_ops *ipip_ops;
5328 struct mlxsw_sp_ipip_entry *ipip_entry;
5329 struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5330 struct mlxsw_sp_rif *rif;
5333 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
5335 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
5336 if (ipip_ops->can_offload(mlxsw_sp, dev,
5337 MLXSW_SP_L3_PROTO_IPV6)) {
5338 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
5339 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
5344 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
5345 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
5348 mlxsw_sp_nexthop_rif_init(nh, rif);
5350 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
5352 goto err_nexthop_neigh_init;
5356 err_nexthop_neigh_init:
5357 mlxsw_sp_nexthop_rif_fini(nh);
5361 static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
5362 struct mlxsw_sp_nexthop *nh)
5364 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5367 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
5368 struct mlxsw_sp_nexthop_group *nh_grp,
5369 struct mlxsw_sp_nexthop *nh,
5370 const struct fib6_info *rt)
5372 struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5374 nh->nh_grp = nh_grp;
5375 nh->nh_weight = rt->fib6_nh->fib_nh_weight;
5376 memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
5377 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5379 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5383 nh->ifindex = dev->ifindex;
5385 return mlxsw_sp_nexthop6_type_init(mlxsw_sp, nh_grp, nh, rt);
5388 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
5389 struct mlxsw_sp_nexthop *nh)
5391 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
5392 list_del(&nh->router_list_node);
5393 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5396 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5397 const struct fib6_info *rt)
5399 return rt->fib6_nh->fib_nh_gw_family ||
5400 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
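/* Create a nexthop group from the rt6 list of an IPv6 entry. Each rt6
 * contributes one nexthop, the group is inserted into the nexthop group
 * hash table so that identical groups can be shared, and its adjacency
 * entries are refreshed in the device.
 */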
5403 static struct mlxsw_sp_nexthop_group *
5404 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
5405 struct mlxsw_sp_fib6_entry *fib6_entry)
5407 struct mlxsw_sp_nexthop_group *nh_grp;
5408 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5409 struct mlxsw_sp_nexthop *nh;
5413 nh_grp = kzalloc(struct_size(nh_grp, nexthops, fib6_entry->nrt6),
5416 return ERR_PTR(-ENOMEM);
5417 INIT_LIST_HEAD(&nh_grp->fib_list);
5418 #if IS_ENABLED(CONFIG_IPV6)
5419 nh_grp->neigh_tbl = &nd_tbl;
5421 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
5422 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
5423 struct mlxsw_sp_rt6, list);
5424 nh_grp->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
5425 nh_grp->count = fib6_entry->nrt6;
5426 for (i = 0; i < nh_grp->count; i++) {
5427 struct fib6_info *rt = mlxsw_sp_rt6->rt;
5429 nh = &nh_grp->nexthops[i];
5430 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
5432 goto err_nexthop6_init;
5433 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
5436 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5438 goto err_nexthop_group_insert;
5440 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5443 err_nexthop_group_insert:
5445 for (i--; i >= 0; i--) {
5446 nh = &nh_grp->nexthops[i];
5447 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5450 return ERR_PTR(err);
5454 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
5455 struct mlxsw_sp_nexthop_group *nh_grp)
5457 struct mlxsw_sp_nexthop *nh;
5458 int i = nh_grp->count;
5460 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5461 for (i--; i >= 0; i--) {
5462 nh = &nh_grp->nexthops[i];
5463 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5465 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5466 WARN_ON(nh_grp->adj_index_valid);
5470 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
5471 struct mlxsw_sp_fib6_entry *fib6_entry)
5473 struct mlxsw_sp_nexthop_group *nh_grp;
5475 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
5477 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
5479 return PTR_ERR(nh_grp);
5482 list_add_tail(&fib6_entry->common.nexthop_group_node,
5484 fib6_entry->common.nh_group = nh_grp;
5486 /* The route and the nexthop are described by the same struct, so we
5487	 * need to update the nexthop offload indication for the new route.
5489 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
5494 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
5495 struct mlxsw_sp_fib_entry *fib_entry)
5497 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5499 list_del(&fib_entry->nexthop_group_node);
5500 if (!list_empty(&nh_grp->fib_list))
5502 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
5505 static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
5506 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5507 struct mlxsw_sp_fib6_entry *fib6_entry)
5509 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
5512 fib6_entry->common.nh_group = NULL;
5513 list_del(&fib6_entry->common.nexthop_group_node);
5515 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5517 goto err_nexthop6_group_get;
5519 /* In case this entry is offloaded, then the adjacency index
5520 * currently associated with it in the device's table is that
5521 * of the old group. Start using the new one instead.
5523 err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
5524 &fib6_entry->common, false);
5526 goto err_fib_entry_update;
5528 if (list_empty(&old_nh_grp->fib_list))
5529 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
5533 err_fib_entry_update:
5534 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5535 err_nexthop6_group_get:
5536 list_add_tail(&fib6_entry->common.nexthop_group_node,
5537 &old_nh_grp->fib_list);
5538 fib6_entry->common.nh_group = old_nh_grp;
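/* When sibling routes are appended to or deleted from a multipath route,
 * the corresponding rt6 objects are added to or removed from the entry's
 * list and the nexthop group is rebuilt to match the new set of nexthops.
 */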
5543 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5544 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5545 struct mlxsw_sp_fib6_entry *fib6_entry,
5546 struct fib6_info **rt_arr, unsigned int nrt6)
5548 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5551 for (i = 0; i < nrt6; i++) {
5552 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5553 if (IS_ERR(mlxsw_sp_rt6)) {
5554 err = PTR_ERR(mlxsw_sp_rt6);
5555 goto err_rt6_create;
5558 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5562 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
5564 goto err_nexthop6_group_update;
5568 err_nexthop6_group_update:
5571 for (i--; i >= 0; i--) {
5573 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5574 struct mlxsw_sp_rt6, list);
5575 list_del(&mlxsw_sp_rt6->list);
5576 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5582 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5583 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5584 struct mlxsw_sp_fib6_entry *fib6_entry,
5585 struct fib6_info **rt_arr, unsigned int nrt6)
5587 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5590 for (i = 0; i < nrt6; i++) {
5591 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
5593 if (WARN_ON_ONCE(!mlxsw_sp_rt6))
5597 list_del(&mlxsw_sp_rt6->list);
5598 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5601 mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
5604 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5605 struct mlxsw_sp_fib_entry *fib_entry,
5606 const struct fib6_info *rt)
5608 /* Packets hitting RTF_REJECT routes need to be discarded by the
5609 * stack. We can rely on their destination device not having a
5610 * RIF (it's the loopback device) and can thus use action type
5611 * local, which will cause them to be trapped with a lower
5612 * priority than packets that need to be locally received.
5614 if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
5615 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5616 else if (rt->fib6_type == RTN_BLACKHOLE)
5617 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
5618 else if (rt->fib6_flags & RTF_REJECT)
5619 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
5620 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
5621 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5623 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5627 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5629 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5631 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5634 list_del(&mlxsw_sp_rt6->list);
5635 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5639 static struct mlxsw_sp_fib6_entry *
5640 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5641 struct mlxsw_sp_fib_node *fib_node,
5642 struct fib6_info **rt_arr, unsigned int nrt6)
5644 struct mlxsw_sp_fib6_entry *fib6_entry;
5645 struct mlxsw_sp_fib_entry *fib_entry;
5646 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5649 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5651 return ERR_PTR(-ENOMEM);
5652 fib_entry = &fib6_entry->common;
5654 fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
5655 if (IS_ERR(fib_entry->priv)) {
5656 err = PTR_ERR(fib_entry->priv);
5657 goto err_fib_entry_priv_create;
5660 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5662 for (i = 0; i < nrt6; i++) {
5663 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5664 if (IS_ERR(mlxsw_sp_rt6)) {
5665 err = PTR_ERR(mlxsw_sp_rt6);
5666 goto err_rt6_create;
5668 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5672 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
5674 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5676 goto err_nexthop6_group_get;
5678 fib_entry->fib_node = fib_node;
5682 err_nexthop6_group_get:
5685 for (i--; i >= 0; i--) {
5687 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5688 struct mlxsw_sp_rt6, list);
5689 list_del(&mlxsw_sp_rt6->list);
5690 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5692 mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
5693 err_fib_entry_priv_create:
5695 return ERR_PTR(err);
5698 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5699 struct mlxsw_sp_fib6_entry *fib6_entry)
5701 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5702 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5703 WARN_ON(fib6_entry->nrt6);
5704 mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
5708 static struct mlxsw_sp_fib6_entry *
5709 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5710 const struct fib6_info *rt)
5712 struct mlxsw_sp_fib6_entry *fib6_entry;
5713 struct mlxsw_sp_fib_node *fib_node;
5714 struct mlxsw_sp_fib *fib;
5715 struct fib6_info *cmp_rt;
5716 struct mlxsw_sp_vr *vr;
5718 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
5721 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5723 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5724 sizeof(rt->fib6_dst.addr),
5729 fib6_entry = container_of(fib_node->fib_entry,
5730 struct mlxsw_sp_fib6_entry, common);
5731 cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5732 if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
5733 rt->fib6_metric == cmp_rt->fib6_metric &&
5734 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5740 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
5742 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5743 struct mlxsw_sp_fib6_entry *fib6_replaced;
5744 struct fib6_info *rt, *rt_replaced;
5746 if (!fib_node->fib_entry)
5749 fib6_replaced = container_of(fib_node->fib_entry,
5750 struct mlxsw_sp_fib6_entry,
5752 rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5753 rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
5754 if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
5755 rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
5761 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
5762 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5763 struct fib6_info **rt_arr, unsigned int nrt6)
5765 struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
5766 struct mlxsw_sp_fib_entry *replaced;
5767 struct mlxsw_sp_fib_node *fib_node;
5768 struct fib6_info *rt = rt_arr[0];
5771 if (mlxsw_sp->router->aborted)
5774 if (rt->fib6_src.plen)
5777 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5780 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5782 sizeof(rt->fib6_dst.addr),
5784 MLXSW_SP_L3_PROTO_IPV6);
5785 if (IS_ERR(fib_node))
5786 return PTR_ERR(fib_node);
5788 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
5790 if (IS_ERR(fib6_entry)) {
5791 err = PTR_ERR(fib6_entry);
5792 goto err_fib6_entry_create;
5795 if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
5796 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5797 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5801 replaced = fib_node->fib_entry;
5802 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
5804 goto err_fib_node_entry_link;
5806 /* Nothing to replace */
5810 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
5811 fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
5813 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
5817 err_fib_node_entry_link:
5818 fib_node->fib_entry = replaced;
5819 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5820 err_fib6_entry_create:
5821 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5825 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
5826 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5827 struct fib6_info **rt_arr, unsigned int nrt6)
5829 struct mlxsw_sp_fib6_entry *fib6_entry;
5830 struct mlxsw_sp_fib_node *fib_node;
5831 struct fib6_info *rt = rt_arr[0];
5834 if (mlxsw_sp->router->aborted)
5837 if (rt->fib6_src.plen)
5840 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5843 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5845 sizeof(rt->fib6_dst.addr),
5847 MLXSW_SP_L3_PROTO_IPV6);
5848 if (IS_ERR(fib_node))
5849 return PTR_ERR(fib_node);
5851 if (WARN_ON_ONCE(!fib_node->fib_entry)) {
5852 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5856 fib6_entry = container_of(fib_node->fib_entry,
5857 struct mlxsw_sp_fib6_entry, common);
5858 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
5860 goto err_fib6_entry_nexthop_add;
5864 err_fib6_entry_nexthop_add:
5865 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5869 static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5870 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5871 struct fib6_info **rt_arr, unsigned int nrt6)
5873 struct mlxsw_sp_fib6_entry *fib6_entry;
5874 struct mlxsw_sp_fib_node *fib_node;
5875 struct fib6_info *rt = rt_arr[0];
5878 if (mlxsw_sp->router->aborted)
5881 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5884 /* Multipath routes are first added to the FIB trie and only then
5885 * notified. If we vetoed the addition, we will get a delete
5886 * notification for a route we do not have. Therefore, do not warn if
5887 * route was not found.
5889 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5893	/* If not all the nexthops are deleted, then only reduce the nexthop group.
5896 if (nrt6 != fib6_entry->nrt6) {
5897 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
5901 fib_node = fib6_entry->common.fib_node;
5903 err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
5904 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5905 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
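/* Set up a minimal LPM tree, bind every virtual router to it and program a
 * default (zero prefix length) trap entry in each, so that after an abort
 * all routed packets are handed to the kernel for forwarding.
 */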
5909 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5910 enum mlxsw_sp_l3proto proto,
5913 const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
5914 enum mlxsw_reg_ralxx_protocol ralxx_proto =
5915 (enum mlxsw_reg_ralxx_protocol) proto;
5916 struct mlxsw_sp_fib_entry_priv *priv;
5917 char xralta_pl[MLXSW_REG_XRALTA_LEN];
5918 char xralst_pl[MLXSW_REG_XRALST_LEN];
5921 mlxsw_reg_xralta_pack(xralta_pl, true, ralxx_proto, tree_id);
5922 err = ll_ops->ralta_write(mlxsw_sp, xralta_pl);
5926 mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id);
5927 err = ll_ops->ralst_write(mlxsw_sp, xralst_pl);
5931 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5932 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
5933 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5934 char xraltb_pl[MLXSW_REG_XRALTB_LEN];
5936 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
5937 mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, ralxx_proto, tree_id);
5938 err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
5942 priv = mlxsw_sp_fib_entry_priv_create(ll_ops);
5944 return PTR_ERR(priv);
5946 ll_ops->fib_entry_pack(op_ctx, proto, MLXSW_SP_FIB_ENTRY_OP_WRITE,
5947 vr->id, 0, NULL, priv);
5948 ll_ops->fib_entry_act_ip2me_pack(op_ctx);
5949 err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, NULL);
5950 mlxsw_sp_fib_entry_priv_put(priv);
5958 static struct mlxsw_sp_mr_table *
5959 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5961 if (family == RTNL_FAMILY_IPMR)
5962 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
5964 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
5967 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5968 struct mfc_entry_notifier_info *men_info,
5971 struct mlxsw_sp_mr_table *mrt;
5972 struct mlxsw_sp_vr *vr;
5974 if (mlxsw_sp->router->aborted)
5977 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
5981 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5982 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
5985 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
5986 struct mfc_entry_notifier_info *men_info)
5988 struct mlxsw_sp_mr_table *mrt;
5989 struct mlxsw_sp_vr *vr;
5991 if (mlxsw_sp->router->aborted)
5994 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
5998 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
5999 mlxsw_sp_mr_route_del(mrt, men_info->mfc);
6000 mlxsw_sp_vr_put(mlxsw_sp, vr);
6004 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
6005 struct vif_entry_notifier_info *ven_info)
6007 struct mlxsw_sp_mr_table *mrt;
6008 struct mlxsw_sp_rif *rif;
6009 struct mlxsw_sp_vr *vr;
6011 if (mlxsw_sp->router->aborted)
6014 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
6018 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
6019 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
6020 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
6021 ven_info->vif_index,
6022 ven_info->vif_flags, rif);
6026 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
6027 struct vif_entry_notifier_info *ven_info)
6029 struct mlxsw_sp_mr_table *mrt;
6030 struct mlxsw_sp_vr *vr;
6032 if (mlxsw_sp->router->aborted)
6035 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
6039 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
6040 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
6041 mlxsw_sp_vr_put(mlxsw_sp, vr);
6044 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
6046 enum mlxsw_sp_l3proto proto = MLXSW_SP_L3_PROTO_IPV4;
6049 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
6050 MLXSW_SP_LPM_TREE_MIN);
6054 /* The multicast router code does not need an abort trap as by default,
6055 * packets that don't match any routes are trapped to the CPU.
6058 proto = MLXSW_SP_L3_PROTO_IPV6;
6059 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
6060 MLXSW_SP_LPM_TREE_MIN + 1);
6063 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
6064 struct mlxsw_sp_fib_node *fib_node)
6066 struct mlxsw_sp_fib4_entry *fib4_entry;
6068 fib4_entry = container_of(fib_node->fib_entry,
6069 struct mlxsw_sp_fib4_entry, common);
6070 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
6071 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6072 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6075 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
6076 struct mlxsw_sp_fib_node *fib_node)
6078 struct mlxsw_sp_fib6_entry *fib6_entry;
6080 fib6_entry = container_of(fib_node->fib_entry,
6081 struct mlxsw_sp_fib6_entry, common);
6082 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
6083 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
6084 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6087 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
6088 struct mlxsw_sp_fib_node *fib_node)
6090 switch (fib_node->fib->proto) {
6091 case MLXSW_SP_L3_PROTO_IPV4:
6092 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
6094 case MLXSW_SP_L3_PROTO_IPV6:
6095 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
6100 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
6101 struct mlxsw_sp_vr *vr,
6102 enum mlxsw_sp_l3proto proto)
6104 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
6105 struct mlxsw_sp_fib_node *fib_node, *tmp;
6107 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
6108 bool do_break = &tmp->list == &fib->node_list;
6110 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
6116 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
6120 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
6121 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
6123 if (!mlxsw_sp_vr_is_used(vr))
6126 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
6127 mlxsw_sp_mr_table_flush(vr->mr_table[j]);
6128 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
6131	/* If virtual router was only used for IPv4, then it's no longer used.
6133 if (!mlxsw_sp_vr_is_used(vr))
6135 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
6138	/* After flushing all the routes, it is not possible that anyone is still
6139 * using the adjacency index that is discarding packets, so free it in
6140 * case it was allocated.
6142 if (!mlxsw_sp->router->adj_discard_index_valid)
6144 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
6145 mlxsw_sp->router->adj_discard_index);
6146 mlxsw_sp->router->adj_discard_index_valid = false;
6149 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
6153 if (mlxsw_sp->router->aborted)
6155 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
6156 mlxsw_sp_router_fib_flush(mlxsw_sp);
6157 mlxsw_sp->router->aborted = true;
6158 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
6160 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
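/* FIB notifications are copied into mlxsw_sp_fib_event structures in atomic
 * context, queued on the router's event list and later processed from a
 * work item, where the actual device programming takes place.
 */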
6163 struct mlxsw_sp_fib6_event {
6164 struct fib6_info **rt_arr;
6168 struct mlxsw_sp_fib_event {
6169 struct list_head list; /* node in fib queue */
6171 struct mlxsw_sp_fib6_event fib6_event;
6172 struct fib_entry_notifier_info fen_info;
6173 struct fib_rule_notifier_info fr_info;
6174 struct fib_nh_notifier_info fnh_info;
6175 struct mfc_entry_notifier_info men_info;
6176 struct vif_entry_notifier_info ven_info;
6178 struct mlxsw_sp *mlxsw_sp;
6179 unsigned long event;
6184 mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
6185 struct fib6_entry_notifier_info *fen6_info)
6187 struct fib6_info *rt = fen6_info->rt;
6188 struct fib6_info **rt_arr;
6189 struct fib6_info *iter;
6193 nrt6 = fen6_info->nsiblings + 1;
6195 rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
6199 fib6_event->rt_arr = rt_arr;
6200 fib6_event->nrt6 = nrt6;
6205 if (!fen6_info->nsiblings)
6208 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
6209 if (i == fen6_info->nsiblings)
6212 rt_arr[i + 1] = iter;
6213 fib6_info_hold(iter);
6216 WARN_ON_ONCE(i != fen6_info->nsiblings);
6222 mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
6226 for (i = 0; i < fib6_event->nrt6; i++)
6227 mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
6228 kfree(fib6_event->rt_arr);
6231 static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
6232 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6233 struct mlxsw_sp_fib_event *fib_event)
6237 mlxsw_sp_span_respin(mlxsw_sp);
6239 switch (fib_event->event) {
6240 case FIB_EVENT_ENTRY_REPLACE:
6241 err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
6243 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
6244 mlxsw_sp_router_fib_abort(mlxsw_sp);
6246 fib_info_put(fib_event->fen_info.fi);
6248 case FIB_EVENT_ENTRY_DEL:
6249 err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
6251 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
6252 fib_info_put(fib_event->fen_info.fi);
6254 case FIB_EVENT_NH_ADD:
6255 case FIB_EVENT_NH_DEL:
6256 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
6257 fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
6262 static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
6263 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6264 struct mlxsw_sp_fib_event *fib_event)
6268 mlxsw_sp_span_respin(mlxsw_sp);
6270 switch (fib_event->event) {
6271 case FIB_EVENT_ENTRY_REPLACE:
6272 err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
6273 fib_event->fib6_event.nrt6);
6275 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
6276 mlxsw_sp_router_fib_abort(mlxsw_sp);
6278 mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
6280 case FIB_EVENT_ENTRY_APPEND:
6281 err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
6282 fib_event->fib6_event.nrt6);
6284 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
6285 mlxsw_sp_router_fib_abort(mlxsw_sp);
6287 mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
6289 case FIB_EVENT_ENTRY_DEL:
6290 err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
6291 fib_event->fib6_event.nrt6);
6293 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
6294 mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
6299 static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
6300 struct mlxsw_sp_fib_event *fib_event)
6306 mutex_lock(&mlxsw_sp->router->lock);
6307 switch (fib_event->event) {
6308 case FIB_EVENT_ENTRY_REPLACE:
6309 case FIB_EVENT_ENTRY_ADD:
6310 replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
6312 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
6314 mlxsw_sp_router_fib_abort(mlxsw_sp);
6315 mr_cache_put(fib_event->men_info.mfc);
6317 case FIB_EVENT_ENTRY_DEL:
6318 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
6319 mr_cache_put(fib_event->men_info.mfc);
6321 case FIB_EVENT_VIF_ADD:
6322 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
6323 &fib_event->ven_info);
6325 mlxsw_sp_router_fib_abort(mlxsw_sp);
6326 dev_put(fib_event->ven_info.dev);
6328 case FIB_EVENT_VIF_DEL:
6329 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
6330 dev_put(fib_event->ven_info.dev);
6333 mutex_unlock(&mlxsw_sp->router->lock);
6337 static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
6339 struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
6340 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
6341 struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
6342 struct mlxsw_sp_fib_event *next_fib_event;
6343 struct mlxsw_sp_fib_event *fib_event;
6344 int last_family = AF_UNSPEC;
6345 LIST_HEAD(fib_event_queue);
6347 spin_lock_bh(&router->fib_event_queue_lock);
6348 list_splice_init(&router->fib_event_queue, &fib_event_queue);
6349 spin_unlock_bh(&router->fib_event_queue_lock);
6351 /* Router lock is held here to make sure per-instance
6352	 * operation context is not used in between the processing of FIB4/6 events.
6355 mutex_lock(&router->lock);
6356 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6357 list_for_each_entry_safe(fib_event, next_fib_event,
6358 &fib_event_queue, list) {
6359 /* Check if the next entry in the queue exists and it is
6360	 * of the same type (family and event) as the current one.
6361 * In that case it is permitted to do the bulking
6362 * of multiple FIB entries to a single register write.
6364 op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
6365 fib_event->family == next_fib_event->family &&
6366 fib_event->event == next_fib_event->event;
6368	/* In case the family of this entry differs from the previous one, context
6369 * reinitialization is going to be needed now, indicate that.
6370 * Note that since last_family is initialized to AF_UNSPEC, this is always
6371 * going to happen for the first entry processed in the work.
6373 if (fib_event->family != last_family)
6374 op_ctx->initialized = false;
6376 switch (fib_event->family) {
6378 mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
6382 mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
6385 case RTNL_FAMILY_IP6MR:
6386 case RTNL_FAMILY_IPMR:
6387 /* Unlock here as inside FIBMR the lock is taken again
6388 * under RTNL. The per-instance operation context
6389 * is not used by FIBMR.
6391 mutex_unlock(&router->lock);
6392 mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
6394 mutex_lock(&router->lock);
6399 last_family = fib_event->family;
6403 WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
6404 mutex_unlock(&router->lock);
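/* The notifier-side handlers below copy the notifier info into the queued
 * event and take references on the objects it points to (fib_info,
 * fib6_info, MFC cache entries, net devices) so they remain valid until
 * the work item processes the event.
 */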
6407 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
6408 struct fib_notifier_info *info)
6410 struct fib_entry_notifier_info *fen_info;
6411 struct fib_nh_notifier_info *fnh_info;
6413 switch (fib_event->event) {
6414 case FIB_EVENT_ENTRY_REPLACE:
6415 case FIB_EVENT_ENTRY_DEL:
6416 fen_info = container_of(info, struct fib_entry_notifier_info,
6418 fib_event->fen_info = *fen_info;
6419 /* Take reference on fib_info to prevent it from being
6420 * freed while event is queued. Release it afterwards.
6422 fib_info_hold(fib_event->fen_info.fi);
6424 case FIB_EVENT_NH_ADD:
6425 case FIB_EVENT_NH_DEL:
6426 fnh_info = container_of(info, struct fib_nh_notifier_info,
6428 fib_event->fnh_info = *fnh_info;
6429 fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
6434 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
6435 struct fib_notifier_info *info)
6437 struct fib6_entry_notifier_info *fen6_info;
6440 switch (fib_event->event) {
6441 case FIB_EVENT_ENTRY_REPLACE:
6442 case FIB_EVENT_ENTRY_APPEND:
6443 case FIB_EVENT_ENTRY_DEL:
6444 fen6_info = container_of(info, struct fib6_entry_notifier_info,
6446 err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
6457 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
6458 struct fib_notifier_info *info)
6460 switch (fib_event->event) {
6461 case FIB_EVENT_ENTRY_REPLACE:
6462 case FIB_EVENT_ENTRY_ADD:
6463 case FIB_EVENT_ENTRY_DEL:
6464 memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
6465 mr_cache_hold(fib_event->men_info.mfc);
6467 case FIB_EVENT_VIF_ADD:
6468 case FIB_EVENT_VIF_DEL:
6469 memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
6470 dev_hold(fib_event->ven_info.dev);
6475 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
6476 struct fib_notifier_info *info,
6477 struct mlxsw_sp *mlxsw_sp)
6479 struct netlink_ext_ack *extack = info->extack;
6480 struct fib_rule_notifier_info *fr_info;
6481 struct fib_rule *rule;
6484 /* nothing to do at the moment */
6485 if (event == FIB_EVENT_RULE_DEL)
6488 if (mlxsw_sp->router->aborted)
6491 fr_info = container_of(info, struct fib_rule_notifier_info, info);
6492 rule = fr_info->rule;
6494 /* Rule only affects locally generated traffic */
6495 if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
6498 switch (info->family) {
6500 if (!fib4_rule_default(rule) && !rule->l3mdev)
6504 if (!fib6_rule_default(rule) && !rule->l3mdev)
6507 case RTNL_FAMILY_IPMR:
6508 if (!ipmr_rule_default(rule) && !rule->l3mdev)
6511 case RTNL_FAMILY_IP6MR:
6512 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
6518 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
6523 /* Called with rcu_read_lock() */
6524 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
6525 unsigned long event, void *ptr)
6527 struct mlxsw_sp_fib_event *fib_event;
6528 struct fib_notifier_info *info = ptr;
6529 struct mlxsw_sp_router *router;
6532 if ((info->family != AF_INET && info->family != AF_INET6 &&
6533 info->family != RTNL_FAMILY_IPMR &&
6534 info->family != RTNL_FAMILY_IP6MR))
6537 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6540 case FIB_EVENT_RULE_ADD:
6541 case FIB_EVENT_RULE_DEL:
6542 err = mlxsw_sp_router_fib_rule_event(event, info,
6544 return notifier_from_errno(err);
6545 case FIB_EVENT_ENTRY_ADD:
6546 case FIB_EVENT_ENTRY_REPLACE:
6547 case FIB_EVENT_ENTRY_APPEND:
6548 if (router->aborted) {
6549 NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
6550 return notifier_from_errno(-EINVAL);
6552 if (info->family == AF_INET) {
6553 struct fib_entry_notifier_info *fen_info = ptr;
6555 if (fen_info->fi->fib_nh_is_v6) {
6556 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
6557 return notifier_from_errno(-EINVAL);
6559 if (fen_info->fi->nh) {
6560 NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
6561 return notifier_from_errno(-EINVAL);
6563 } else if (info->family == AF_INET6) {
6564 struct fib6_entry_notifier_info *fen6_info;
6566 fen6_info = container_of(info,
6567 struct fib6_entry_notifier_info,
6569 if (fen6_info->rt->nh) {
6570 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
6571 return notifier_from_errno(-EINVAL);
6577 fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
6581 fib_event->mlxsw_sp = router->mlxsw_sp;
6582 fib_event->event = event;
6583 fib_event->family = info->family;
6585 switch (info->family) {
6587 mlxsw_sp_router_fib4_event(fib_event, info);
6590 err = mlxsw_sp_router_fib6_event(fib_event, info);
6594 case RTNL_FAMILY_IP6MR:
6595 case RTNL_FAMILY_IPMR:
6596 mlxsw_sp_router_fibmr_event(fib_event, info);
6600 /* Enqueue the event and trigger the work */
6601 spin_lock_bh(&router->fib_event_queue_lock);
6602 list_add_tail(&fib_event->list, &router->fib_event_queue);
6603 spin_unlock_bh(&router->fib_event_queue_lock);
6604 mlxsw_core_schedule_work(&router->fib_event_work);
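/* RIFs are looked up by their associated netdevice with a linear scan over
 * the RIF array, which is sized according to the MAX_RIFS resource.
 */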
6613 static struct mlxsw_sp_rif *
6614 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
6615 const struct net_device *dev)
6619 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6620 if (mlxsw_sp->router->rifs[i] &&
6621 mlxsw_sp->router->rifs[i]->dev == dev)
6622 return mlxsw_sp->router->rifs[i];
6627 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
6628 const struct net_device *dev)
6630 struct mlxsw_sp_rif *rif;
6632 mutex_lock(&mlxsw_sp->router->lock);
6633 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6634 mutex_unlock(&mlxsw_sp->router->lock);
6639 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
6641 struct mlxsw_sp_rif *rif;
6644 mutex_lock(&mlxsw_sp->router->lock);
6645 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6649 /* We only return the VID for VLAN RIFs. Otherwise we return an
6650 * invalid value (0).
6652 if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
6655 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6658 mutex_unlock(&mlxsw_sp->router->lock);
6662 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
6664 char ritr_pl[MLXSW_REG_RITR_LEN];
6667 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
6668 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6672 mlxsw_reg_ritr_enable_set(ritr_pl, false);
6673 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6676 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
6677 struct mlxsw_sp_rif *rif)
6679 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
6680 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
6681 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
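/* mlxsw_sp_router_rif_gone_sync() above is called when a RIF is going
 * away: the RIF is first disabled in hardware so that no more packets are
 * routed through it, and only then are the nexthops and neighbour entries
 * that were using it flushed.
 */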
6685 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6686 unsigned long event)
6688 struct inet6_dev *inet6_dev;
6689 bool addr_list_empty = true;
6690 struct in_device *idev;
6697 idev = __in_dev_get_rcu(dev);
6698 if (idev && idev->ifa_list)
6699 addr_list_empty = false;
6701 inet6_dev = __in6_dev_get(dev);
6702 if (addr_list_empty && inet6_dev &&
6703 !list_empty(&inet6_dev->addr_list))
6704 addr_list_empty = false;
6707 /* macvlans do not have a RIF, but rather piggy back on the
6708 * RIF of their lower device.
6710 if (netif_is_macvlan(dev) && addr_list_empty)
6713 if (rif && addr_list_empty &&
6714 !netif_is_l3_slave(rif->dev))
6716 /* It is possible we already removed the RIF ourselves
6717 * if it was assigned to a netdev that is now a bridge
 * or LAG slave.
 */
6726 static enum mlxsw_sp_rif_type
6727 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6728 const struct net_device *dev)
6730 enum mlxsw_sp_fid_type type;
6732 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6733 return MLXSW_SP_RIF_TYPE_IPIP_LB;
6735 /* Otherwise RIF type is derived from the type of the underlying FID. */
6736 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6737 type = MLXSW_SP_FID_TYPE_8021Q;
6738 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6739 type = MLXSW_SP_FID_TYPE_8021Q;
6740 else if (netif_is_bridge_master(dev))
6741 type = MLXSW_SP_FID_TYPE_8021D;
6743 type = MLXSW_SP_FID_TYPE_RFID;
6745 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
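/* Illustrative examples of the mapping above: an address configured on a
 * VLAN upper of a bridge, or on a VLAN-aware bridge itself, results in an
 * 802.1Q FID based RIF; a VLAN-unaware bridge results in an 802.1D FID
 * based RIF; anything else (a physical port, a LAG, or a VLAN upper of
 * one of those) falls back to an rFID, i.e. a sub-port RIF. IP-in-IP
 * tunnel devices are special-cased first and get a loopback RIF.
 */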
6748 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
6752 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6753 if (!mlxsw_sp->router->rifs[i]) {
6762 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6764 struct net_device *l3_dev)
6766 struct mlxsw_sp_rif *rif;
6768 rif = kzalloc(rif_size, GFP_KERNEL);
6772 INIT_LIST_HEAD(&rif->nexthop_list);
6773 INIT_LIST_HEAD(&rif->neigh_list);
6775 ether_addr_copy(rif->addr, l3_dev->dev_addr);
6776 rif->mtu = l3_dev->mtu;
6780 rif->rif_index = rif_index;
6785 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6788 return mlxsw_sp->router->rifs[rif_index];
6791 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6793 return rif->rif_index;
6796 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6798 return lb_rif->common.rif_index;
6801 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6803 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
6804 struct mlxsw_sp_vr *ul_vr;
6806 ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
6807 if (WARN_ON(IS_ERR(ul_vr)))
return 0;

return ul_vr->id;
6813 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6815 return lb_rif->ul_rif_id;
6818 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6820 return rif->dev->ifindex;
6823 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
6828 static struct mlxsw_sp_rif *
6829 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
6830 const struct mlxsw_sp_rif_params *params,
6831 struct netlink_ext_ack *extack)
6833 u32 tb_id = l3mdev_fib_table(params->dev);
6834 const struct mlxsw_sp_rif_ops *ops;
6835 struct mlxsw_sp_fid *fid = NULL;
6836 enum mlxsw_sp_rif_type type;
6837 struct mlxsw_sp_rif *rif;
6838 struct mlxsw_sp_vr *vr;
6842 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6843 ops = mlxsw_sp->rif_ops_arr[type];
6845 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
6847 return ERR_CAST(vr);
6850 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
6852 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
6853 goto err_rif_index_alloc;
6856 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
6862 mlxsw_sp->router->rifs[rif_index] = rif;
6863 rif->mlxsw_sp = mlxsw_sp;
6867 fid = ops->fid_get(rif, extack);
6876 ops->setup(rif, params);
6878 err = ops->configure(rif);
6882 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
6883 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
6885 goto err_mr_rif_add;
6888 mlxsw_sp_rif_counters_alloc(rif);
6893 for (i--; i >= 0; i--)
6894 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6895 ops->deconfigure(rif);
6898 mlxsw_sp_fid_put(fid);
6900 mlxsw_sp->router->rifs[rif_index] = NULL;
6904 err_rif_index_alloc:
6906 mlxsw_sp_vr_put(mlxsw_sp, vr);
6907 return ERR_PTR(err);
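/* RIF creation above follows a fixed order: resolve the RIF type and ops,
 * take a reference on the virtual router, allocate a free RIF index, get
 * the backing FID, let the ops configure the hardware, register the RIF
 * with the multicast routing tables and finally allocate counters. The
 * error labels unwind these steps in reverse, and mlxsw_sp_rif_destroy()
 * below performs the same teardown for a fully constructed RIF.
 */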
6910 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
6912 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6913 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6914 struct mlxsw_sp_fid *fid = rif->fid;
6915 struct mlxsw_sp_vr *vr;
6918 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
6919 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6921 mlxsw_sp_rif_counters_free(rif);
6922 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6923 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6924 ops->deconfigure(rif);
if (fid)
6926 /* Loopback RIFs are not associated with a FID. */
6927 mlxsw_sp_fid_put(fid);
6928 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
6932 mlxsw_sp_vr_put(mlxsw_sp, vr);
6935 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
6936 struct net_device *dev)
6938 struct mlxsw_sp_rif *rif;
6940 mutex_lock(&mlxsw_sp->router->lock);
6941 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6944 mlxsw_sp_rif_destroy(rif);
6946 mutex_unlock(&mlxsw_sp->router->lock);
6950 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6951 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6953 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6955 params->vid = mlxsw_sp_port_vlan->vid;
6956 params->lag = mlxsw_sp_port->lagged;
6958 params->lag_id = mlxsw_sp_port->lag_id;
6960 params->system_port = mlxsw_sp_port->local_port;
6963 static struct mlxsw_sp_rif_subport *
6964 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
6966 return container_of(rif, struct mlxsw_sp_rif_subport, common);
6969 static struct mlxsw_sp_rif *
6970 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
6971 const struct mlxsw_sp_rif_params *params,
6972 struct netlink_ext_ack *extack)
6974 struct mlxsw_sp_rif_subport *rif_subport;
6975 struct mlxsw_sp_rif *rif;
6977 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
6979 return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
6981 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6982 refcount_inc(&rif_subport->ref_count);
6986 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
6988 struct mlxsw_sp_rif_subport *rif_subport;
6990 rif_subport = mlxsw_sp_rif_subport_rif(rif);
6991 if (!refcount_dec_and_test(&rif_subport->ref_count))
6994 mlxsw_sp_rif_destroy(rif);
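/* Sub-port RIFs are reference counted: mlxsw_sp_rif_subport_get() above
 * either takes a reference on an existing RIF found by netdev or creates a
 * new one, and mlxsw_sp_rif_subport_put() destroys the RIF only when the
 * last user drops its reference. The {port, VLAN} router join/leave
 * helpers below are the callers of this pair.
 */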
6998 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
6999 struct net_device *l3_dev,
7000 struct netlink_ext_ack *extack)
7002 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
7003 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
7004 struct mlxsw_sp_rif_params params = {
7007 u16 vid = mlxsw_sp_port_vlan->vid;
7008 struct mlxsw_sp_rif *rif;
7009 struct mlxsw_sp_fid *fid;
7012 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
7013 rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
7015 return PTR_ERR(rif);
7017 /* FID was already created, just take a reference */
7018 fid = rif->ops->fid_get(rif, extack);
7019 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
7021 goto err_fid_port_vid_map;
7023 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
7025 goto err_port_vid_learning_set;
7027 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
7028 BR_STATE_FORWARDING);
7030 goto err_port_vid_stp_set;
7032 mlxsw_sp_port_vlan->fid = fid;
7036 err_port_vid_stp_set:
7037 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
7038 err_port_vid_learning_set:
7039 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
7040 err_fid_port_vid_map:
7041 mlxsw_sp_fid_put(fid);
7042 mlxsw_sp_rif_subport_put(rif);
7047 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
7049 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
7050 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
7051 struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
7052 u16 vid = mlxsw_sp_port_vlan->vid;
7054 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
7057 mlxsw_sp_port_vlan->fid = NULL;
7058 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
7059 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
7060 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
7061 mlxsw_sp_fid_put(fid);
7062 mlxsw_sp_rif_subport_put(rif);
7066 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
7068 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
7070 mutex_lock(&mlxsw_sp->router->lock);
7071 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
7072 mutex_unlock(&mlxsw_sp->router->lock);
7075 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
7076 struct net_device *port_dev,
7077 unsigned long event, u16 vid,
7078 struct netlink_ext_ack *extack)
7080 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
7081 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
7083 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
7084 if (WARN_ON(!mlxsw_sp_port_vlan))
7089 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
7092 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
7099 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
7100 unsigned long event,
7101 struct netlink_ext_ack *extack)
7103 if (netif_is_bridge_port(port_dev) ||
7104 netif_is_lag_port(port_dev) ||
7105 netif_is_ovs_port(port_dev))
7108 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
7109 MLXSW_SP_DEFAULT_VID, extack);
7112 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
7113 struct net_device *lag_dev,
7114 unsigned long event, u16 vid,
7115 struct netlink_ext_ack *extack)
7117 struct net_device *port_dev;
7118 struct list_head *iter;
7121 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
7122 if (mlxsw_sp_port_dev_check(port_dev)) {
7123 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
7135 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
7136 unsigned long event,
7137 struct netlink_ext_ack *extack)
7139 if (netif_is_bridge_port(lag_dev))
7142 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
7143 MLXSW_SP_DEFAULT_VID, extack);
7146 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
7147 struct net_device *l3_dev,
7148 unsigned long event,
7149 struct netlink_ext_ack *extack)
7151 struct mlxsw_sp_rif_params params = {
7154 struct mlxsw_sp_rif *rif;
7158 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
7160 return PTR_ERR(rif);
7163 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7164 mlxsw_sp_rif_destroy(rif);
7171 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
7172 struct net_device *vlan_dev,
7173 unsigned long event,
7174 struct netlink_ext_ack *extack)
7176 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
7177 u16 vid = vlan_dev_vlan_id(vlan_dev);
7179 if (netif_is_bridge_port(vlan_dev))
7182 if (mlxsw_sp_port_dev_check(real_dev))
7183 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
7184 event, vid, extack);
7185 else if (netif_is_lag_master(real_dev))
7186 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
7188 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
7189 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
7195 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
7197 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
7198 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
7200 return ether_addr_equal_masked(mac, vrrp4, mask);
7203 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
7205 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
7206 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
7208 return ether_addr_equal_masked(mac, vrrp6, mask);
7211 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
7212 const u8 *mac, bool adding)
7214 char ritr_pl[MLXSW_REG_RITR_LEN];
7215 u8 vrrp_id = adding ? mac[5] : 0;
7218 if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
7219 !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
7222 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
7223 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7227 if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
7228 mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
7230 mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
7232 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
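/* VRRP virtual router MACs have the form 00:00:5e:00:01:XX for IPv4 and
 * 00:00:5e:00:02:XX for IPv6, where XX is the virtual router ID. That is
 * why mlxsw_sp_rif_vrrp_op() above programs mac[5] as the VRRP ID: e.g. a
 * macvlan with MAC 00:00:5e:00:01:07 enables VRID 7 on the RIF.
 */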
7235 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
7236 const struct net_device *macvlan_dev,
7237 struct netlink_ext_ack *extack)
7239 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
7240 struct mlxsw_sp_rif *rif;
7243 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
7245 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
7249 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
7250 mlxsw_sp_fid_index(rif->fid), true);
7254 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
7255 macvlan_dev->dev_addr, true);
7257 goto err_rif_vrrp_add;
7259 /* Make sure the bridge driver does not have this MAC pointing at
 * some other port.
 */
7262 if (rif->ops->fdb_del)
7263 rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
7268 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
7269 mlxsw_sp_fid_index(rif->fid), false);
7273 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
7274 const struct net_device *macvlan_dev)
7276 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
7277 struct mlxsw_sp_rif *rif;
7279 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
7280 /* If we do not have a RIF, then we already took care of
7281 * removing the macvlan's MAC during RIF deletion.
 */
if (!rif)
return;
7285 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
7287 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
7288 mlxsw_sp_fid_index(rif->fid), false);
7291 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
7292 const struct net_device *macvlan_dev)
7294 mutex_lock(&mlxsw_sp->router->lock);
7295 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
7296 mutex_unlock(&mlxsw_sp->router->lock);
7299 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
7300 struct net_device *macvlan_dev,
7301 unsigned long event,
7302 struct netlink_ext_ack *extack)
7306 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
7308 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
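/* mlxsw_sp_router_port_check_rif_addr() below enforces a device
 * limitation: all router interface MAC addresses must be equal in every
 * bit that is set in mlxsw_sp->mac_mask, i.e. they may only differ in the
 * unmasked bits. As an illustration only (the actual mask is per-ASIC), if
 * the mask covered everything but the last byte, 00:11:22:33:44:01 and
 * 00:11:22:33:44:02 could coexist as RIF MACs, while 00:11:22:33:55:01
 * would be rejected. macvlan and L3 master devices are exempt since no RIF
 * is created for them, and IPIP loopback RIFs are skipped as they do not
 * own a MAC.
 */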
7315 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
7316 struct net_device *dev,
7317 const unsigned char *dev_addr,
7318 struct netlink_ext_ack *extack)
7320 struct mlxsw_sp_rif *rif;
7323 /* A RIF is not created for macvlan netdevs. Their MAC is used to
 * populate the FDB
 */
7326 if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
7329 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
7330 rif = mlxsw_sp->router->rifs[i];
7331 if (rif && rif->ops &&
7332 rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
7334 if (rif && rif->dev && rif->dev != dev &&
7335 !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
7336 mlxsw_sp->mac_mask)) {
7337 NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
7345 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
7346 struct net_device *dev,
7347 unsigned long event,
7348 struct netlink_ext_ack *extack)
7350 if (mlxsw_sp_port_dev_check(dev))
7351 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
7352 else if (netif_is_lag_master(dev))
7353 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
7354 else if (netif_is_bridge_master(dev))
7355 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
7357 else if (is_vlan_dev(dev))
7358 return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
7360 else if (netif_is_macvlan(dev))
7361 return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
7367 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
7368 unsigned long event, void *ptr)
7370 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
7371 struct net_device *dev = ifa->ifa_dev->dev;
7372 struct mlxsw_sp_router *router;
7373 struct mlxsw_sp_rif *rif;
7376 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
7377 if (event == NETDEV_UP)
7380 router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
7381 mutex_lock(&router->lock);
7382 rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
7383 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7386 err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
7388 mutex_unlock(&router->lock);
7389 return notifier_from_errno(err);
7392 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
7393 unsigned long event, void *ptr)
7395 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
7396 struct net_device *dev = ivi->ivi_dev->dev;
7397 struct mlxsw_sp *mlxsw_sp;
7398 struct mlxsw_sp_rif *rif;
7401 mlxsw_sp = mlxsw_sp_lower_get(dev);
7405 mutex_lock(&mlxsw_sp->router->lock);
7406 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7407 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7410 err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7415 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
7417 mutex_unlock(&mlxsw_sp->router->lock);
7418 return notifier_from_errno(err);
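/* IPv6 address events are handled differently from IPv4 ones: the
 * inet6addr notifier chain is atomic, so the handler cannot take the
 * router mutex directly and instead defers the event to the work item
 * below, which performs the actual RIF configuration in process context.
 */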
7421 struct mlxsw_sp_inet6addr_event_work {
7422 struct work_struct work;
7423 struct mlxsw_sp *mlxsw_sp;
7424 struct net_device *dev;
7425 unsigned long event;
7428 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
7430 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
7431 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
7432 struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
7433 struct net_device *dev = inet6addr_work->dev;
7434 unsigned long event = inet6addr_work->event;
7435 struct mlxsw_sp_rif *rif;
7438 mutex_lock(&mlxsw_sp->router->lock);
7440 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7441 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7444 __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
7446 mutex_unlock(&mlxsw_sp->router->lock);
7449 kfree(inet6addr_work);
7452 /* Called with rcu_read_lock() */
7453 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
7454 unsigned long event, void *ptr)
7456 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
7457 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
7458 struct net_device *dev = if6->idev->dev;
7459 struct mlxsw_sp_router *router;
7461 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
7462 if (event == NETDEV_UP)
7465 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
7466 if (!inet6addr_work)
7469 router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
7470 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
7471 inet6addr_work->mlxsw_sp = router->mlxsw_sp;
7472 inet6addr_work->dev = dev;
7473 inet6addr_work->event = event;
7475 mlxsw_core_schedule_work(&inet6addr_work->work);
7480 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
7481 unsigned long event, void *ptr)
7483 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
7484 struct net_device *dev = i6vi->i6vi_dev->dev;
7485 struct mlxsw_sp *mlxsw_sp;
7486 struct mlxsw_sp_rif *rif;
7489 mlxsw_sp = mlxsw_sp_lower_get(dev);
7493 mutex_lock(&mlxsw_sp->router->lock);
7494 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7495 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7498 err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7503 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
7505 mutex_unlock(&mlxsw_sp->router->lock);
7506 return notifier_from_errno(err);
7509 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
7510 const char *mac, int mtu)
7512 char ritr_pl[MLXSW_REG_RITR_LEN];
7515 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
7516 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7520 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
7521 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
7522 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
7523 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7527 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
7528 struct mlxsw_sp_rif *rif)
7530 struct net_device *dev = rif->dev;
7534 fid_index = mlxsw_sp_fid_index(rif->fid);
7536 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
7540 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
7545 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
7547 goto err_rif_fdb_op;
7549 if (rif->mtu != dev->mtu) {
7550 struct mlxsw_sp_vr *vr;
7553 /* The RIF is relevant only to its mr_table instance, as unlike
7554 * unicast routing, in multicast routing a RIF cannot be shared
7555 * between several multicast routing tables.
7557 vr = &mlxsw_sp->router->vrs[rif->vr_id];
7558 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
7559 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
7563 ether_addr_copy(rif->addr, dev->dev_addr);
7564 rif->mtu = dev->mtu;
7566 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
7571 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
7573 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
7577 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
7578 struct netdev_notifier_pre_changeaddr_info *info)
7580 struct netlink_ext_ack *extack;
7582 extack = netdev_notifier_info_to_extack(&info->info);
7583 return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
7584 info->dev_addr, extack);
7587 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
7588 unsigned long event, void *ptr)
7590 struct mlxsw_sp *mlxsw_sp;
7591 struct mlxsw_sp_rif *rif;
7594 mlxsw_sp = mlxsw_sp_lower_get(dev);
7598 mutex_lock(&mlxsw_sp->router->lock);
7599 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7604 case NETDEV_CHANGEMTU:
7605 case NETDEV_CHANGEADDR:
7606 err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
7608 case NETDEV_PRE_CHANGEADDR:
7609 err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
7614 mutex_unlock(&mlxsw_sp->router->lock);
7618 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
7619 struct net_device *l3_dev,
7620 struct netlink_ext_ack *extack)
7622 struct mlxsw_sp_rif *rif;
7624 /* If netdev is already associated with a RIF, then we need to
7625 * destroy it and create a new one with the new virtual router ID.
 */
7627 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
if (rif)
7629 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
extack);
7632 return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
7635 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
7636 struct net_device *l3_dev)
7638 struct mlxsw_sp_rif *rif;
7640 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7643 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
7646 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
7647 struct netdev_notifier_changeupper_info *info)
7649 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
7652 /* We do not create a RIF for a macvlan, but only use it to
7653 * direct more MAC addresses to the router.
7655 if (!mlxsw_sp || netif_is_macvlan(l3_dev))
7658 mutex_lock(&mlxsw_sp->router->lock);
7660 case NETDEV_PRECHANGEUPPER:
7662 case NETDEV_CHANGEUPPER:
7663 if (info->linking) {
7664 struct netlink_ext_ack *extack;
7666 extack = netdev_notifier_info_to_extack(&info->info);
7667 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
7669 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
7673 mutex_unlock(&mlxsw_sp->router->lock);
7678 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
7679 struct netdev_nested_priv *priv)
7681 struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
7683 if (!netif_is_macvlan(dev))
7686 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
7687 mlxsw_sp_fid_index(rif->fid), false);
7690 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
7692 struct netdev_nested_priv priv = {
7693 .data = (void *)rif,
7696 if (!netif_is_macvlan_port(rif->dev))
7699 netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
7700 return netdev_walk_all_upper_dev_rcu(rif->dev,
7701 __mlxsw_sp_rif_macvlan_flush, &priv);
7704 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
7705 const struct mlxsw_sp_rif_params *params)
7707 struct mlxsw_sp_rif_subport *rif_subport;
7709 rif_subport = mlxsw_sp_rif_subport_rif(rif);
7710 refcount_set(&rif_subport->ref_count, 1);
7711 rif_subport->vid = params->vid;
7712 rif_subport->lag = params->lag;
7714 rif_subport->lag_id = params->lag_id;
7716 rif_subport->system_port = params->system_port;
7719 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
7721 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7722 struct mlxsw_sp_rif_subport *rif_subport;
7723 char ritr_pl[MLXSW_REG_RITR_LEN];
7725 rif_subport = mlxsw_sp_rif_subport_rif(rif);
7726 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
7727 rif->rif_index, rif->vr_id, rif->dev->mtu);
7728 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7729 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
7730 rif_subport->lag ? rif_subport->lag_id :
7731 rif_subport->system_port,
7734 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7737 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
7741 err = mlxsw_sp_rif_subport_op(rif, true);
7745 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7746 mlxsw_sp_fid_index(rif->fid), true);
7748 goto err_rif_fdb_op;
7750 mlxsw_sp_fid_rif_set(rif->fid, rif);
7754 mlxsw_sp_rif_subport_op(rif, false);
7758 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
7760 struct mlxsw_sp_fid *fid = rif->fid;
7762 mlxsw_sp_fid_rif_set(fid, NULL);
7763 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7764 mlxsw_sp_fid_index(fid), false);
7765 mlxsw_sp_rif_macvlan_flush(rif);
7766 mlxsw_sp_rif_subport_op(rif, false);
7769 static struct mlxsw_sp_fid *
7770 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
7771 struct netlink_ext_ack *extack)
7773 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
7776 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
7777 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
7778 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
7779 .setup = mlxsw_sp_rif_subport_setup,
7780 .configure = mlxsw_sp_rif_subport_configure,
7781 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
7782 .fid_get = mlxsw_sp_rif_subport_fid_get,
7785 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
7786 enum mlxsw_reg_ritr_if_type type,
7787 u16 vid_fid, bool enable)
7789 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7790 char ritr_pl[MLXSW_REG_RITR_LEN];
7792 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
7794 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7795 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
7797 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
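/* The "router port" is a virtual port one past the last possible port of
 * the device. It is the member added to a FID's broadcast and multicast
 * flood tables below so that flooded packets are also delivered to the
 * router block.
 */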
7800 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
7802 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
7805 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
7807 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7808 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7811 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
7816 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7817 mlxsw_sp_router_port(mlxsw_sp), true);
7819 goto err_fid_mc_flood_set;
7821 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7822 mlxsw_sp_router_port(mlxsw_sp), true);
7824 goto err_fid_bc_flood_set;
7826 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7827 mlxsw_sp_fid_index(rif->fid), true);
7829 goto err_rif_fdb_op;
7831 mlxsw_sp_fid_rif_set(rif->fid, rif);
7835 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7836 mlxsw_sp_router_port(mlxsw_sp), false);
7837 err_fid_bc_flood_set:
7838 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7839 mlxsw_sp_router_port(mlxsw_sp), false);
7840 err_fid_mc_flood_set:
7841 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7845 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
7847 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7848 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7849 struct mlxsw_sp_fid *fid = rif->fid;
7851 mlxsw_sp_fid_rif_set(fid, NULL);
7852 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7853 mlxsw_sp_fid_index(fid), false);
7854 mlxsw_sp_rif_macvlan_flush(rif);
7855 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7856 mlxsw_sp_router_port(mlxsw_sp), false);
7857 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7858 mlxsw_sp_router_port(mlxsw_sp), false);
7859 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7862 static struct mlxsw_sp_fid *
7863 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
7864 struct netlink_ext_ack *extack)
7866 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
7869 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7871 struct switchdev_notifier_fdb_info info;
7872 struct net_device *dev;
7874 dev = br_fdb_find_port(rif->dev, mac, 0);
7880 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7884 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
7885 .type = MLXSW_SP_RIF_TYPE_FID,
7886 .rif_size = sizeof(struct mlxsw_sp_rif),
7887 .configure = mlxsw_sp_rif_fid_configure,
7888 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
7889 .fid_get = mlxsw_sp_rif_fid_fid_get,
7890 .fdb_del = mlxsw_sp_rif_fid_fdb_del,
7893 static struct mlxsw_sp_fid *
7894 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
7895 struct netlink_ext_ack *extack)
7897 struct net_device *br_dev;
7901 if (is_vlan_dev(rif->dev)) {
7902 vid = vlan_dev_vlan_id(rif->dev);
7903 br_dev = vlan_dev_real_dev(rif->dev);
7904 if (WARN_ON(!netif_is_bridge_master(br_dev)))
7905 return ERR_PTR(-EINVAL);
7907 err = br_vlan_get_pvid(rif->dev, &vid);
7908 if (err < 0 || !vid) {
7909 NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
7910 return ERR_PTR(-EINVAL);
7914 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
7917 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7919 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7920 struct switchdev_notifier_fdb_info info;
7921 struct net_device *br_dev;
7922 struct net_device *dev;
7924 br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
7925 dev = br_fdb_find_port(br_dev, mac, vid);
7931 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7935 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
7936 .type = MLXSW_SP_RIF_TYPE_VLAN,
7937 .rif_size = sizeof(struct mlxsw_sp_rif),
7938 .configure = mlxsw_sp_rif_fid_configure,
7939 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
7940 .fid_get = mlxsw_sp_rif_vlan_fid_get,
7941 .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
7944 static struct mlxsw_sp_rif_ipip_lb *
7945 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
7947 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
7951 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
7952 const struct mlxsw_sp_rif_params *params)
7954 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
7955 struct mlxsw_sp_rif_ipip_lb *rif_lb;
7957 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
7959 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
7960 rif_lb->lb_config = params_lb->lb_config;
7964 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7966 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7967 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7968 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7969 struct mlxsw_sp_vr *ul_vr;
7972 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
7974 return PTR_ERR(ul_vr);
7976 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
7978 goto err_loopback_op;
7980 lb_rif->ul_vr_id = ul_vr->id;
7981 lb_rif->ul_rif_id = 0;
7986 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
7990 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
7992 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7993 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7994 struct mlxsw_sp_vr *ul_vr;
7996 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
7997 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
8000 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
8003 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
8004 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
8005 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
8006 .setup = mlxsw_sp_rif_ipip_lb_setup,
8007 .configure = mlxsw_sp1_rif_ipip_lb_configure,
8008 .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
8011 const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
8012 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
8013 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
8014 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
8015 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
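/* Spectrum-1 above binds an IPIP loopback RIF directly to the underlay
 * virtual router (ul_vr_id). Spectrum-2, handled below, instead points the
 * loopback at an underlay RIF (ul_rif_id) that is created once per virtual
 * router and shared between tunnels via ul_rif_refcnt.
 */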
8019 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
8021 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
8022 char ritr_pl[MLXSW_REG_RITR_LEN];
8024 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
8025 ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
8026 mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
8027 MLXSW_REG_RITR_LOOPBACK_GENERIC);
8029 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8032 static struct mlxsw_sp_rif *
8033 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
8034 struct netlink_ext_ack *extack)
8036 struct mlxsw_sp_rif *ul_rif;
8040 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
8042 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8043 return ERR_PTR(err);
8046 ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
8048 return ERR_PTR(-ENOMEM);
8050 mlxsw_sp->router->rifs[rif_index] = ul_rif;
8051 ul_rif->mlxsw_sp = mlxsw_sp;
8052 err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
8059 mlxsw_sp->router->rifs[rif_index] = NULL;
8061 return ERR_PTR(err);
8064 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
8066 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
8068 mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
8069 mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
8073 static struct mlxsw_sp_rif *
8074 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
8075 struct netlink_ext_ack *extack)
8077 struct mlxsw_sp_vr *vr;
8080 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
8082 return ERR_CAST(vr);
8084 if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
8087 vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
8088 if (IS_ERR(vr->ul_rif)) {
8089 err = PTR_ERR(vr->ul_rif);
8090 goto err_ul_rif_create;
8094 refcount_set(&vr->ul_rif_refcnt, 1);
8099 mlxsw_sp_vr_put(mlxsw_sp, vr);
8100 return ERR_PTR(err);
8103 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
8105 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
8106 struct mlxsw_sp_vr *vr;
8108 vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
8110 if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
8114 mlxsw_sp_ul_rif_destroy(ul_rif);
8115 mlxsw_sp_vr_put(mlxsw_sp, vr);
8118 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
8121 struct mlxsw_sp_rif *ul_rif;
8124 mutex_lock(&mlxsw_sp->router->lock);
8125 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
8126 if (IS_ERR(ul_rif)) {
8127 err = PTR_ERR(ul_rif);
8130 *ul_rif_index = ul_rif->rif_index;
8132 mutex_unlock(&mlxsw_sp->router->lock);
8136 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
8138 struct mlxsw_sp_rif *ul_rif;
8140 mutex_lock(&mlxsw_sp->router->lock);
8141 ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
8142 if (WARN_ON(!ul_rif))
8145 mlxsw_sp_ul_rif_put(ul_rif);
8147 mutex_unlock(&mlxsw_sp->router->lock);
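/* Illustrative usage sketch (an assumption, not code taken from this
 * driver): a tunnel implementation that needs the underlay RIF index for
 * an underlay table ul_tb_id could use the pair above roughly as follows,
 * dropping the reference once its decap entry is removed:
 *
 *	u16 ul_rif_index;
 *	int err;
 *
 *	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, ul_tb_id, &ul_rif_index);
 *	if (err)
 *		return err;
 *	... program the decap entry using ul_rif_index ...
 *	mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
 */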
8151 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
8153 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
8154 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
8155 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8156 struct mlxsw_sp_rif *ul_rif;
8159 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
8161 return PTR_ERR(ul_rif);
8163 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
8165 goto err_loopback_op;
8167 lb_rif->ul_vr_id = 0;
8168 lb_rif->ul_rif_id = ul_rif->rif_index;
8173 mlxsw_sp_ul_rif_put(ul_rif);
8177 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
8179 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
8180 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8181 struct mlxsw_sp_rif *ul_rif;
8183 ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
8184 mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
8185 mlxsw_sp_ul_rif_put(ul_rif);
8188 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
8189 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
8190 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
8191 .setup = mlxsw_sp_rif_ipip_lb_setup,
8192 .configure = mlxsw_sp2_rif_ipip_lb_configure,
8193 .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
8196 const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
8197 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
8198 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
8199 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
8200 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
8203 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
8205 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
8207 mlxsw_sp->router->rifs = kcalloc(max_rifs,
8208 sizeof(struct mlxsw_sp_rif *),
8210 if (!mlxsw_sp->router->rifs)
8216 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
8220 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
8221 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
8223 kfree(mlxsw_sp->router->rifs);
8227 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
8229 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
8231 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
8232 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
8235 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
8239 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
8240 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
8242 err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
8245 err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
8249 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
8252 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
8254 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
8257 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
8259 struct mlxsw_sp_router *router;
8261 /* Flush pending FIB notifications and then flush the device's
8262 * table before requesting another dump. The FIB notification
8263 * block is unregistered, so no need to take RTNL.
8265 mlxsw_core_flush_owq();
8266 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
8267 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
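/* The ECMP hash configuration below seeds the hash with a jhash of the
 * switch base MAC, so that neighbouring switches are unlikely to make
 * identical multipath choices for the same flow, and enables either
 * L3-only or L3+L4 header fields depending on the kernel's IPv4/IPv6
 * multipath hash policy. Without CONFIG_IP_ROUTE_MULTIPATH the whole
 * setup is compiled out.
 */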
8270 #ifdef CONFIG_IP_ROUTE_MULTIPATH
8271 static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
8273 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
8276 static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
8278 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
8281 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
8283 struct net *net = mlxsw_sp_net(mlxsw_sp);
8284 bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
8286 mlxsw_sp_mp_hash_header_set(recr2_pl,
8287 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
8288 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
8289 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
8290 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);

if (only_l3)
return;

8293 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
8294 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
8295 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
8296 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
8299 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
8301 bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
8303 mlxsw_sp_mp_hash_header_set(recr2_pl,
8304 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
8305 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
8306 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
8307 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
8308 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
if (only_l3) {
8310 mlxsw_sp_mp_hash_field_set(recr2_pl,
8311 MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
} else {
8313 mlxsw_sp_mp_hash_header_set(recr2_pl,
8314 MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
8315 mlxsw_sp_mp_hash_field_set(recr2_pl,
8316 MLXSW_REG_RECR2_TCP_UDP_SPORT);
8317 mlxsw_sp_mp_hash_field_set(recr2_pl,
8318 MLXSW_REG_RECR2_TCP_UDP_DPORT);
}
8322 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
8324 char recr2_pl[MLXSW_REG_RECR2_LEN];
8327 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
8328 mlxsw_reg_recr2_pack(recr2_pl, seed);
8329 mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
8330 mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
8332 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
#else
8335 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
return 0;
#endif
8341 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
8343 char rdpm_pl[MLXSW_REG_RDPM_LEN];
8346 MLXSW_REG_ZERO(rdpm, rdpm_pl);
8348 /* HW is determining switch priority based on DSCP-bits, but the
8349 * kernel is still doing that based on the ToS. Since there's a
8350 * mismatch in bits we need to make sure to translate the right
8351 * value ToS would observe, skipping the 2 least-significant ECN bits.
8353 for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
8354 mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
8356 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
8359 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
8361 struct net *net = mlxsw_sp_net(mlxsw_sp);
8362 bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
8363 char rgcr_pl[MLXSW_REG_RGCR_LEN];
8366 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
8368 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
8370 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
8371 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
8372 mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
8373 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8376 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8378 char rgcr_pl[MLXSW_REG_RGCR_LEN];
8380 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
8381 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8384 static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
8385 .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
8386 .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
8387 .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
8388 .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
8389 .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
8390 .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
8391 .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
8392 .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
8393 .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
8394 .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
8395 .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
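/* The "ll" (low-level) ops above implement FIB programming through the
 * legacy RALTA/RALST/RALTB registers. The indirection through
 * router->proto_ll_ops, set up per protocol in mlxsw_sp_router_init()
 * below, is what allows an alternative low-level implementation to be
 * plugged in.
 */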
8398 static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
8400 size_t max_size = 0;
8403 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8404 size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
8406 if (size > max_size)
8409 router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
8411 if (!router->ll_op_ctx)
8413 INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
8417 static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
8419 WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
8420 kfree(router->ll_op_ctx);
8423 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
8424 struct netlink_ext_ack *extack)
8426 struct mlxsw_sp_router *router;
8429 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
8432 mutex_init(&router->lock);
8433 mlxsw_sp->router = router;
8434 router->mlxsw_sp = mlxsw_sp;
8436 router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_router_ll_basic_ops;
8437 router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
8439 err = mlxsw_sp_router_ll_op_ctx_init(router);
8441 goto err_ll_op_ctx_init;
8443 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
8444 err = __mlxsw_sp_router_init(mlxsw_sp);
8446 goto err_router_init;
8448 err = mlxsw_sp_rifs_init(mlxsw_sp);
8452 err = mlxsw_sp_ipips_init(mlxsw_sp);
8454 goto err_ipips_init;
8456 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
8457 &mlxsw_sp_nexthop_ht_params);
8459 goto err_nexthop_ht_init;
8461 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
8462 &mlxsw_sp_nexthop_group_ht_params);
8464 goto err_nexthop_group_ht_init;
8466 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
8467 err = mlxsw_sp_lpm_init(mlxsw_sp);
8471 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
8475 err = mlxsw_sp_vrs_init(mlxsw_sp);
8479 err = mlxsw_sp_neigh_init(mlxsw_sp);
8481 goto err_neigh_init;
8483 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
8485 goto err_mp_hash_init;
8487 err = mlxsw_sp_dscp_init(mlxsw_sp);
8491 INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
8492 INIT_LIST_HEAD(&router->fib_event_queue);
8493 spin_lock_init(&router->fib_event_queue_lock);
8495 router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
8496 err = register_inetaddr_notifier(&router->inetaddr_nb);
8498 goto err_register_inetaddr_notifier;
8500 router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
8501 err = register_inet6addr_notifier(&router->inet6addr_nb);
8503 goto err_register_inet6addr_notifier;
8505 mlxsw_sp->router->netevent_nb.notifier_call =
8506 mlxsw_sp_router_netevent_event;
8507 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8509 goto err_register_netevent_notifier;
8511 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
8512 err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8513 &mlxsw_sp->router->fib_nb,
8514 mlxsw_sp_router_fib_dump_flush, extack);
8516 goto err_register_fib_notifier;
8520 err_register_fib_notifier:
8521 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8522 err_register_netevent_notifier:
8523 unregister_inet6addr_notifier(&router->inet6addr_nb);
8524 err_register_inet6addr_notifier:
8525 unregister_inetaddr_notifier(&router->inetaddr_nb);
8526 err_register_inetaddr_notifier:
8527 mlxsw_core_flush_owq();
8528 WARN_ON(!list_empty(&router->fib_event_queue));
8531 mlxsw_sp_neigh_fini(mlxsw_sp);
8533 mlxsw_sp_vrs_fini(mlxsw_sp);
8535 mlxsw_sp_mr_fini(mlxsw_sp);
8537 mlxsw_sp_lpm_fini(mlxsw_sp);
8539 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8540 err_nexthop_group_ht_init:
8541 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8542 err_nexthop_ht_init:
8543 mlxsw_sp_ipips_fini(mlxsw_sp);
8545 mlxsw_sp_rifs_fini(mlxsw_sp);
8547 __mlxsw_sp_router_fini(mlxsw_sp);
8549 mlxsw_sp_router_ll_op_ctx_fini(router);
8551 mutex_destroy(&mlxsw_sp->router->lock);
8552 kfree(mlxsw_sp->router);
8556 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8558 unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8559 &mlxsw_sp->router->fib_nb);
8560 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8561 unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
8562 unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
8563 mlxsw_core_flush_owq();
8564 WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
8565 mlxsw_sp_neigh_fini(mlxsw_sp);
8566 mlxsw_sp_vrs_fini(mlxsw_sp);
8567 mlxsw_sp_mr_fini(mlxsw_sp);
8568 mlxsw_sp_lpm_fini(mlxsw_sp);
8569 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8570 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8571 mlxsw_sp_ipips_fini(mlxsw_sp);
8572 mlxsw_sp_rifs_fini(mlxsw_sp);
8573 __mlxsw_sp_router_fini(mlxsw_sp);
8574 mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
8575 mutex_destroy(&mlxsw_sp->router->lock);
8576 kfree(mlxsw_sp->router);