1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */
4 #include <linux/kernel.h>
5 #include <linux/types.h>
6 #include <linux/rhashtable.h>
7 #include <linux/bitops.h>
9 #include <linux/notifier.h>
10 #include <linux/inetdevice.h>
11 #include <linux/netdevice.h>
12 #include <linux/if_bridge.h>
13 #include <linux/socket.h>
14 #include <linux/route.h>
15 #include <linux/gcd.h>
16 #include <linux/if_macvlan.h>
17 #include <linux/refcount.h>
18 #include <linux/jhash.h>
19 #include <linux/net_namespace.h>
20 #include <linux/mutex.h>
21 #include <net/netevent.h>
22 #include <net/neighbour.h>
24 #include <net/ip_fib.h>
25 #include <net/ip6_fib.h>
26 #include <net/nexthop.h>
27 #include <net/fib_rules.h>
28 #include <net/ip_tunnels.h>
29 #include <net/l3mdev.h>
30 #include <net/addrconf.h>
31 #include <net/ndisc.h>
33 #include <net/fib_notifier.h>
34 #include <net/switchdev.h>
39 #include "spectrum_cnt.h"
40 #include "spectrum_dpipe.h"
41 #include "spectrum_ipip.h"
42 #include "spectrum_mr.h"
43 #include "spectrum_mr_tcam.h"
44 #include "spectrum_router.h"
45 #include "spectrum_span.h"
49 struct mlxsw_sp_lpm_tree;
50 struct mlxsw_sp_rif_ops;
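/* A router interface (RIF). Roughly, the L3 representation the device keeps
 * for a netdev (port, LAG, VLAN, bridge or loopback): its MAC address, the
 * FID it is bound to, the nexthops and neighbours resolved through it, and
 * optional ingress/egress packet counters.
 */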
52 struct mlxsw_sp_rif {
53 struct list_head nexthop_list;
54 struct list_head neigh_list;
55 struct net_device *dev; /* NULL for underlay RIF */
56 struct mlxsw_sp_fid *fid;
57 unsigned char addr[ETH_ALEN];
61 const struct mlxsw_sp_rif_ops *ops;
62 struct mlxsw_sp *mlxsw_sp;
64 unsigned int counter_ingress;
65 bool counter_ingress_valid;
66 unsigned int counter_egress;
67 bool counter_egress_valid;
70 struct mlxsw_sp_rif_params {
71 struct net_device *dev;
80 struct mlxsw_sp_rif_subport {
81 struct mlxsw_sp_rif common;
91 struct mlxsw_sp_rif_ipip_lb {
92 struct mlxsw_sp_rif common;
93 struct mlxsw_sp_rif_ipip_lb_config lb_config;
94 u16 ul_vr_id; /* Reserved for Spectrum-2. */
95 u16 ul_rif_id; /* Reserved for Spectrum. */
98 struct mlxsw_sp_rif_params_ipip_lb {
99 struct mlxsw_sp_rif_params common;
100 struct mlxsw_sp_rif_ipip_lb_config lb_config;
103 struct mlxsw_sp_rif_ops {
104 enum mlxsw_sp_rif_type type;
107 void (*setup)(struct mlxsw_sp_rif *rif,
108 const struct mlxsw_sp_rif_params *params);
109 int (*configure)(struct mlxsw_sp_rif *rif);
110 void (*deconfigure)(struct mlxsw_sp_rif *rif);
111 struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
112 struct netlink_ext_ack *extack);
113 void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
116 static struct mlxsw_sp_rif *
117 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
118 const struct net_device *dev);
119 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
120 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
121 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
122 struct mlxsw_sp_lpm_tree *lpm_tree);
123 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
124 const struct mlxsw_sp_fib *fib,
126 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
127 const struct mlxsw_sp_fib *fib);
129 static unsigned int *
130 mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
131 enum mlxsw_sp_rif_counter_dir dir)
134 case MLXSW_SP_RIF_COUNTER_EGRESS:
135 return &rif->counter_egress;
136 case MLXSW_SP_RIF_COUNTER_INGRESS:
137 return &rif->counter_ingress;
143 mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
144 enum mlxsw_sp_rif_counter_dir dir)
147 case MLXSW_SP_RIF_COUNTER_EGRESS:
148 return rif->counter_egress_valid;
149 case MLXSW_SP_RIF_COUNTER_INGRESS:
150 return rif->counter_ingress_valid;
156 mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
157 enum mlxsw_sp_rif_counter_dir dir,
161 case MLXSW_SP_RIF_COUNTER_EGRESS:
162 rif->counter_egress_valid = valid;
164 case MLXSW_SP_RIF_COUNTER_INGRESS:
165 rif->counter_ingress_valid = valid;
170 static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
171 unsigned int counter_index, bool enable,
172 enum mlxsw_sp_rif_counter_dir dir)
174 char ritr_pl[MLXSW_REG_RITR_LEN];
175 bool is_egress = false;
178 if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
180 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
181 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
185 mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
187 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
190 int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
191 struct mlxsw_sp_rif *rif,
192 enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
194 char ricnt_pl[MLXSW_REG_RICNT_LEN];
195 unsigned int *p_counter_index;
199 valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
203 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
204 if (!p_counter_index)
206 mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
207 MLXSW_REG_RICNT_OPCODE_NOP);
208 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
211 *cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
215 static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
216 unsigned int counter_index)
218 char ricnt_pl[MLXSW_REG_RICNT_LEN];
220 mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
221 MLXSW_REG_RICNT_OPCODE_CLEAR);
222 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
225 int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp *mlxsw_sp,
226 struct mlxsw_sp_rif *rif,
227 enum mlxsw_sp_rif_counter_dir dir)
229 unsigned int *p_counter_index;
232 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
233 if (!p_counter_index)
235 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
240 err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
242 goto err_counter_clear;
244 err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
245 *p_counter_index, true, dir);
247 goto err_counter_edit;
248 mlxsw_sp_rif_counter_valid_set(rif, dir, true);
253 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
258 void mlxsw_sp_rif_counter_free(struct mlxsw_sp *mlxsw_sp,
259 struct mlxsw_sp_rif *rif,
260 enum mlxsw_sp_rif_counter_dir dir)
262 unsigned int *p_counter_index;
264 if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
267 p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
268 if (WARN_ON(!p_counter_index))
270 mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
271 *p_counter_index, false, dir);
272 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
274 mlxsw_sp_rif_counter_valid_set(rif, dir, false);
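/* Typical flow for the counter helpers above (sketch of a hypothetical
 * caller):
 *
 *	err = mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
 *	...
 *	err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
 *					     MLXSW_SP_RIF_COUNTER_EGRESS, &cnt);
 *	...
 *	mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
 *
 * Allocation grabs a counter from the RIF sub-pool, clears it and binds it to
 * the RIF via RITR; reads go through RICNT (good unicast packets).
 */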
277 static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
279 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
280 struct devlink *devlink;
282 devlink = priv_to_devlink(mlxsw_sp->core);
283 if (!devlink_dpipe_table_counter_enabled(devlink,
284 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
286 mlxsw_sp_rif_counter_alloc(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
289 static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
291 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
293 mlxsw_sp_rif_counter_free(mlxsw_sp, rif, MLXSW_SP_RIF_COUNTER_EGRESS);
296 #define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)
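/* One bit per possible prefix length: /0 through /128 for IPv6, i.e.
 * 16 * 8 + 1 = 129 bits. IPv4 uses the /0../32 subset of the same bitmap.
 */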
298 struct mlxsw_sp_prefix_usage {
299 DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
302 #define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
303 for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
306 mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
307 struct mlxsw_sp_prefix_usage *prefix_usage2)
309 return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
313 mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
314 struct mlxsw_sp_prefix_usage *prefix_usage2)
316 memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
320 mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
321 unsigned char prefix_len)
323 set_bit(prefix_len, prefix_usage->b);
327 mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
328 unsigned char prefix_len)
330 clear_bit(prefix_len, prefix_usage->b);
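/* Usage sketch (hypothetical caller): a FIB that holds /8 and /16 routes
 * describes itself as
 *
 *	struct mlxsw_sp_prefix_usage req = {};
 *
 *	mlxsw_sp_prefix_usage_set(&req, 8);
 *	mlxsw_sp_prefix_usage_set(&req, 16);
 *	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req, MLXSW_SP_L3_PROTO_IPV4);
 *
 * mlxsw_sp_lpm_tree_get() reuses an existing tree whose prefix usage compares
 * equal via mlxsw_sp_prefix_usage_eq(), or creates a new one.
 */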
333 struct mlxsw_sp_fib_key {
334 unsigned char addr[sizeof(struct in6_addr)];
335 unsigned char prefix_len;
338 enum mlxsw_sp_fib_entry_type {
339 MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
340 MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
341 MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
342 MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
343 MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,
345 /* This is a special case of local delivery, where a packet should be
346 * decapsulated on reception. Note that there is no corresponding ENCAP,
347 * because that's a type of next hop, not of FIB entry. (There can be
348 * several next hops in a REMOTE entry, and some of them may be
349 * encapsulating entries.)
351 MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
352 MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
355 struct mlxsw_sp_nexthop_group_info;
356 struct mlxsw_sp_nexthop_group;
357 struct mlxsw_sp_fib_entry;
359 struct mlxsw_sp_fib_node {
360 struct mlxsw_sp_fib_entry *fib_entry;
361 struct list_head list;
362 struct rhash_head ht_node;
363 struct mlxsw_sp_fib *fib;
364 struct mlxsw_sp_fib_key key;
367 struct mlxsw_sp_fib_entry_decap {
368 struct mlxsw_sp_ipip_entry *ipip_entry;
372 static struct mlxsw_sp_fib_entry_priv *
373 mlxsw_sp_fib_entry_priv_create(const struct mlxsw_sp_router_ll_ops *ll_ops)
375 struct mlxsw_sp_fib_entry_priv *priv;
377 if (!ll_ops->fib_entry_priv_size)
378 /* No need to have priv */
381 priv = kzalloc(sizeof(*priv) + ll_ops->fib_entry_priv_size, GFP_KERNEL);
383 return ERR_PTR(-ENOMEM);
384 refcount_set(&priv->refcnt, 1);
389 mlxsw_sp_fib_entry_priv_destroy(struct mlxsw_sp_fib_entry_priv *priv)
394 static void mlxsw_sp_fib_entry_priv_hold(struct mlxsw_sp_fib_entry_priv *priv)
396 refcount_inc(&priv->refcnt);
399 static void mlxsw_sp_fib_entry_priv_put(struct mlxsw_sp_fib_entry_priv *priv)
401 if (!priv || !refcount_dec_and_test(&priv->refcnt))
403 mlxsw_sp_fib_entry_priv_destroy(priv);
406 static void mlxsw_sp_fib_entry_op_ctx_priv_hold(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
407 struct mlxsw_sp_fib_entry_priv *priv)
411 mlxsw_sp_fib_entry_priv_hold(priv);
412 list_add(&priv->list, &op_ctx->fib_entry_priv_list);
415 static void mlxsw_sp_fib_entry_op_ctx_priv_put_all(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
417 struct mlxsw_sp_fib_entry_priv *priv, *tmp;
419 list_for_each_entry_safe(priv, tmp, &op_ctx->fib_entry_priv_list, list)
420 mlxsw_sp_fib_entry_priv_put(priv);
421 INIT_LIST_HEAD(&op_ctx->fib_entry_priv_list);
424 struct mlxsw_sp_fib_entry {
425 struct mlxsw_sp_fib_node *fib_node;
426 enum mlxsw_sp_fib_entry_type type;
427 struct list_head nexthop_group_node;
428 struct mlxsw_sp_nexthop_group *nh_group;
429 struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
430 struct mlxsw_sp_fib_entry_priv *priv;
433 struct mlxsw_sp_fib4_entry {
434 struct mlxsw_sp_fib_entry common;
441 struct mlxsw_sp_fib6_entry {
442 struct mlxsw_sp_fib_entry common;
443 struct list_head rt6_list;
447 struct mlxsw_sp_rt6 {
448 struct list_head list;
449 struct fib6_info *rt;
452 struct mlxsw_sp_lpm_tree {
454 unsigned int ref_count;
455 enum mlxsw_sp_l3proto proto;
456 unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
457 struct mlxsw_sp_prefix_usage prefix_usage;
460 struct mlxsw_sp_fib {
461 struct rhashtable ht;
462 struct list_head node_list;
463 struct mlxsw_sp_vr *vr;
464 struct mlxsw_sp_lpm_tree *lpm_tree;
465 enum mlxsw_sp_l3proto proto;
466 const struct mlxsw_sp_router_ll_ops *ll_ops;
469 struct mlxsw_sp_vr {
470 u16 id; /* virtual router ID */
471 u32 tb_id; /* kernel fib table id */
472 unsigned int rif_count;
473 struct mlxsw_sp_fib *fib4;
474 struct mlxsw_sp_fib *fib6;
475 struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
476 struct mlxsw_sp_rif *ul_rif;
477 refcount_t ul_rif_refcnt;
480 static int mlxsw_sp_router_ll_basic_ralta_write(struct mlxsw_sp *mlxsw_sp, char *xralta_pl)
482 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta),
483 xralta_pl + MLXSW_REG_XRALTA_RALTA_OFFSET);
486 static int mlxsw_sp_router_ll_basic_ralst_write(struct mlxsw_sp *mlxsw_sp, char *xralst_pl)
488 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst),
489 xralst_pl + MLXSW_REG_XRALST_RALST_OFFSET);
492 static int mlxsw_sp_router_ll_basic_raltb_write(struct mlxsw_sp *mlxsw_sp, char *xraltb_pl)
494 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb),
495 xraltb_pl + MLXSW_REG_XRALTB_RALTB_OFFSET);
498 static const struct rhashtable_params mlxsw_sp_fib_ht_params;
500 static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
501 struct mlxsw_sp_vr *vr,
502 enum mlxsw_sp_l3proto proto)
504 const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
505 struct mlxsw_sp_lpm_tree *lpm_tree;
506 struct mlxsw_sp_fib *fib;
509 lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
510 fib = kzalloc(sizeof(*fib), GFP_KERNEL);
512 return ERR_PTR(-ENOMEM);
513 err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
515 goto err_rhashtable_init;
516 INIT_LIST_HEAD(&fib->node_list);
519 fib->lpm_tree = lpm_tree;
520 fib->ll_ops = ll_ops;
521 mlxsw_sp_lpm_tree_hold(lpm_tree);
522 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
524 goto err_lpm_tree_bind;
528 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
534 static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
535 struct mlxsw_sp_fib *fib)
537 mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
538 mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
539 WARN_ON(!list_empty(&fib->node_list));
540 rhashtable_destroy(&fib->ht);
544 static struct mlxsw_sp_lpm_tree *
545 mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
547 static struct mlxsw_sp_lpm_tree *lpm_tree;
550 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
551 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
552 if (lpm_tree->ref_count == 0)
558 static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
559 const struct mlxsw_sp_router_ll_ops *ll_ops,
560 struct mlxsw_sp_lpm_tree *lpm_tree)
562 char xralta_pl[MLXSW_REG_XRALTA_LEN];
564 mlxsw_reg_xralta_pack(xralta_pl, true,
565 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
567 return ll_ops->ralta_write(mlxsw_sp, xralta_pl);
570 static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
571 const struct mlxsw_sp_router_ll_ops *ll_ops,
572 struct mlxsw_sp_lpm_tree *lpm_tree)
574 char xralta_pl[MLXSW_REG_XRALTA_LEN];
576 mlxsw_reg_xralta_pack(xralta_pl, false,
577 (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
579 ll_ops->ralta_write(mlxsw_sp, xralta_pl);
583 mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
584 const struct mlxsw_sp_router_ll_ops *ll_ops,
585 struct mlxsw_sp_prefix_usage *prefix_usage,
586 struct mlxsw_sp_lpm_tree *lpm_tree)
588 char xralst_pl[MLXSW_REG_XRALST_LEN];
591 u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
593 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
596 mlxsw_reg_xralst_pack(xralst_pl, root_bin, lpm_tree->id);
597 mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
600 mlxsw_reg_xralst_bin_pack(xralst_pl, prefix, last_prefix,
601 MLXSW_REG_RALST_BIN_NO_CHILD);
602 last_prefix = prefix;
604 return ll_ops->ralst_write(mlxsw_sp, xralst_pl);
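/* As packed here, the bins form a simple chain: the longest used prefix
 * length is the root bin and each bin's left child is the next shorter used
 * prefix length; unused prefix lengths get no bin at all.
 */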
607 static struct mlxsw_sp_lpm_tree *
608 mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
609 const struct mlxsw_sp_router_ll_ops *ll_ops,
610 struct mlxsw_sp_prefix_usage *prefix_usage,
611 enum mlxsw_sp_l3proto proto)
613 struct mlxsw_sp_lpm_tree *lpm_tree;
616 lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
618 return ERR_PTR(-EBUSY);
619 lpm_tree->proto = proto;
620 err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, ll_ops, lpm_tree);
624 err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, ll_ops, prefix_usage, lpm_tree);
626 goto err_left_struct_set;
627 memcpy(&lpm_tree->prefix_usage, prefix_usage,
628 sizeof(lpm_tree->prefix_usage));
629 memset(&lpm_tree->prefix_ref_count, 0,
630 sizeof(lpm_tree->prefix_ref_count));
631 lpm_tree->ref_count = 1;
635 mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
639 static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
640 const struct mlxsw_sp_router_ll_ops *ll_ops,
641 struct mlxsw_sp_lpm_tree *lpm_tree)
643 mlxsw_sp_lpm_tree_free(mlxsw_sp, ll_ops, lpm_tree);
646 static struct mlxsw_sp_lpm_tree *
647 mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
648 struct mlxsw_sp_prefix_usage *prefix_usage,
649 enum mlxsw_sp_l3proto proto)
651 const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
652 struct mlxsw_sp_lpm_tree *lpm_tree;
655 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
656 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
657 if (lpm_tree->ref_count != 0 &&
658 lpm_tree->proto == proto &&
659 mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
661 mlxsw_sp_lpm_tree_hold(lpm_tree);
665 return mlxsw_sp_lpm_tree_create(mlxsw_sp, ll_ops, prefix_usage, proto);
668 static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
670 lpm_tree->ref_count++;
673 static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
674 struct mlxsw_sp_lpm_tree *lpm_tree)
676 const struct mlxsw_sp_router_ll_ops *ll_ops =
677 mlxsw_sp->router->proto_ll_ops[lpm_tree->proto];
679 if (--lpm_tree->ref_count == 0)
680 mlxsw_sp_lpm_tree_destroy(mlxsw_sp, ll_ops, lpm_tree);
683 #define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
685 static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
687 struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
688 struct mlxsw_sp_lpm_tree *lpm_tree;
692 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
695 max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
696 mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
697 mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
698 sizeof(struct mlxsw_sp_lpm_tree),
700 if (!mlxsw_sp->router->lpm.trees)
703 for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
704 lpm_tree = &mlxsw_sp->router->lpm.trees[i];
705 lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
708 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
709 MLXSW_SP_L3_PROTO_IPV4);
710 if (IS_ERR(lpm_tree)) {
711 err = PTR_ERR(lpm_tree);
712 goto err_ipv4_tree_get;
714 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;
716 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
717 MLXSW_SP_L3_PROTO_IPV6);
718 if (IS_ERR(lpm_tree)) {
719 err = PTR_ERR(lpm_tree);
720 goto err_ipv6_tree_get;
722 mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;
727 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
728 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
730 kfree(mlxsw_sp->router->lpm.trees);
734 static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
736 struct mlxsw_sp_lpm_tree *lpm_tree;
738 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
739 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
741 lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
742 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
744 kfree(mlxsw_sp->router->lpm.trees);
747 static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
749 return !!vr->fib4 || !!vr->fib6 ||
750 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
751 !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
754 static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
756 struct mlxsw_sp_vr *vr;
759 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
760 vr = &mlxsw_sp->router->vrs[i];
761 if (!mlxsw_sp_vr_is_used(vr))
767 static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
768 const struct mlxsw_sp_fib *fib, u8 tree_id)
770 char xraltb_pl[MLXSW_REG_XRALTB_LEN];
772 mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
773 (enum mlxsw_reg_ralxx_protocol) fib->proto,
775 return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
778 static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
779 const struct mlxsw_sp_fib *fib)
781 char xraltb_pl[MLXSW_REG_XRALTB_LEN];
783 /* Bind to tree 0 which is default */
784 mlxsw_reg_xraltb_pack(xraltb_pl, fib->vr->id,
785 (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
786 return fib->ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
789 static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
791 /* For our purpose, squash main, default and local tables into one */
792 if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
793 tb_id = RT_TABLE_MAIN;
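/* E.g. mlxsw_sp_fix_tb_id(RT_TABLE_LOCAL) and mlxsw_sp_fix_tb_id(RT_TABLE_DEFAULT)
 * both return RT_TABLE_MAIN (254), so local (255) and default (253) table
 * routes share the main table's virtual router.
 */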
797 static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
800 struct mlxsw_sp_vr *vr;
803 tb_id = mlxsw_sp_fix_tb_id(tb_id);
805 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
806 vr = &mlxsw_sp->router->vrs[i];
807 if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
813 int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
816 struct mlxsw_sp_vr *vr;
819 mutex_lock(&mlxsw_sp->router->lock);
820 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
827 mutex_unlock(&mlxsw_sp->router->lock);
831 static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
832 enum mlxsw_sp_l3proto proto)
835 case MLXSW_SP_L3_PROTO_IPV4:
837 case MLXSW_SP_L3_PROTO_IPV6:
843 static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
845 struct netlink_ext_ack *extack)
847 struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
848 struct mlxsw_sp_fib *fib4;
849 struct mlxsw_sp_fib *fib6;
850 struct mlxsw_sp_vr *vr;
853 vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
855 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
856 return ERR_PTR(-EBUSY);
858 fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
860 return ERR_CAST(fib4);
861 fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
864 goto err_fib6_create;
866 mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
867 MLXSW_SP_L3_PROTO_IPV4);
868 if (IS_ERR(mr4_table)) {
869 err = PTR_ERR(mr4_table);
870 goto err_mr4_table_create;
872 mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
873 MLXSW_SP_L3_PROTO_IPV6);
874 if (IS_ERR(mr6_table)) {
875 err = PTR_ERR(mr6_table);
876 goto err_mr6_table_create;
881 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
882 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
886 err_mr6_table_create:
887 mlxsw_sp_mr_table_destroy(mr4_table);
888 err_mr4_table_create:
889 mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
891 mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
895 static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
896 struct mlxsw_sp_vr *vr)
898 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
899 vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
900 mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
901 vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
902 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
904 mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
908 static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
909 struct netlink_ext_ack *extack)
911 struct mlxsw_sp_vr *vr;
913 tb_id = mlxsw_sp_fix_tb_id(tb_id);
914 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
916 vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
920 static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
922 if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
923 list_empty(&vr->fib6->node_list) &&
924 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
925 mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
926 mlxsw_sp_vr_destroy(mlxsw_sp, vr);
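/* A virtual router stays allocated for as long as anything references it:
 * RIFs, IPv4/IPv6 FIB nodes or multicast routes. mlxsw_sp_vr_put() only
 * destroys it once all of those are gone.
 */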
930 mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
931 enum mlxsw_sp_l3proto proto, u8 tree_id)
933 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
935 if (!mlxsw_sp_vr_is_used(vr))
937 if (fib->lpm_tree->id == tree_id)
942 static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
943 struct mlxsw_sp_fib *fib,
944 struct mlxsw_sp_lpm_tree *new_tree)
946 struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
949 fib->lpm_tree = new_tree;
950 mlxsw_sp_lpm_tree_hold(new_tree);
951 err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
954 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
958 mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
959 fib->lpm_tree = old_tree;
963 static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
964 struct mlxsw_sp_fib *fib,
965 struct mlxsw_sp_lpm_tree *new_tree)
967 enum mlxsw_sp_l3proto proto = fib->proto;
968 struct mlxsw_sp_lpm_tree *old_tree;
969 u8 old_id, new_id = new_tree->id;
970 struct mlxsw_sp_vr *vr;
973 old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
974 old_id = old_tree->id;
976 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
977 vr = &mlxsw_sp->router->vrs[i];
978 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
980 err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
981 mlxsw_sp_vr_fib(vr, proto),
984 goto err_tree_replace;
987 memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
988 sizeof(new_tree->prefix_ref_count));
989 mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
990 mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
995 for (i--; i >= 0; i--) {
996 if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
998 mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
999 mlxsw_sp_vr_fib(vr, proto),
1005 static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
1007 struct mlxsw_sp_vr *vr;
1011 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
1014 max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
1015 mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
1017 if (!mlxsw_sp->router->vrs)
1020 for (i = 0; i < max_vrs; i++) {
1021 vr = &mlxsw_sp->router->vrs[i];
1028 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
1030 static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
1032 /* At this stage we're guaranteed not to have new incoming
1033 * FIB notifications and the work queue is free from FIBs
1034 * sitting on top of mlxsw netdevs. However, we can still
1035 * have other FIBs queued. Flush the queue before flushing
1036 * the device's tables. No need for locks, as we're the only
1037 * writer.
1039 mlxsw_core_flush_owq();
1040 mlxsw_sp_router_fib_flush(mlxsw_sp);
1041 kfree(mlxsw_sp->router->vrs);
1044 static struct net_device *
1045 __mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
1047 struct ip_tunnel *tun = netdev_priv(ol_dev);
1048 struct net *net = dev_net(ol_dev);
1050 return dev_get_by_index_rcu(net, tun->parms.link);
1053 u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
1055 struct net_device *d;
1059 d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1061 tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
1063 tb_id = RT_TABLE_MAIN;
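/* The underlay table of a tunnel is the FIB table of the device the tunnel
 * is bound to (tun->parms.link): the VRF table if that device is enslaved to
 * a VRF, RT_TABLE_MAIN otherwise (including when the tunnel is unbound).
 */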
1069 static struct mlxsw_sp_rif *
1070 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
1071 const struct mlxsw_sp_rif_params *params,
1072 struct netlink_ext_ack *extack);
1074 static struct mlxsw_sp_rif_ipip_lb *
1075 mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
1076 enum mlxsw_sp_ipip_type ipipt,
1077 struct net_device *ol_dev,
1078 struct netlink_ext_ack *extack)
1080 struct mlxsw_sp_rif_params_ipip_lb lb_params;
1081 const struct mlxsw_sp_ipip_ops *ipip_ops;
1082 struct mlxsw_sp_rif *rif;
1084 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1085 lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
1086 .common.dev = ol_dev,
1087 .common.lag = false,
1088 .lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
1091 rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
1093 return ERR_CAST(rif);
1094 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
1097 static struct mlxsw_sp_ipip_entry *
1098 mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
1099 enum mlxsw_sp_ipip_type ipipt,
1100 struct net_device *ol_dev)
1102 const struct mlxsw_sp_ipip_ops *ipip_ops;
1103 struct mlxsw_sp_ipip_entry *ipip_entry;
1104 struct mlxsw_sp_ipip_entry *ret = NULL;
1106 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
1107 ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
1109 return ERR_PTR(-ENOMEM);
1111 ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
1113 if (IS_ERR(ipip_entry->ol_lb)) {
1114 ret = ERR_CAST(ipip_entry->ol_lb);
1115 goto err_ol_ipip_lb_create;
1118 ipip_entry->ipipt = ipipt;
1119 ipip_entry->ol_dev = ol_dev;
1121 switch (ipip_ops->ul_proto) {
1122 case MLXSW_SP_L3_PROTO_IPV4:
1123 ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
1125 case MLXSW_SP_L3_PROTO_IPV6:
1132 err_ol_ipip_lb_create:
1138 mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
1140 mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
1145 mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
1146 const enum mlxsw_sp_l3proto ul_proto,
1147 union mlxsw_sp_l3addr saddr,
1149 struct mlxsw_sp_ipip_entry *ipip_entry)
1151 u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1152 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1153 union mlxsw_sp_l3addr tun_saddr;
1155 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1158 tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1159 return tun_ul_tb_id == ul_tb_id &&
1160 mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
1164 mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
1165 struct mlxsw_sp_fib_entry *fib_entry,
1166 struct mlxsw_sp_ipip_entry *ipip_entry)
1171 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1176 ipip_entry->decap_fib_entry = fib_entry;
1177 fib_entry->decap.ipip_entry = ipip_entry;
1178 fib_entry->decap.tunnel_index = tunnel_index;
1182 static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
1183 struct mlxsw_sp_fib_entry *fib_entry)
1185 /* Unlink this node from the IPIP entry that it's the decap entry of. */
1186 fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
1187 fib_entry->decap.ipip_entry = NULL;
1188 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1189 1, fib_entry->decap.tunnel_index);
1192 static struct mlxsw_sp_fib_node *
1193 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
1194 size_t addr_len, unsigned char prefix_len);
1195 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
1196 struct mlxsw_sp_fib_entry *fib_entry);
1199 mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
1200 struct mlxsw_sp_ipip_entry *ipip_entry)
1202 struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;
1204 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
1205 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1207 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
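/* Once demoted, the route is downgraded from IPIP decap back to a plain trap
 * entry, so packets to the tunnel's local address go to the CPU instead of
 * being decapsulated in hardware.
 */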
1211 mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
1212 struct mlxsw_sp_ipip_entry *ipip_entry,
1213 struct mlxsw_sp_fib_entry *decap_fib_entry)
1215 if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
1218 decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
1220 if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
1221 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1224 static struct mlxsw_sp_fib_entry *
1225 mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
1226 enum mlxsw_sp_l3proto proto,
1227 const union mlxsw_sp_l3addr *addr,
1228 enum mlxsw_sp_fib_entry_type type)
1230 struct mlxsw_sp_fib_node *fib_node;
1231 unsigned char addr_prefix_len;
1232 struct mlxsw_sp_fib *fib;
1233 struct mlxsw_sp_vr *vr;
1238 vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
1241 fib = mlxsw_sp_vr_fib(vr, proto);
1244 case MLXSW_SP_L3_PROTO_IPV4:
1245 addr4 = be32_to_cpu(addr->addr4);
1248 addr_prefix_len = 32;
1250 case MLXSW_SP_L3_PROTO_IPV6:
1256 fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
1258 if (!fib_node || fib_node->fib_entry->type != type)
1261 return fib_node->fib_entry;
1264 /* Given an IPIP entry, find the corresponding decap route. */
1265 static struct mlxsw_sp_fib_entry *
1266 mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
1267 struct mlxsw_sp_ipip_entry *ipip_entry)
1269 struct mlxsw_sp_fib_node *fib_node;
1270 const struct mlxsw_sp_ipip_ops *ipip_ops;
1271 unsigned char saddr_prefix_len;
1272 union mlxsw_sp_l3addr saddr;
1273 struct mlxsw_sp_fib *ul_fib;
1274 struct mlxsw_sp_vr *ul_vr;
1280 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1282 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
1283 ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
1287 ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
1288 saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
1289 ipip_entry->ol_dev);
1291 switch (ipip_ops->ul_proto) {
1292 case MLXSW_SP_L3_PROTO_IPV4:
1293 saddr4 = be32_to_cpu(saddr.addr4);
1296 saddr_prefix_len = 32;
1303 fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
1306 fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
1309 return fib_node->fib_entry;
1312 static struct mlxsw_sp_ipip_entry *
1313 mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
1314 enum mlxsw_sp_ipip_type ipipt,
1315 struct net_device *ol_dev)
1317 struct mlxsw_sp_ipip_entry *ipip_entry;
1319 ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
1320 if (IS_ERR(ipip_entry))
1323 list_add_tail(&ipip_entry->ipip_list_node,
1324 &mlxsw_sp->router->ipip_list);
1330 mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
1331 struct mlxsw_sp_ipip_entry *ipip_entry)
1333 list_del(&ipip_entry->ipip_list_node);
1334 mlxsw_sp_ipip_entry_dealloc(ipip_entry);
1338 mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
1339 const struct net_device *ul_dev,
1340 enum mlxsw_sp_l3proto ul_proto,
1341 union mlxsw_sp_l3addr ul_dip,
1342 struct mlxsw_sp_ipip_entry *ipip_entry)
1344 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1345 enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
1347 if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
1350 return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
1351 ul_tb_id, ipip_entry);
1354 /* Given decap parameters, find the corresponding IPIP entry. */
1355 static struct mlxsw_sp_ipip_entry *
1356 mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp,
1357 const struct net_device *ul_dev,
1358 enum mlxsw_sp_l3proto ul_proto,
1359 union mlxsw_sp_l3addr ul_dip)
1361 struct mlxsw_sp_ipip_entry *ipip_entry;
1363 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1365 if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
1373 static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
1374 const struct net_device *dev,
1375 enum mlxsw_sp_ipip_type *p_type)
1377 struct mlxsw_sp_router *router = mlxsw_sp->router;
1378 const struct mlxsw_sp_ipip_ops *ipip_ops;
1379 enum mlxsw_sp_ipip_type ipipt;
1381 for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
1382 ipip_ops = router->ipip_ops_arr[ipipt];
1383 if (dev->type == ipip_ops->dev_type) {
1392 bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
1393 const struct net_device *dev)
1395 return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
1398 static struct mlxsw_sp_ipip_entry *
1399 mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
1400 const struct net_device *ol_dev)
1402 struct mlxsw_sp_ipip_entry *ipip_entry;
1404 list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
1406 if (ipip_entry->ol_dev == ol_dev)
1412 static struct mlxsw_sp_ipip_entry *
1413 mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
1414 const struct net_device *ul_dev,
1415 struct mlxsw_sp_ipip_entry *start)
1417 struct mlxsw_sp_ipip_entry *ipip_entry;
1419 ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
1421 list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
1423 struct net_device *ol_dev = ipip_entry->ol_dev;
1424 struct net_device *ipip_ul_dev;
1427 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1430 if (ipip_ul_dev == ul_dev)
1437 bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
1438 const struct net_device *dev)
1442 mutex_lock(&mlxsw_sp->router->lock);
1443 is_ipip_ul = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
1444 mutex_unlock(&mlxsw_sp->router->lock);
1449 static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
1450 const struct net_device *ol_dev,
1451 enum mlxsw_sp_ipip_type ipipt)
1453 const struct mlxsw_sp_ipip_ops *ops
1454 = mlxsw_sp->router->ipip_ops_arr[ipipt];
1456 return ops->can_offload(mlxsw_sp, ol_dev);
1459 static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
1460 struct net_device *ol_dev)
1462 enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
1463 struct mlxsw_sp_ipip_entry *ipip_entry;
1464 enum mlxsw_sp_l3proto ul_proto;
1465 union mlxsw_sp_l3addr saddr;
1468 mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
1469 if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
1470 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
1471 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
1472 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
1473 if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1476 ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
1478 if (IS_ERR(ipip_entry))
1479 return PTR_ERR(ipip_entry);
1486 static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
1487 struct net_device *ol_dev)
1489 struct mlxsw_sp_ipip_entry *ipip_entry;
1491 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1493 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1497 mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1498 struct mlxsw_sp_ipip_entry *ipip_entry)
1500 struct mlxsw_sp_fib_entry *decap_fib_entry;
1502 decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
1503 if (decap_fib_entry)
1504 mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
1509 mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
1510 u16 ul_rif_id, bool enable)
1512 struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
1513 struct mlxsw_sp_rif *rif = &lb_rif->common;
1514 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
1515 char ritr_pl[MLXSW_REG_RITR_LEN];
1518 switch (lb_cf.ul_protocol) {
1519 case MLXSW_SP_L3_PROTO_IPV4:
1520 saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
1521 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
1522 rif->rif_index, rif->vr_id, rif->dev->mtu);
1523 mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
1524 MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
1525 ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
1528 case MLXSW_SP_L3_PROTO_IPV6:
1529 return -EAFNOSUPPORT;
1532 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
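/* The loopback RIF carries the tunnel's underlay parameters (underlay VR and
 * RIF, local address, GRE key). Only IPv4 underlays are programmed here; an
 * IPv6 underlay is rejected with -EAFNOSUPPORT.
 */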
1535 static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
1536 struct net_device *ol_dev)
1538 struct mlxsw_sp_ipip_entry *ipip_entry;
1539 struct mlxsw_sp_rif_ipip_lb *lb_rif;
1542 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1544 lb_rif = ipip_entry->ol_lb;
1545 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
1546 lb_rif->ul_rif_id, true);
1549 lb_rif->common.mtu = ol_dev->mtu;
1556 static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
1557 struct net_device *ol_dev)
1559 struct mlxsw_sp_ipip_entry *ipip_entry;
1561 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1563 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
1567 mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1568 struct mlxsw_sp_ipip_entry *ipip_entry)
1570 if (ipip_entry->decap_fib_entry)
1571 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1574 static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
1575 struct net_device *ol_dev)
1577 struct mlxsw_sp_ipip_entry *ipip_entry;
1579 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1581 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1584 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
1585 struct mlxsw_sp_rif *old_rif,
1586 struct mlxsw_sp_rif *new_rif);
1588 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
1589 struct mlxsw_sp_ipip_entry *ipip_entry,
1591 struct netlink_ext_ack *extack)
1593 struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
1594 struct mlxsw_sp_rif_ipip_lb *new_lb_rif;
1596 new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
1600 if (IS_ERR(new_lb_rif))
1601 return PTR_ERR(new_lb_rif);
1602 ipip_entry->ol_lb = new_lb_rif;
1605 mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
1606 &new_lb_rif->common);
1608 mlxsw_sp_rif_destroy(&old_lb_rif->common);
1613 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
1614 struct mlxsw_sp_rif *rif);
1616 /**
1617 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
1618 * @mlxsw_sp: mlxsw_sp.
1619 * @ipip_entry: IPIP entry.
1620 * @recreate_loopback: Recreates the associated loopback RIF.
1621 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
1622 * relevant when recreate_loopback is true.
1623 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
1624 * is only relevant when recreate_loopback is false.
1625 * @extack: netlink extended ack, used to report errors back to user space.
1627 * Return: Non-zero value on failure.
1629 int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
1630 struct mlxsw_sp_ipip_entry *ipip_entry,
1631 bool recreate_loopback,
1633 bool update_nexthops,
1634 struct netlink_ext_ack *extack)
1638 /* RIFs can't be edited, so to update loopback, we need to destroy and
1639 * recreate it. That creates a window of opportunity where RALUE and
1640 * RATR registers end up referencing a RIF that's already gone. RATRs
1641 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
1642 * of RALUE, demote the decap route back.
1644 if (ipip_entry->decap_fib_entry)
1645 mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
1647 if (recreate_loopback) {
1648 err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
1649 keep_encap, extack);
1652 } else if (update_nexthops) {
1653 mlxsw_sp_nexthop_rif_update(mlxsw_sp,
1654 &ipip_entry->ol_lb->common);
1657 if (ipip_entry->ol_dev->flags & IFF_UP)
1658 mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
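/* The event handlers below pick the flags as follows (see the calls further
 * down): an overlay VRF move recreates the loopback only, an underlay VRF
 * move recreates it while keeping the encap next hops, and underlay up/down
 * events merely refresh the next hops.
 */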
1663 static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
1664 struct net_device *ol_dev,
1665 struct netlink_ext_ack *extack)
1667 struct mlxsw_sp_ipip_entry *ipip_entry =
1668 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1673 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1674 true, false, false, extack);
1678 mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
1679 struct mlxsw_sp_ipip_entry *ipip_entry,
1680 struct net_device *ul_dev,
1682 struct netlink_ext_ack *extack)
1684 u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
1685 enum mlxsw_sp_l3proto ul_proto;
1686 union mlxsw_sp_l3addr saddr;
1688 /* Moving underlay to a different VRF might cause local address
1689 * conflict, and the conflicting tunnels need to be demoted.
1691 ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
1692 saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
1693 if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
1696 *demote_this = true;
1700 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1701 true, true, false, extack);
1705 mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
1706 struct mlxsw_sp_ipip_entry *ipip_entry,
1707 struct net_device *ul_dev)
1709 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1710 false, false, true, NULL);
1714 mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
1715 struct mlxsw_sp_ipip_entry *ipip_entry,
1716 struct net_device *ul_dev)
1718 /* A down underlay device causes encapsulated packets to not be
1719 * forwarded, but decap still works. So refresh next hops without
1720 * touching anything else.
1722 return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
1723 false, false, true, NULL);
1727 mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
1728 struct net_device *ol_dev,
1729 struct netlink_ext_ack *extack)
1731 const struct mlxsw_sp_ipip_ops *ipip_ops;
1732 struct mlxsw_sp_ipip_entry *ipip_entry;
1735 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
1737 /* A change might make a tunnel eligible for offloading, but
1738 * that is currently not implemented. What falls to slow path
1739 * stays there.
1743 /* A change might make a tunnel not eligible for offloading. */
1744 if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
1745 ipip_entry->ipipt)) {
1746 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1750 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
1751 err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
1755 void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
1756 struct mlxsw_sp_ipip_entry *ipip_entry)
1758 struct net_device *ol_dev = ipip_entry->ol_dev;
1760 if (ol_dev->flags & IFF_UP)
1761 mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
1762 mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
1765 /* The configuration where several tunnels have the same local address in the
1766 * same underlay table needs special treatment in the HW. That is currently not
1767 * implemented in the driver. This function finds and demotes the first tunnel
1768 * with a given source address, except the one passed in via the 'except' argument.
1772 mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
1773 enum mlxsw_sp_l3proto ul_proto,
1774 union mlxsw_sp_l3addr saddr,
1776 const struct mlxsw_sp_ipip_entry *except)
1778 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1780 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1782 if (ipip_entry != except &&
1783 mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
1784 ul_tb_id, ipip_entry)) {
1785 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1793 static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
1794 struct net_device *ul_dev)
1796 struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;
1798 list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
1800 struct net_device *ol_dev = ipip_entry->ol_dev;
1801 struct net_device *ipip_ul_dev;
1804 ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
1806 if (ipip_ul_dev == ul_dev)
1807 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1811 int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
1812 struct net_device *ol_dev,
1813 unsigned long event,
1814 struct netdev_notifier_info *info)
1816 struct netdev_notifier_changeupper_info *chup;
1817 struct netlink_ext_ack *extack;
1820 mutex_lock(&mlxsw_sp->router->lock);
1822 case NETDEV_REGISTER:
1823 err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
1825 case NETDEV_UNREGISTER:
1826 mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
1829 mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
1832 mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
1834 case NETDEV_CHANGEUPPER:
1835 chup = container_of(info, typeof(*chup), info);
1836 extack = info->extack;
1837 if (netif_is_l3_master(chup->upper_dev))
1838 err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
1843 extack = info->extack;
1844 err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
1847 case NETDEV_CHANGEMTU:
1848 err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
1851 mutex_unlock(&mlxsw_sp->router->lock);
1856 __mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1857 struct mlxsw_sp_ipip_entry *ipip_entry,
1858 struct net_device *ul_dev,
1860 unsigned long event,
1861 struct netdev_notifier_info *info)
1863 struct netdev_notifier_changeupper_info *chup;
1864 struct netlink_ext_ack *extack;
1867 case NETDEV_CHANGEUPPER:
1868 chup = container_of(info, typeof(*chup), info);
1869 extack = info->extack;
1870 if (netif_is_l3_master(chup->upper_dev))
1871 return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
1879 return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
1882 return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
1890 mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
1891 struct net_device *ul_dev,
1892 unsigned long event,
1893 struct netdev_notifier_info *info)
1895 struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
1898 mutex_lock(&mlxsw_sp->router->lock);
1899 while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
1902 struct mlxsw_sp_ipip_entry *prev;
1903 bool demote_this = false;
1905 err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
1906 ul_dev, &demote_this,
1909 mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
1915 if (list_is_first(&ipip_entry->ipip_list_node,
1916 &mlxsw_sp->router->ipip_list))
1919 /* This can't be cached from previous iteration,
1920 * because that entry could be gone now.
1922 prev = list_prev_entry(ipip_entry,
1924 mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
1928 mutex_unlock(&mlxsw_sp->router->lock);
1933 int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1934 enum mlxsw_sp_l3proto ul_proto,
1935 const union mlxsw_sp_l3addr *ul_sip,
1938 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1939 struct mlxsw_sp_router *router = mlxsw_sp->router;
1940 struct mlxsw_sp_fib_entry *fib_entry;
1943 mutex_lock(&mlxsw_sp->router->lock);
1945 if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
1950 router->nve_decap_config.ul_tb_id = ul_tb_id;
1951 router->nve_decap_config.tunnel_index = tunnel_index;
1952 router->nve_decap_config.ul_proto = ul_proto;
1953 router->nve_decap_config.ul_sip = *ul_sip;
1954 router->nve_decap_config.valid = true;
1956 /* It is valid to create a tunnel with a local IP and only later
1957 * assign this IP address to a local interface
1959 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
1965 fib_entry->decap.tunnel_index = tunnel_index;
1966 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1968 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1970 goto err_fib_entry_update;
1974 err_fib_entry_update:
1975 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
1976 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
1978 mutex_unlock(&mlxsw_sp->router->lock);
1982 void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
1983 enum mlxsw_sp_l3proto ul_proto,
1984 const union mlxsw_sp_l3addr *ul_sip)
1986 enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
1987 struct mlxsw_sp_router *router = mlxsw_sp->router;
1988 struct mlxsw_sp_fib_entry *fib_entry;
1990 mutex_lock(&mlxsw_sp->router->lock);
1992 if (WARN_ON_ONCE(!router->nve_decap_config.valid))
1995 router->nve_decap_config.valid = false;
1997 fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
2003 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
2004 mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
2006 mutex_unlock(&mlxsw_sp->router->lock);
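/* Used on the FIB entry configuration path to recognize local routes that
 * match the NVE tunnel's underlay source address, so they can be programmed
 * as MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP rather than as plain traps.
 */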
2009 static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
2011 enum mlxsw_sp_l3proto ul_proto,
2012 const union mlxsw_sp_l3addr *ul_sip)
2014 struct mlxsw_sp_router *router = mlxsw_sp->router;
2016 return router->nve_decap_config.valid &&
2017 router->nve_decap_config.ul_tb_id == ul_tb_id &&
2018 router->nve_decap_config.ul_proto == ul_proto &&
2019 !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
2023 struct mlxsw_sp_neigh_key {
2024 struct neighbour *n;
2027 struct mlxsw_sp_neigh_entry {
2028 struct list_head rif_list_node;
2029 struct rhash_head ht_node;
2030 struct mlxsw_sp_neigh_key key;
2033 unsigned char ha[ETH_ALEN];
2034 struct list_head nexthop_list; /* list of nexthops using this neigh entry */
2037 struct list_head nexthop_neighs_list_node;
2038 unsigned int counter_index;
2042 static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
2043 .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
2044 .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
2045 .key_len = sizeof(struct mlxsw_sp_neigh_key),
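/* Iterator over the neighbours of a RIF: pass NULL to get the first entry,
 * the previous return value to get the next one, and expect NULL at the end
 * of the list. Used by the dpipe host table dumpers.
 */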
2048 struct mlxsw_sp_neigh_entry *
2049 mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
2050 struct mlxsw_sp_neigh_entry *neigh_entry)
2053 if (list_empty(&rif->neigh_list))
2056 return list_first_entry(&rif->neigh_list,
2057 typeof(*neigh_entry),
2060 if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
2062 return list_next_entry(neigh_entry, rif_list_node);
2065 int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
2067 return neigh_entry->key.n->tbl->family;
2071 mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
2073 return neigh_entry->ha;
2076 u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2078 struct neighbour *n;
2080 n = neigh_entry->key.n;
2081 return ntohl(*((__be32 *) n->primary_key));
2085 mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
2087 struct neighbour *n;
2089 n = neigh_entry->key.n;
2090 return (struct in6_addr *) &n->primary_key;
2093 int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
2094 struct mlxsw_sp_neigh_entry *neigh_entry,
2097 if (!neigh_entry->counter_valid)
2100 return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
2104 static struct mlxsw_sp_neigh_entry *
2105 mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
2108 struct mlxsw_sp_neigh_entry *neigh_entry;
2110 neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
2114 neigh_entry->key.n = n;
2115 neigh_entry->rif = rif;
2116 INIT_LIST_HEAD(&neigh_entry->nexthop_list);
2121 static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
2127 mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
2128 struct mlxsw_sp_neigh_entry *neigh_entry)
2130 return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
2131 &neigh_entry->ht_node,
2132 mlxsw_sp_neigh_ht_params);
2136 mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
2137 struct mlxsw_sp_neigh_entry *neigh_entry)
2139 rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
2140 &neigh_entry->ht_node,
2141 mlxsw_sp_neigh_ht_params);
2145 mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
2146 struct mlxsw_sp_neigh_entry *neigh_entry)
2148 struct devlink *devlink;
2149 const char *table_name;
2151 switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
2153 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
2156 table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
2163 devlink = priv_to_devlink(mlxsw_sp->core);
2164 return devlink_dpipe_table_counter_enabled(devlink, table_name);
2168 mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2169 struct mlxsw_sp_neigh_entry *neigh_entry)
2171 if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
2174 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
2177 neigh_entry->counter_valid = true;
2181 mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
2182 struct mlxsw_sp_neigh_entry *neigh_entry)
2184 if (!neigh_entry->counter_valid)
2186 mlxsw_sp_flow_counter_free(mlxsw_sp,
2187 neigh_entry->counter_index);
2188 neigh_entry->counter_valid = false;
2191 static struct mlxsw_sp_neigh_entry *
2192 mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2194 struct mlxsw_sp_neigh_entry *neigh_entry;
2195 struct mlxsw_sp_rif *rif;
2198 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
2200 return ERR_PTR(-EINVAL);
2202 neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
2204 return ERR_PTR(-ENOMEM);
2206 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
2208 goto err_neigh_entry_insert;
2210 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2211 list_add(&neigh_entry->rif_list_node, &rif->neigh_list);
2215 err_neigh_entry_insert:
2216 mlxsw_sp_neigh_entry_free(neigh_entry);
2217 return ERR_PTR(err);
2221 mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
2222 struct mlxsw_sp_neigh_entry *neigh_entry)
2224 list_del(&neigh_entry->rif_list_node);
2225 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2226 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
2227 mlxsw_sp_neigh_entry_free(neigh_entry);
2230 static struct mlxsw_sp_neigh_entry *
2231 mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
2233 struct mlxsw_sp_neigh_key key;
2236 return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
2237 &key, mlxsw_sp_neigh_ht_params);
2241 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2243 unsigned long interval;
2245 #if IS_ENABLED(CONFIG_IPV6)
2246 interval = min_t(unsigned long,
2247 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2248 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2250 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2252 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
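/* E.g. with the default DELAY_PROBE_TIME of 5 seconds on both the ARP and
 * ndisc tables, the periodic neighbour activity dump below runs every
 * 5000 ms.
 */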
2255 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2259 struct net_device *dev;
2260 struct neighbour *n;
2265 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2267 if (!mlxsw_sp->router->rifs[rif]) {
2268 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2273 dev = mlxsw_sp->router->rifs[rif]->dev;
2274 n = neigh_lookup(&arp_tbl, &dipn, dev);
2278 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2279 neigh_event_send(n, NULL);
2283 #if IS_ENABLED(CONFIG_IPV6)
2284 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2288 struct net_device *dev;
2289 struct neighbour *n;
2290 struct in6_addr dip;
2293 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2296 if (!mlxsw_sp->router->rifs[rif]) {
2297 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2301 dev = mlxsw_sp->router->rifs[rif]->dev;
2302 n = neigh_lookup(&nd_tbl, &dip, dev);
2306 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2307 neigh_event_send(n, NULL);
2311 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2318 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2325 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2327 /* Hardware starts counting at 0, so add 1. */
2330 /* Each record consists of several neighbour entries. */
2331 for (i = 0; i < num_entries; i++) {
2334 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2335 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2341 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2345 /* One record contains one entry. */
2346 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2350 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2351 char *rauhtd_pl, int rec_index)
2353 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2354 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2355 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2358 case MLXSW_REG_RAUHTD_TYPE_IPV6:
2359 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2365 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2367 u8 num_rec, last_rec_index, num_entries;
2369 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2370 last_rec_index = num_rec - 1;
2372 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2374 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2375 MLXSW_REG_RAUHTD_TYPE_IPV6)
2378 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2380 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
2386 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2388 enum mlxsw_reg_rauhtd_type type)
2393 /* Ensure the RIF we read from the device does not change mid-dump. */
2394 mutex_lock(&mlxsw_sp->router->lock);
2396 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2397 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2400 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2403 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2404 for (i = 0; i < num_rec; i++)
2405 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2407 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2408 mutex_unlock(&mlxsw_sp->router->lock);
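/* Dump the hardware activity records (RAUHTD register), first for IPv4 and
 * then for IPv6. Each record is fed back to the kernel via neigh_event_send()
 * so that neighbours which are active in hardware are not aged out, even if
 * the CPU itself sees no traffic for them.
 */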
2413 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2415 enum mlxsw_reg_rauhtd_type type;
2419 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2423 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2424 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2428 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2429 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2435 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2437 struct mlxsw_sp_neigh_entry *neigh_entry;
2439 mutex_lock(&mlxsw_sp->router->lock);
2440 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2441 nexthop_neighs_list_node)
2442 /* If this neigh has nexthops, make the kernel think this neigh
2443  * is active regardless of the traffic. */
2445 neigh_event_send(neigh_entry->key.n, NULL);
2446 mutex_unlock(&mlxsw_sp->router->lock);
2450 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2452 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2454 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2455 msecs_to_jiffies(interval));
2458 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2460 struct mlxsw_sp_router *router;
2463 router = container_of(work, struct mlxsw_sp_router,
2464 neighs_update.dw.work);
2465 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2467 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity");
2469 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2471 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2474 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2476 struct mlxsw_sp_neigh_entry *neigh_entry;
2477 struct mlxsw_sp_router *router;
2479 router = container_of(work, struct mlxsw_sp_router,
2480 nexthop_probe_dw.work);
2481 /* Iterate over nexthop neighbours, find those that are unresolved and
2482  * send ARP requests for them. This solves the chicken-and-egg problem in
2483  * which a nexthop is not offloaded until its neighbour is resolved, but
2484  * the neighbour is never resolved if traffic is flowing in hardware
2485  * via a different nexthop. */
2487 mutex_lock(&router->lock);
2488 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2489 nexthop_neighs_list_node)
2490 if (!neigh_entry->connected)
2491 neigh_event_send(neigh_entry->key.n, NULL);
2492 mutex_unlock(&router->lock);
2494 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2495 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2499 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2500 struct mlxsw_sp_neigh_entry *neigh_entry,
2501 bool removing, bool dead);
2503 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2505 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2506 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
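/* Program or remove a neighbour as a host entry in the device's unicast host
 * table via the RAUHT register. When an activity counter was allocated for
 * the entry, it is attached to the same register write.
 */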
2510 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2511 struct mlxsw_sp_neigh_entry *neigh_entry,
2512 enum mlxsw_reg_rauht_op op)
2514 struct neighbour *n = neigh_entry->key.n;
2515 u32 dip = ntohl(*((__be32 *) n->primary_key));
2516 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2518 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2520 if (neigh_entry->counter_valid)
2521 mlxsw_reg_rauht_pack_counter(rauht_pl,
2522 neigh_entry->counter_index);
2523 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2527 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2528 struct mlxsw_sp_neigh_entry *neigh_entry,
2529 enum mlxsw_reg_rauht_op op)
2531 struct neighbour *n = neigh_entry->key.n;
2532 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2533 const char *dip = n->primary_key;
2535 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2537 if (neigh_entry->counter_valid)
2538 mlxsw_reg_rauht_pack_counter(rauht_pl,
2539 neigh_entry->counter_index);
2540 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2543 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2545 struct neighbour *n = neigh_entry->key.n;
2547 /* Packets with a link-local destination address are trapped
2548  * after LPM lookup and never reach the neighbour table, so
2549  * there is no need to program such neighbours to the device. */
2551 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2552 IPV6_ADDR_LINKLOCAL)
2558 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2559 struct mlxsw_sp_neigh_entry *neigh_entry,
2562 enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2565 if (!adding && !neigh_entry->connected)
2567 neigh_entry->connected = adding;
2568 if (neigh_entry->key.n->tbl->family == AF_INET) {
2569 err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2573 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2574 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2576 err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2586 neigh_entry->key.n->flags |= NTF_OFFLOADED;
2588 neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2592 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2593 struct mlxsw_sp_neigh_entry *neigh_entry,
2597 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2599 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2600 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
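/* Netevent notifications arrive in atomic context, so the parameters of
 * interest are copied into this work item and the actual processing is
 * deferred to process context.
 */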
2603 struct mlxsw_sp_netevent_work {
2604 struct work_struct work;
2605 struct mlxsw_sp *mlxsw_sp;
2606 struct neighbour *n;
2609 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2611 struct mlxsw_sp_netevent_work *net_work =
2612 container_of(work, struct mlxsw_sp_netevent_work, work);
2613 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2614 struct mlxsw_sp_neigh_entry *neigh_entry;
2615 struct neighbour *n = net_work->n;
2616 unsigned char ha[ETH_ALEN];
2617 bool entry_connected;
2620 /* If these parameters are changed after we release the lock, then we
2621  * are guaranteed to receive another event letting us do the update. */
2624 read_lock_bh(&n->lock);
2625 memcpy(ha, n->ha, ETH_ALEN);
2626 nud_state = n->nud_state;
2628 read_unlock_bh(&n->lock);
2630 mutex_lock(&mlxsw_sp->router->lock);
2631 mlxsw_sp_span_respin(mlxsw_sp);
2633 entry_connected = nud_state & NUD_VALID && !dead;
2634 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2635 if (!entry_connected && !neigh_entry)
2638 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2639 if (IS_ERR(neigh_entry))
2643 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2644 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2645 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2648 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2649 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2652 mutex_unlock(&mlxsw_sp->router->lock);
2657 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2659 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2661 struct mlxsw_sp_netevent_work *net_work =
2662 container_of(work, struct mlxsw_sp_netevent_work, work);
2663 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2665 mlxsw_sp_mp_hash_init(mlxsw_sp);
2669 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2671 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2673 struct mlxsw_sp_netevent_work *net_work =
2674 container_of(work, struct mlxsw_sp_netevent_work, work);
2675 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2677 __mlxsw_sp_router_init(mlxsw_sp);
2681 static int mlxsw_sp_router_schedule_work(struct net *net,
2682 struct notifier_block *nb,
2683 void (*cb)(struct work_struct *))
2685 struct mlxsw_sp_netevent_work *net_work;
2686 struct mlxsw_sp_router *router;
2688 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2689 if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2692 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2696 INIT_WORK(&net_work->work, cb);
2697 net_work->mlxsw_sp = router->mlxsw_sp;
2698 mlxsw_core_schedule_work(&net_work->work);
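/* The netevent notifier itself runs in atomic context. It only updates the
 * polling interval directly (DELAY_PROBE_TIME changes); neighbour updates,
 * multipath hash changes and forwarding priority changes are deferred to
 * the work items above.
 */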
2702 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2703 unsigned long event, void *ptr)
2705 struct mlxsw_sp_netevent_work *net_work;
2706 struct mlxsw_sp_port *mlxsw_sp_port;
2707 struct mlxsw_sp *mlxsw_sp;
2708 unsigned long interval;
2709 struct neigh_parms *p;
2710 struct neighbour *n;
2713 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2716 /* We don't care about changes in the default table. */
2717 if (!p->dev || (p->tbl->family != AF_INET &&
2718 p->tbl->family != AF_INET6))
2721 /* We are in atomic context and can't take RTNL mutex,
2722  * so use RCU variant to walk the device chain. */
2724 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2728 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2729 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2730 mlxsw_sp->router->neighs_update.interval = interval;
2732 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2734 case NETEVENT_NEIGH_UPDATE:
2737 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2740 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2744 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2746 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2750 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2751 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2754 /* Take a reference to ensure the neighbour won't be
2755  * destructed until we drop the reference in the delayed work. */
2759 mlxsw_core_schedule_work(&net_work->work);
2760 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2762 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2763 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2764 return mlxsw_sp_router_schedule_work(ptr, nb,
2765 mlxsw_sp_router_mp_hash_event_work);
2767 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2768 return mlxsw_sp_router_schedule_work(ptr, nb,
2769 mlxsw_sp_router_update_priority_work);
2775 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2779 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2780 &mlxsw_sp_neigh_ht_params);
2784 /* Initialize the polling interval according to the default table. */
2787 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2789 /* Create the delayed works for neighbour activity updates and for probing unresolved nexthops */
2790 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2791 mlxsw_sp_router_neighs_update_work);
2792 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2793 mlxsw_sp_router_probe_unresolved_nexthops);
2794 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2795 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2799 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2801 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2802 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2803 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2806 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2807 struct mlxsw_sp_rif *rif)
2809 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2811 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2813 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2814 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2818 enum mlxsw_sp_nexthop_type {
2819 MLXSW_SP_NEXTHOP_TYPE_ETH,
2820 MLXSW_SP_NEXTHOP_TYPE_IPIP,
2823 struct mlxsw_sp_nexthop_key {
2824 struct fib_nh *fib_nh;
2827 struct mlxsw_sp_nexthop {
2828 struct list_head neigh_list_node; /* member of neigh entry list */
2829 struct list_head rif_list_node;
2830 struct list_head router_list_node;
2831 struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
2832  * this nexthop belongs to */
2834 struct rhash_head ht_node;
2835 struct neigh_table *neigh_tbl;
2836 struct mlxsw_sp_nexthop_key key;
2837 unsigned char gw_addr[sizeof(struct in6_addr)];
2841 int num_adj_entries;
2842 struct mlxsw_sp_rif *rif;
2843 u8 should_offload:1, /* set indicates this neigh is connected and
2844  * should be put to KVD linear area of this group. */
2846 offloaded:1, /* set in case the neigh is actually put into
2847  * KVD linear area of this group. */
2849 update:1; /* set indicates that MAC of this neigh should be updated in HW. */
2852 enum mlxsw_sp_nexthop_type type;
2854 struct mlxsw_sp_neigh_entry *neigh_entry;
2855 struct mlxsw_sp_ipip_entry *ipip_entry;
2857 unsigned int counter_index;
2861 enum mlxsw_sp_nexthop_group_type {
2862 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
2863 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
2866 struct mlxsw_sp_nexthop_group_info {
2867 struct mlxsw_sp_nexthop_group *nh_grp;
2871 int sum_norm_weight;
2872 u8 adj_index_valid:1,
2873 gateway:1; /* routes using the group use a gateway */
2874 struct mlxsw_sp_nexthop nexthops[0];
2875 #define nh_rif nexthops[0].rif
2878 struct mlxsw_sp_nexthop_group {
2879 struct rhash_head ht_node;
2880 struct list_head fib_list; /* list of fib entries that use this group */
2883 struct fib_info *fi;
2886 struct mlxsw_sp_nexthop_group_info *nhgi;
2887 enum mlxsw_sp_nexthop_group_type type;
2890 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2891 struct mlxsw_sp_nexthop *nh)
2893 struct devlink *devlink;
2895 devlink = priv_to_devlink(mlxsw_sp->core);
2896 if (!devlink_dpipe_table_counter_enabled(devlink,
2897 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2900 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
2903 nh->counter_valid = true;
2906 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
2907 struct mlxsw_sp_nexthop *nh)
2909 if (!nh->counter_valid)
2911 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
2912 nh->counter_valid = false;
2915 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
2916 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
2918 if (!nh->counter_valid)
2921 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
2925 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
2926 struct mlxsw_sp_nexthop *nh)
2929 if (list_empty(&router->nexthop_list))
2932 return list_first_entry(&router->nexthop_list,
2933 typeof(*nh), router_list_node);
2935 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
2937 return list_next_entry(nh, router_list_node);
2940 bool mlxsw_sp_nexthop_offload(struct mlxsw_sp_nexthop *nh)
2942 return nh->offloaded;
2945 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
2949 return nh->neigh_entry->ha;
2952 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
2953 u32 *p_adj_size, u32 *p_adj_hash_index)
2955 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
2956 u32 adj_hash_index = 0;
2959 if (!nh->offloaded || !nhgi->adj_index_valid)
2962 *p_adj_index = nhgi->adj_index;
2963 *p_adj_size = nhgi->ecmp_size;
2965 for (i = 0; i < nhgi->count; i++) {
2966 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
2970 if (nh_iter->offloaded)
2971 adj_hash_index += nh_iter->num_adj_entries;
2974 *p_adj_hash_index = adj_hash_index;
2978 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
2983 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
2985 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
2988 for (i = 0; i < nhgi->count; i++) {
2989 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
2991 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
2997 struct mlxsw_sp_nexthop_group_cmp_arg {
2998 enum mlxsw_sp_nexthop_group_type type;
3000 struct fib_info *fi;
3001 struct mlxsw_sp_fib6_entry *fib6_entry;
3006 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3007 const struct in6_addr *gw, int ifindex,
3012 for (i = 0; i < nh_grp->nhgi->count; i++) {
3013 const struct mlxsw_sp_nexthop *nh;
3015 nh = &nh_grp->nhgi->nexthops[i];
3016 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3017 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3025 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3026 const struct mlxsw_sp_fib6_entry *fib6_entry)
3028 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3030 if (nh_grp->nhgi->count != fib6_entry->nrt6)
3033 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3034 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3035 struct in6_addr *gw;
3036 int ifindex, weight;
3038 ifindex = fib6_nh->fib_nh_dev->ifindex;
3039 weight = fib6_nh->fib_nh_weight;
3040 gw = &fib6_nh->fib_nh_gw6;
3041 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3050 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3052 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3053 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3055 if (nh_grp->type != cmp_arg->type)
3058 switch (cmp_arg->type) {
3059 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3060 return cmp_arg->fi != nh_grp->ipv4.fi;
3061 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3062 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3063 cmp_arg->fib6_entry);
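/* The group hash must be computable both from a lookup key (fib_info for
 * IPv4, fib6_entry for IPv6) and from an existing group, and the two must
 * agree: IPv4 hashes the fib_info pointer, while IPv6 hashes a value derived
 * from each nexthop's ifindex and gateway address.
 */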
3070 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3072 const struct mlxsw_sp_nexthop_group *nh_grp = data;
3073 const struct mlxsw_sp_nexthop *nh;
3074 struct fib_info *fi;
3078 switch (nh_grp->type) {
3079 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3080 fi = nh_grp->ipv4.fi;
3081 return jhash(&fi, sizeof(fi), seed);
3082 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3083 val = nh_grp->nhgi->count;
3084 for (i = 0; i < nh_grp->nhgi->count; i++) {
3085 nh = &nh_grp->nhgi->nexthops[i];
3086 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3087 val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3089 return jhash(&val, sizeof(val), seed);
3097 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3099 unsigned int val = fib6_entry->nrt6;
3100 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3102 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3103 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3104 struct net_device *dev = fib6_nh->fib_nh_dev;
3105 struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3107 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3108 val ^= jhash(gw, sizeof(*gw), seed);
3111 return jhash(&val, sizeof(val), seed);
3115 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3117 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3119 switch (cmp_arg->type) {
3120 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3121 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3122 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3123 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3130 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3131 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3132 .hashfn = mlxsw_sp_nexthop_group_hash,
3133 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
3134 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
3137 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3138 struct mlxsw_sp_nexthop_group *nh_grp)
3140 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3141 !nh_grp->nhgi->gateway)
3144 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3146 mlxsw_sp_nexthop_group_ht_params);
3149 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3150 struct mlxsw_sp_nexthop_group *nh_grp)
3152 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3153 !nh_grp->nhgi->gateway)
3156 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3158 mlxsw_sp_nexthop_group_ht_params);
3161 static struct mlxsw_sp_nexthop_group *
3162 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3163 struct fib_info *fi)
3165 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3167 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3169 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3171 mlxsw_sp_nexthop_group_ht_params);
3174 static struct mlxsw_sp_nexthop_group *
3175 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3176 struct mlxsw_sp_fib6_entry *fib6_entry)
3178 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3180 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3181 cmp_arg.fib6_entry = fib6_entry;
3182 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3184 mlxsw_sp_nexthop_group_ht_params);
3187 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3188 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3189 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3190 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3193 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3194 struct mlxsw_sp_nexthop *nh)
3196 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3197 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3200 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3201 struct mlxsw_sp_nexthop *nh)
3203 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3204 mlxsw_sp_nexthop_ht_params);
3207 static struct mlxsw_sp_nexthop *
3208 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3209 struct mlxsw_sp_nexthop_key key)
3211 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3212 mlxsw_sp_nexthop_ht_params);
3215 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3216 const struct mlxsw_sp_fib *fib,
3217 u32 adj_index, u16 ecmp_size,
3221 char raleu_pl[MLXSW_REG_RALEU_LEN];
3223 mlxsw_reg_raleu_pack(raleu_pl,
3224 (enum mlxsw_reg_ralxx_protocol) fib->proto,
3225 fib->vr->id, adj_index, ecmp_size, new_adj_index,
3227 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
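/* When an adjacency group is moved to a new KVD linear index, walk all FIB
 * entries using the group and issue one RALEU mass-update per {protocol,
 * virtual router}, switching routes from the old adjacency index to the new
 * one without rewriting each route individually.
 */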
3230 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3231 struct mlxsw_sp_nexthop_group *nh_grp,
3232 u32 old_adj_index, u16 old_ecmp_size)
3234 struct mlxsw_sp_fib_entry *fib_entry;
3235 struct mlxsw_sp_fib *fib = NULL;
3238 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3239 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3241 if (fib == fib_entry->fib_node->fib)
3243 fib = fib_entry->fib_node->fib;
3244 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, fib,
3255 static int __mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3256 struct mlxsw_sp_nexthop *nh)
3258 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3259 char ratr_pl[MLXSW_REG_RATR_LEN];
3261 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
3262 true, MLXSW_REG_RATR_TYPE_ETHERNET,
3263 adj_index, neigh_entry->rif);
3264 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3265 if (nh->counter_valid)
3266 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3268 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3270 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3273 int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3274 struct mlxsw_sp_nexthop *nh)
3278 for (i = 0; i < nh->num_adj_entries; i++) {
3281 err = __mlxsw_sp_nexthop_update(mlxsw_sp, adj_index + i, nh);
3289 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3291 struct mlxsw_sp_nexthop *nh)
3293 const struct mlxsw_sp_ipip_ops *ipip_ops;
3295 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3296 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry);
3299 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3301 struct mlxsw_sp_nexthop *nh)
3305 for (i = 0; i < nh->num_adj_entries; i++) {
3308 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3318 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3319 struct mlxsw_sp_nexthop_group_info *nhgi,
3322 u32 adj_index = nhgi->adj_index; /* base */
3323 struct mlxsw_sp_nexthop *nh;
3326 for (i = 0; i < nhgi->count; i++) {
3327 nh = &nhgi->nexthops[i];
3329 if (!nh->should_offload) {
3334 if (nh->update || reallocate) {
3338 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3339 err = mlxsw_sp_nexthop_update
3340 (mlxsw_sp, adj_index, nh);
3342 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3343 err = mlxsw_sp_nexthop_ipip_update
3344 (mlxsw_sp, adj_index, nh);
3352 adj_index += nh->num_adj_entries;
3358 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3359 struct mlxsw_sp_nexthop_group *nh_grp)
3361 struct mlxsw_sp_fib_entry *fib_entry;
3364 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3365 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3372 static void mlxsw_sp_adj_grp_size_round_up(u16 *p_adj_grp_size)
3374 /* Valid sizes for an adjacency group are:
3375  * 1-64, 512, 1024, 2048 and 4096. */
3377 if (*p_adj_grp_size <= 64)
3379 else if (*p_adj_grp_size <= 512)
3380 *p_adj_grp_size = 512;
3381 else if (*p_adj_grp_size <= 1024)
3382 *p_adj_grp_size = 1024;
3383 else if (*p_adj_grp_size <= 2048)
3384 *p_adj_grp_size = 2048;
3386 *p_adj_grp_size = 4096;
3389 static void mlxsw_sp_adj_grp_size_round_down(u16 *p_adj_grp_size,
3390 unsigned int alloc_size)
3392 if (alloc_size >= 4096)
3393 *p_adj_grp_size = 4096;
3394 else if (alloc_size >= 2048)
3395 *p_adj_grp_size = 2048;
3396 else if (alloc_size >= 1024)
3397 *p_adj_grp_size = 1024;
3398 else if (alloc_size >= 512)
3399 *p_adj_grp_size = 512;
3402 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3403 u16 *p_adj_grp_size)
3405 unsigned int alloc_size;
3408 /* Round up the requested group size to the next size supported
3409  * by the device and make sure the request can be satisfied. */
3411 mlxsw_sp_adj_grp_size_round_up(p_adj_grp_size);
3412 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3413 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3414 *p_adj_grp_size, &alloc_size);
3417 /* It is possible the allocation results in more allocated
3418  * entries than requested. Try to use as many of them as possible. */
3421 mlxsw_sp_adj_grp_size_round_down(p_adj_grp_size, alloc_size);
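/* Nexthop weights are reduced by their greatest common divisor so that the
 * smallest possible number of adjacency entries is used; e.g., weights of
 * 10, 20 and 30 normalize to 1, 2 and 3. Nexthops that should not be
 * offloaded are ignored and, if none remain, sum_norm_weight stays zero.
 */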
3427 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3429 int i, g = 0, sum_norm_weight = 0;
3430 struct mlxsw_sp_nexthop *nh;
3432 for (i = 0; i < nhgi->count; i++) {
3433 nh = &nhgi->nexthops[i];
3435 if (!nh->should_offload)
3438 g = gcd(nh->nh_weight, g);
3443 for (i = 0; i < nhgi->count; i++) {
3444 nh = &nhgi->nexthops[i];
3446 if (!nh->should_offload)
3448 nh->norm_nh_weight = nh->nh_weight / g;
3449 sum_norm_weight += nh->norm_nh_weight;
3452 nhgi->sum_norm_weight = sum_norm_weight;
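/* Split the group's ECMP size among the offloaded nexthops in proportion to
 * their normalized weights, using cumulative rounded bounds so that the
 * per-nexthop entry counts always sum to exactly the ECMP size. For example,
 * normalized weights of 1 and 2 with an ECMP size of 512 yield 171 and 341
 * adjacency entries, respectively.
 */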
3456 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3458 int i, weight = 0, lower_bound = 0;
3459 int total = nhgi->sum_norm_weight;
3460 u16 ecmp_size = nhgi->ecmp_size;
3462 for (i = 0; i < nhgi->count; i++) {
3463 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3466 if (!nh->should_offload)
3468 weight += nh->norm_nh_weight;
3469 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3470 nh->num_adj_entries = upper_bound - lower_bound;
3471 lower_bound = upper_bound;
3475 static struct mlxsw_sp_nexthop *
3476 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3477 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3480 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3481 struct mlxsw_sp_nexthop_group *nh_grp)
3485 for (i = 0; i < nh_grp->nhgi->count; i++) {
3486 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3489 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3491 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3496 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3497 struct mlxsw_sp_fib6_entry *fib6_entry)
3499 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3501 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3502 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3503 struct mlxsw_sp_nexthop *nh;
3505 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3506 if (nh && nh->offloaded)
3507 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3509 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3514 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3515 struct mlxsw_sp_nexthop_group *nh_grp)
3517 struct mlxsw_sp_fib6_entry *fib6_entry;
3519 /* Unfortunately, in IPv6 the route and the nexthop are described by
3520 * the same struct, so we need to iterate over all the routes using the
3521 * nexthop group and set / clear the offload indication for them.
3523 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3524 common.nexthop_group_node)
3525 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3529 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3530 struct mlxsw_sp_nexthop_group *nh_grp)
3532 switch (nh_grp->type) {
3533 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3534 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3536 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3537 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
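/* Re-evaluate the whole nexthop group: decide which nexthops should be
 * offloaded, normalize and rebalance their weights, allocate a new adjacency
 * group in KVD linear memory and populate it. Routes are then either updated
 * to the new adjacency index (when none was valid before) or mass-updated
 * from the old index, and the old group is freed. If no nexthop is usable or
 * the allocation fails, the routes fall back to trapping traffic to the CPU.
 */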
3543 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3544 struct mlxsw_sp_nexthop_group *nh_grp)
3546 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3547 u16 ecmp_size, old_ecmp_size;
3548 struct mlxsw_sp_nexthop *nh;
3549 bool offload_change = false;
3551 bool old_adj_index_valid;
3556 if (!nhgi->gateway) {
3557 mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3561 for (i = 0; i < nhgi->count; i++) {
3562 nh = &nhgi->nexthops[i];
3564 if (nh->should_offload != nh->offloaded) {
3565 offload_change = true;
3566 if (nh->should_offload)
3570 if (!offload_change) {
3571 /* Nothing was added or removed, so no need to reallocate. Just
3572  * update MAC on existing adjacency indexes. */
3574 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
3576 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3581 mlxsw_sp_nexthop_group_normalize(nhgi);
3582 if (!nhgi->sum_norm_weight)
3583 /* No neigh of this group is connected so we just set
3584  * the trap and let everything flow through the kernel. */
3588 ecmp_size = nhgi->sum_norm_weight;
3589 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3591 /* No valid allocation size available. */
3594 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3595 ecmp_size, &adj_index);
3597 /* We ran out of KVD linear space, just set the
3598  * trap and let everything flow through the kernel. */
3600 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3603 old_adj_index_valid = nhgi->adj_index_valid;
3604 old_adj_index = nhgi->adj_index;
3605 old_ecmp_size = nhgi->ecmp_size;
3606 nhgi->adj_index_valid = 1;
3607 nhgi->adj_index = adj_index;
3608 nhgi->ecmp_size = ecmp_size;
3609 mlxsw_sp_nexthop_group_rebalance(nhgi);
3610 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
3612 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3616 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3618 if (!old_adj_index_valid) {
3619 /* The trap was set for fib entries, so we have to call
3620  * fib entry update to unset it and use adjacency index. */
3622 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3624 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3630 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3631 old_adj_index, old_ecmp_size);
3632 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3633 old_ecmp_size, old_adj_index);
3635 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3642 old_adj_index_valid = nhgi->adj_index_valid;
3643 nhgi->adj_index_valid = 0;
3644 for (i = 0; i < nhgi->count; i++) {
3645 nh = &nhgi->nexthops[i];
3648 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3650 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3651 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3652 if (old_adj_index_valid)
3653 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3654 nhgi->ecmp_size, nhgi->adj_index);
3657 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3661 nh->should_offload = 1;
3663 nh->should_offload = 0;
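/* The kernel may declare a neighbour dead while nexthops still use it. Look
 * up (or create) a fresh neighbour for the same gateway and device, re-key
 * the hash table entry to it, move the per-nexthop references over to it and
 * release those held on the old neighbour.
 */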
3668 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
3669 struct mlxsw_sp_neigh_entry *neigh_entry)
3671 struct neighbour *n, *old_n = neigh_entry->key.n;
3672 struct mlxsw_sp_nexthop *nh;
3673 bool entry_connected;
3677 nh = list_first_entry(&neigh_entry->nexthop_list,
3678 struct mlxsw_sp_nexthop, neigh_list_node);
3680 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3682 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3685 neigh_event_send(n, NULL);
3688 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
3689 neigh_entry->key.n = n;
3690 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3692 goto err_neigh_entry_insert;
3694 read_lock_bh(&n->lock);
3695 nud_state = n->nud_state;
3697 read_unlock_bh(&n->lock);
3698 entry_connected = nud_state & NUD_VALID && !dead;
3700 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3702 neigh_release(old_n);
3704 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
3705 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
3712 err_neigh_entry_insert:
3713 neigh_entry->key.n = old_n;
3714 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
3720 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
3721 struct mlxsw_sp_neigh_entry *neigh_entry,
3722 bool removing, bool dead)
3724 struct mlxsw_sp_nexthop *nh;
3726 if (list_empty(&neigh_entry->nexthop_list))
3732 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
3735 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
3739 list_for_each_entry(nh, &neigh_entry->nexthop_list,
3741 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3742 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
3746 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
3747 struct mlxsw_sp_rif *rif)
3753 list_add(&nh->rif_list_node, &rif->nexthop_list);
3756 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
3761 list_del(&nh->rif_list_node);
3765 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
3766 struct mlxsw_sp_nexthop *nh)
3768 struct mlxsw_sp_neigh_entry *neigh_entry;
3769 struct neighbour *n;
3773 if (!nh->nhgi->gateway || nh->neigh_entry)
3776 /* Take a reference on the neighbour here to ensure it is not
3777  * destroyed before the nexthop entry is finished.
3778  * The reference is taken either in neigh_lookup() or
3779  * in neigh_create() in case n is not found. */
3781 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3783 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
3786 neigh_event_send(n, NULL);
3788 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
3790 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
3791 if (IS_ERR(neigh_entry)) {
3793 goto err_neigh_entry_create;
3797 /* If that is the first nexthop connected to that neigh, add to
3798  * nexthop_neighs_list */
3800 if (list_empty(&neigh_entry->nexthop_list))
3801 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
3802 &mlxsw_sp->router->nexthop_neighs_list);
3804 nh->neigh_entry = neigh_entry;
3805 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
3806 read_lock_bh(&n->lock);
3807 nud_state = n->nud_state;
3809 read_unlock_bh(&n->lock);
3810 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
3814 err_neigh_entry_create:
3819 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
3820 struct mlxsw_sp_nexthop *nh)
3822 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3823 struct neighbour *n;
3827 n = neigh_entry->key.n;
3829 __mlxsw_sp_nexthop_neigh_update(nh, true);
3830 list_del(&nh->neigh_list_node);
3831 nh->neigh_entry = NULL;
3833 /* If that is the last nexthop connected to that neigh, remove from
3834  * nexthop_neighs_list */
3836 if (list_empty(&neigh_entry->nexthop_list))
3837 list_del(&neigh_entry->nexthop_neighs_list_node);
3839 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
3840 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
3845 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
3847 struct net_device *ul_dev;
3851 ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
3852 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
3858 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
3859 struct mlxsw_sp_nexthop *nh,
3860 struct mlxsw_sp_ipip_entry *ipip_entry)
3864 if (!nh->nhgi->gateway || nh->ipip_entry)
3867 nh->ipip_entry = ipip_entry;
3868 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
3869 __mlxsw_sp_nexthop_neigh_update(nh, removing);
3870 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
3873 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
3874 struct mlxsw_sp_nexthop *nh)
3876 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
3881 __mlxsw_sp_nexthop_neigh_update(nh, true);
3882 nh->ipip_entry = NULL;
3885 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
3886 const struct fib_nh *fib_nh,
3887 enum mlxsw_sp_ipip_type *p_ipipt)
3889 struct net_device *dev = fib_nh->fib_nh_dev;
3892 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
3893 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
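/* If the nexthop egresses through an IP-in-IP tunnel that the device can
 * offload, bind it to the tunnel's IPIP entry and its loopback RIF.
 * Otherwise treat it as an Ethernet nexthop: bind it to the egress RIF and
 * resolve its neighbour.
 */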
3896 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
3897 struct mlxsw_sp_nexthop *nh,
3898 const struct net_device *dev)
3900 const struct mlxsw_sp_ipip_ops *ipip_ops;
3901 struct mlxsw_sp_ipip_entry *ipip_entry;
3902 struct mlxsw_sp_rif *rif;
3905 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
3907 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
3908 if (ipip_ops->can_offload(mlxsw_sp, dev)) {
3909 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
3910 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
3915 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
3916 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
3920 mlxsw_sp_nexthop_rif_init(nh, rif);
3921 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
3923 goto err_neigh_init;
3928 mlxsw_sp_nexthop_rif_fini(nh);
3932 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
3933 struct mlxsw_sp_nexthop *nh)
3936 case MLXSW_SP_NEXTHOP_TYPE_ETH:
3937 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
3938 mlxsw_sp_nexthop_rif_fini(nh);
3940 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
3941 mlxsw_sp_nexthop_rif_fini(nh);
3942 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
3947 static void mlxsw_sp_nexthop4_type_fini(struct mlxsw_sp *mlxsw_sp,
3948 struct mlxsw_sp_nexthop *nh)
3950 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
3953 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
3954 struct mlxsw_sp_nexthop_group *nh_grp,
3955 struct mlxsw_sp_nexthop *nh,
3956 struct fib_nh *fib_nh)
3958 struct net_device *dev = fib_nh->fib_nh_dev;
3959 struct in_device *in_dev;
3962 nh->nhgi = nh_grp->nhgi;
3963 nh->key.fib_nh = fib_nh;
3964 #ifdef CONFIG_IP_ROUTE_MULTIPATH
3965 nh->nh_weight = fib_nh->fib_nh_weight;
3969 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
3970 nh->neigh_tbl = &arp_tbl;
3971 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
3975 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
3976 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
3982 in_dev = __in_dev_get_rcu(dev);
3983 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
3984 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
3990 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
3992 goto err_nexthop_neigh_init;
3996 err_nexthop_neigh_init:
3997 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4001 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4002 struct mlxsw_sp_nexthop *nh)
4004 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
4005 list_del(&nh->router_list_node);
4006 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4007 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4010 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4011 unsigned long event, struct fib_nh *fib_nh)
4013 struct mlxsw_sp_nexthop_key key;
4014 struct mlxsw_sp_nexthop *nh;
4016 if (mlxsw_sp->router->aborted)
4019 key.fib_nh = fib_nh;
4020 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4025 case FIB_EVENT_NH_ADD:
4026 mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4028 case FIB_EVENT_NH_DEL:
4029 mlxsw_sp_nexthop4_type_fini(mlxsw_sp, nh);
4033 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4036 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4037 struct mlxsw_sp_rif *rif)
4039 struct mlxsw_sp_nexthop *nh;
4042 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4044 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4047 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4048 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4055 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4056 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4060 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4061 struct mlxsw_sp_rif *old_rif,
4062 struct mlxsw_sp_rif *new_rif)
4064 struct mlxsw_sp_nexthop *nh;
4066 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4067 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4069 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4072 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4073 struct mlxsw_sp_rif *rif)
4075 struct mlxsw_sp_nexthop *nh, *tmp;
4077 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4078 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4079 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4083 static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
4084 struct fib_info *fi)
4086 const struct fib_nh *nh = fib_info_nh(fi, 0);
4088 return nh->fib_nh_scope == RT_SCOPE_LINK ||
4089 mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
4093 mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
4094 struct mlxsw_sp_nexthop_group *nh_grp)
4096 unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
4097 struct mlxsw_sp_nexthop_group_info *nhgi;
4098 struct mlxsw_sp_nexthop *nh;
4101 nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
4104 nh_grp->nhgi = nhgi;
4105 nhgi->nh_grp = nh_grp;
4106 nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
4108 for (i = 0; i < nhgi->count; i++) {
4109 struct fib_nh *fib_nh;
4111 nh = &nhgi->nexthops[i];
4112 fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
4113 err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
4115 goto err_nexthop4_init;
4117 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4122 for (i--; i >= 0; i--) {
4123 nh = &nhgi->nexthops[i];
4124 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4131 mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
4132 struct mlxsw_sp_nexthop_group *nh_grp)
4134 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
4137 for (i = nhgi->count - 1; i >= 0; i--) {
4138 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
4140 mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
4142 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
4143 WARN_ON_ONCE(nhgi->adj_index_valid);
4147 static struct mlxsw_sp_nexthop_group *
4148 mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
4150 struct mlxsw_sp_nexthop_group *nh_grp;
4153 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
4155 return ERR_PTR(-ENOMEM);
4156 INIT_LIST_HEAD(&nh_grp->fib_list);
4157 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
4158 nh_grp->ipv4.fi = fi;
4161 err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
4163 goto err_nexthop_group_info_init;
4165 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
4167 goto err_nexthop_group_insert;
4171 err_nexthop_group_insert:
4172 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
4173 err_nexthop_group_info_init:
4176 return ERR_PTR(err);
4180 mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
4181 struct mlxsw_sp_nexthop_group *nh_grp)
4183 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
4184 mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
4185 fib_info_put(nh_grp->ipv4.fi);
4189 static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
4190 struct mlxsw_sp_fib_entry *fib_entry,
4191 struct fib_info *fi)
4193 struct mlxsw_sp_nexthop_group *nh_grp;
4195 nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
4197 nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
4199 return PTR_ERR(nh_grp);
4201 list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
4202 fib_entry->nh_group = nh_grp;
4206 static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
4207 struct mlxsw_sp_fib_entry *fib_entry)
4209 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
4211 list_del(&fib_entry->nexthop_group_node);
4212 if (!list_empty(&nh_grp->fib_list))
4214 mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
4218 mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4220 struct mlxsw_sp_fib4_entry *fib4_entry;
4222 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4224 return !fib4_entry->tos;
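/* Only TOS-0 IPv4 routes are candidates for offload. Beyond that, a remote
 * entry needs a valid adjacency index and a local entry needs a RIF;
 * blackhole and decap entries are handled regardless of the nexthop group.
 */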
4228 mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
4230 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4232 switch (fib_entry->fib_node->fib->proto) {
4233 case MLXSW_SP_L3_PROTO_IPV4:
4234 if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
4237 case MLXSW_SP_L3_PROTO_IPV6:
4241 switch (fib_entry->type) {
4242 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4243 return !!nh_group->nhgi->adj_index_valid;
4244 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4245 return !!nh_group->nhgi->nh_rif;
4246 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4247 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4248 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4255 static struct mlxsw_sp_nexthop *
4256 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
4257 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
4261 for (i = 0; i < nh_grp->nhgi->count; i++) {
4262 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
4263 struct fib6_info *rt = mlxsw_sp_rt6->rt;
4265 if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
4266 ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
4267 &rt->fib6_nh->fib_nh_gw6))
4276 mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4277 struct mlxsw_sp_fib_entry *fib_entry)
4279 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4280 int dst_len = fib_entry->fib_node->key.prefix_len;
4281 struct mlxsw_sp_fib4_entry *fib4_entry;
4282 struct fib_rt_info fri;
4283 bool should_offload;
4285 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4286 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4288 fri.fi = fib4_entry->fi;
4289 fri.tb_id = fib4_entry->tb_id;
4290 fri.dst = cpu_to_be32(*p_dst);
4291 fri.dst_len = dst_len;
4292 fri.tos = fib4_entry->tos;
4293 fri.type = fib4_entry->type;
4294 fri.offload = should_offload;
4295 fri.trap = !should_offload;
4296 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4300 mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4301 struct mlxsw_sp_fib_entry *fib_entry)
4303 u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
4304 int dst_len = fib_entry->fib_node->key.prefix_len;
4305 struct mlxsw_sp_fib4_entry *fib4_entry;
4306 struct fib_rt_info fri;
4308 fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
4310 fri.fi = fib4_entry->fi;
4311 fri.tb_id = fib4_entry->tb_id;
4312 fri.dst = cpu_to_be32(*p_dst);
4313 fri.dst_len = dst_len;
4314 fri.tos = fib4_entry->tos;
4315 fri.type = fib4_entry->type;
4316 fri.offload = false;
4318 fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
4322 mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4323 struct mlxsw_sp_fib_entry *fib_entry)
4325 struct mlxsw_sp_fib6_entry *fib6_entry;
4326 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4327 bool should_offload;
4329 should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
4331 /* In IPv6 a multipath route is represented using multiple routes, so
4332  * we need to set the flags on all of them. */
4334 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4336 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4337 fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, should_offload,
4342 mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4343 struct mlxsw_sp_fib_entry *fib_entry)
4345 struct mlxsw_sp_fib6_entry *fib6_entry;
4346 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
4348 fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
4350 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
4351 fib6_info_hw_flags_set(mlxsw_sp_rt6->rt, false, false);
4355 mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
4356 struct mlxsw_sp_fib_entry *fib_entry)
4358 switch (fib_entry->fib_node->fib->proto) {
4359 case MLXSW_SP_L3_PROTO_IPV4:
4360 mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
4362 case MLXSW_SP_L3_PROTO_IPV6:
4363 mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
4369 mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
4370 struct mlxsw_sp_fib_entry *fib_entry)
4372 switch (fib_entry->fib_node->fib->proto) {
4373 case MLXSW_SP_L3_PROTO_IPV4:
4374 mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4376 case MLXSW_SP_L3_PROTO_IPV6:
4377 mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
4383 mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
4384 struct mlxsw_sp_fib_entry *fib_entry,
4385 enum mlxsw_sp_fib_entry_op op)
4388 case MLXSW_SP_FIB_ENTRY_OP_WRITE:
4389 case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
4390 mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
4392 case MLXSW_SP_FIB_ENTRY_OP_DELETE:
4393 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
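/* The "basic" low-level ops below translate generic FIB entry operations
 * into single RALUE register writes: the route key and action are packed
 * into one register payload and committed immediately, so entries are never
 * postponed for bulking.
 */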
4400 struct mlxsw_sp_fib_entry_op_ctx_basic {
4401 char ralue_pl[MLXSW_REG_RALUE_LEN];
4405 mlxsw_sp_router_ll_basic_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4406 enum mlxsw_sp_l3proto proto,
4407 enum mlxsw_sp_fib_entry_op op,
4408 u16 virtual_router, u8 prefix_len,
4409 unsigned char *addr,
4410 struct mlxsw_sp_fib_entry_priv *priv)
4412 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4413 enum mlxsw_reg_ralxx_protocol ralxx_proto;
4414 char *ralue_pl = op_ctx_basic->ralue_pl;
4415 enum mlxsw_reg_ralue_op ralue_op;
4417 ralxx_proto = (enum mlxsw_reg_ralxx_protocol) proto;
4420 case MLXSW_SP_FIB_ENTRY_OP_WRITE:
4421 case MLXSW_SP_FIB_ENTRY_OP_UPDATE:
4422 ralue_op = MLXSW_REG_RALUE_OP_WRITE_WRITE;
4424 case MLXSW_SP_FIB_ENTRY_OP_DELETE:
4425 ralue_op = MLXSW_REG_RALUE_OP_WRITE_DELETE;
4433 case MLXSW_SP_L3_PROTO_IPV4:
4434 mlxsw_reg_ralue_pack4(ralue_pl, ralxx_proto, ralue_op,
4435 virtual_router, prefix_len, (u32 *) addr);
4437 case MLXSW_SP_L3_PROTO_IPV6:
4438 mlxsw_reg_ralue_pack6(ralue_pl, ralxx_proto, ralue_op,
4439 virtual_router, prefix_len, addr);
4445 mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4446 enum mlxsw_reg_ralue_trap_action trap_action,
4447 u16 trap_id, u32 adjacency_index, u16 ecmp_size)
4449 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4451 mlxsw_reg_ralue_act_remote_pack(op_ctx_basic->ralue_pl, trap_action,
4452 trap_id, adjacency_index, ecmp_size);
4456 mlxsw_sp_router_ll_basic_fib_entry_act_local_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4457 enum mlxsw_reg_ralue_trap_action trap_action,
4458 u16 trap_id, u16 local_erif)
4460 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4462 mlxsw_reg_ralue_act_local_pack(op_ctx_basic->ralue_pl, trap_action,
4463 trap_id, local_erif);
4467 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
4469 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4471 mlxsw_reg_ralue_act_ip2me_pack(op_ctx_basic->ralue_pl);
4475 mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4478 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4480 mlxsw_reg_ralue_act_ip2me_tun_pack(op_ctx_basic->ralue_pl, tunnel_ptr);
4484 mlxsw_sp_router_ll_basic_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
4485 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4486 bool *postponed_for_bulk)
4488 struct mlxsw_sp_fib_entry_op_ctx_basic *op_ctx_basic = (void *) op_ctx->ll_priv;
4490 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue),
4491 op_ctx_basic->ralue_pl);
4495 mlxsw_sp_router_ll_basic_fib_entry_is_committed(struct mlxsw_sp_fib_entry_priv *priv)
4500 static void mlxsw_sp_fib_entry_pack(struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4501 struct mlxsw_sp_fib_entry *fib_entry,
4502 enum mlxsw_sp_fib_entry_op op)
4504 struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
4506 mlxsw_sp_fib_entry_op_ctx_priv_hold(op_ctx, fib_entry->priv);
4507 fib->ll_ops->fib_entry_pack(op_ctx, fib->proto, op, fib->vr->id,
4508 fib_entry->fib_node->key.prefix_len,
4509 fib_entry->fib_node->key.addr,
4513 int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
4514 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4515 const struct mlxsw_sp_router_ll_ops *ll_ops)
4517 bool postponed_for_bulk = false;
4520 err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, &postponed_for_bulk);
4521 if (!postponed_for_bulk)
4522 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
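/* Lazily allocate a single adjacency entry whose action is to discard
 * packets (counted as errors) and bind it to the given RIF. It backs routes
 * whose nexthop group has nexthops and a RIF but no valid adjacency index
 * yet, so such traffic is dropped in hardware rather than trapped.
 */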
4526 static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp, u16 rif_index)
4528 enum mlxsw_reg_ratr_trap_action trap_action;
4529 char ratr_pl[MLXSW_REG_RATR_LEN];
4532 if (mlxsw_sp->router->adj_discard_index_valid)
4535 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4536 &mlxsw_sp->router->adj_discard_index);
4540 trap_action = MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS;
4541 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4542 MLXSW_REG_RATR_TYPE_ETHERNET,
4543 mlxsw_sp->router->adj_discard_index, rif_index);
4544 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4545 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4547 goto err_ratr_write;
4549 mlxsw_sp->router->adj_discard_index_valid = true;
4554 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4555 mlxsw_sp->router->adj_discard_index);
4559 static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
4560 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4561 struct mlxsw_sp_fib_entry *fib_entry,
4562 enum mlxsw_sp_fib_entry_op op)
4564 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4565 struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
4566 struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
4567 enum mlxsw_reg_ralue_trap_action trap_action;
4569 u32 adjacency_index = 0;
4573 /* In case the nexthop group adjacency index is valid, use it
4574 * with provided ECMP size. Otherwise, setup trap and pass
4575 * traffic to kernel.
4576 */
4577 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4578 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4579 adjacency_index = nhgi->adj_index;
4580 ecmp_size = nhgi->ecmp_size;
4581 } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
4582 err = mlxsw_sp_adj_discard_write(mlxsw_sp,
4583 nhgi->nh_rif->rif_index);
4586 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4587 adjacency_index = mlxsw_sp->router->adj_discard_index;
4590 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4591 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4594 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4595 ll_ops->fib_entry_act_remote_pack(op_ctx, trap_action, trap_id,
4596 adjacency_index, ecmp_size);
4597 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
4600 static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
4601 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4602 struct mlxsw_sp_fib_entry *fib_entry,
4603 enum mlxsw_sp_fib_entry_op op)
4605 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4606 struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
4607 enum mlxsw_reg_ralue_trap_action trap_action;
4611 if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
4612 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
4613 rif_index = rif->rif_index;
4615 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4616 trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
4619 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4620 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, rif_index);
4621 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
4624 static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
4625 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4626 struct mlxsw_sp_fib_entry *fib_entry,
4627 enum mlxsw_sp_fib_entry_op op)
4629 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4631 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4632 ll_ops->fib_entry_act_ip2me_pack(op_ctx);
4633 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
4636 static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
4637 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4638 struct mlxsw_sp_fib_entry *fib_entry,
4639 enum mlxsw_sp_fib_entry_op op)
4641 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4642 enum mlxsw_reg_ralue_trap_action trap_action;
4644 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
4645 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4646 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, 0, 0);
4647 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
4651 mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
4652 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4653 struct mlxsw_sp_fib_entry *fib_entry,
4654 enum mlxsw_sp_fib_entry_op op)
4656 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4657 enum mlxsw_reg_ralue_trap_action trap_action;
4660 trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
4661 trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;
4663 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4664 ll_ops->fib_entry_act_local_pack(op_ctx, trap_action, trap_id, 0);
4665 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
4669 mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
4670 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4671 struct mlxsw_sp_fib_entry *fib_entry,
4672 enum mlxsw_sp_fib_entry_op op)
4674 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4675 struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
4676 const struct mlxsw_sp_ipip_ops *ipip_ops;
4678 if (WARN_ON(!ipip_entry))
4681 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4682 return ipip_ops->fib_entry_op(mlxsw_sp, ll_ops, op_ctx, ipip_entry, op,
4683 fib_entry->decap.tunnel_index, fib_entry->priv);
4686 static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
4687 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4688 struct mlxsw_sp_fib_entry *fib_entry,
4689 enum mlxsw_sp_fib_entry_op op)
4691 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4693 mlxsw_sp_fib_entry_pack(op_ctx, fib_entry, op);
4694 ll_ops->fib_entry_act_ip2me_tun_pack(op_ctx,
4695 fib_entry->decap.tunnel_index);
4696 return mlxsw_sp_fib_entry_commit(mlxsw_sp, op_ctx, ll_ops);
4699 static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4700 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4701 struct mlxsw_sp_fib_entry *fib_entry,
4702 enum mlxsw_sp_fib_entry_op op)
4704 switch (fib_entry->type) {
4705 case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
4706 return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, op_ctx, fib_entry, op);
4707 case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
4708 return mlxsw_sp_fib_entry_op_local(mlxsw_sp, op_ctx, fib_entry, op);
4709 case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
4710 return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, op_ctx, fib_entry, op);
4711 case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
4712 return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, op_ctx, fib_entry, op);
4713 case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
4714 return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, op_ctx, fib_entry, op);
4715 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4716 return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp, op_ctx, fib_entry, op);
4717 case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
4718 return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, op_ctx, fib_entry, op);
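/* Dispatch the FIB entry write according to its type and, on success,
 * refresh the route's hardware offload/trap flags.
 */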
4723 static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
4724 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4725 struct mlxsw_sp_fib_entry *fib_entry,
4726 enum mlxsw_sp_fib_entry_op op)
4728 int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry, op);
4733 mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);
4738 static int __mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4739 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4740 struct mlxsw_sp_fib_entry *fib_entry,
4743 return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
4744 is_new ? MLXSW_SP_FIB_ENTRY_OP_WRITE :
4745 MLXSW_SP_FIB_ENTRY_OP_UPDATE);
4748 static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
4749 struct mlxsw_sp_fib_entry *fib_entry)
4751 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
4753 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
4754 return __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, false);
4757 static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
4758 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
4759 struct mlxsw_sp_fib_entry *fib_entry)
4761 const struct mlxsw_sp_router_ll_ops *ll_ops = fib_entry->fib_node->fib->ll_ops;
4763 if (!ll_ops->fib_entry_is_committed(fib_entry->priv))
4765 return mlxsw_sp_fib_entry_op(mlxsw_sp, op_ctx, fib_entry,
4766 MLXSW_SP_FIB_ENTRY_OP_DELETE);
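/* Map the kernel route type (RTN_*) and decap state to the device FIB entry
 * type used when programming the entry.
 */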
4770 mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
4771 const struct fib_entry_notifier_info *fen_info,
4772 struct mlxsw_sp_fib_entry *fib_entry)
4774 struct net_device *dev = fib_info_nh(fen_info->fi, 0)->fib_nh_dev;
4775 union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
4776 struct mlxsw_sp_router *router = mlxsw_sp->router;
4777 u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
4778 struct mlxsw_sp_ipip_entry *ipip_entry;
4779 struct fib_info *fi = fen_info->fi;
4781 switch (fen_info->type) {
4783 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
4784 MLXSW_SP_L3_PROTO_IPV4, dip);
4785 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
4786 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
4787 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
4791 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
4792 MLXSW_SP_L3_PROTO_IPV4,
4796 tunnel_index = router->nve_decap_config.tunnel_index;
4797 fib_entry->decap.tunnel_index = tunnel_index;
4798 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
4803 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
4806 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
4808 case RTN_UNREACHABLE:
4809 case RTN_PROHIBIT:
4810 /* Packets hitting these routes need to be trapped, but
4811 * can do so with a lower priority than packets directed
4812 * at the host, so use action type local instead of trap.
4813 */
4814 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
4817 if (mlxsw_sp_fi_is_gateway(mlxsw_sp, fi))
4818 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
4820 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
4828 mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
4829 struct mlxsw_sp_fib_entry *fib_entry)
4831 switch (fib_entry->type) {
4832 case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
4833 mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
4840 static struct mlxsw_sp_fib4_entry *
4841 mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
4842 struct mlxsw_sp_fib_node *fib_node,
4843 const struct fib_entry_notifier_info *fen_info)
4845 struct mlxsw_sp_fib4_entry *fib4_entry;
4846 struct mlxsw_sp_fib_entry *fib_entry;
4849 fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
4851 return ERR_PTR(-ENOMEM);
4852 fib_entry = &fib4_entry->common;
4854 fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
4855 if (IS_ERR(fib_entry->priv)) {
4856 err = PTR_ERR(fib_entry->priv);
4857 goto err_fib_entry_priv_create;
4860 err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
4862 goto err_fib4_entry_type_set;
4864 err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
4866 goto err_nexthop4_group_get;
4868 fib4_entry->fi = fen_info->fi;
4869 fib_info_hold(fib4_entry->fi);
4870 fib4_entry->tb_id = fen_info->tb_id;
4871 fib4_entry->type = fen_info->type;
4872 fib4_entry->tos = fen_info->tos;
4874 fib_entry->fib_node = fib_node;
4878 err_nexthop4_group_get:
4879 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib_entry);
4880 err_fib4_entry_type_set:
4881 mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
4882 err_fib_entry_priv_create:
4884 return ERR_PTR(err);
4887 static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
4888 struct mlxsw_sp_fib4_entry *fib4_entry)
4890 fib_info_put(fib4_entry->fi);
4891 mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
4892 mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
4893 mlxsw_sp_fib_entry_priv_put(fib4_entry->common.priv);
4897 static struct mlxsw_sp_fib4_entry *
4898 mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
4899 const struct fib_entry_notifier_info *fen_info)
4901 struct mlxsw_sp_fib4_entry *fib4_entry;
4902 struct mlxsw_sp_fib_node *fib_node;
4903 struct mlxsw_sp_fib *fib;
4904 struct mlxsw_sp_vr *vr;
4906 vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
4909 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);
4911 fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
4912 sizeof(fen_info->dst),
4917 fib4_entry = container_of(fib_node->fib_entry,
4918 struct mlxsw_sp_fib4_entry, common);
4919 if (fib4_entry->tb_id == fen_info->tb_id &&
4920 fib4_entry->tos == fen_info->tos &&
4921 fib4_entry->type == fen_info->type &&
4922 fib4_entry->fi == fen_info->fi)
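/* FIB nodes are hashed per virtual router and protocol, keyed by
 * {prefix, prefix length}.
 */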
4928 static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
4929 .key_offset = offsetof(struct mlxsw_sp_fib_node, key),
4930 .head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
4931 .key_len = sizeof(struct mlxsw_sp_fib_key),
4932 .automatic_shrinking = true,
4935 static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
4936 struct mlxsw_sp_fib_node *fib_node)
4938 return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
4939 mlxsw_sp_fib_ht_params);
4942 static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
4943 struct mlxsw_sp_fib_node *fib_node)
4945 rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
4946 mlxsw_sp_fib_ht_params);
4949 static struct mlxsw_sp_fib_node *
4950 mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
4951 size_t addr_len, unsigned char prefix_len)
4953 struct mlxsw_sp_fib_key key;
4955 memset(&key, 0, sizeof(key));
4956 memcpy(key.addr, addr, addr_len);
4957 key.prefix_len = prefix_len;
4958 return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
4961 static struct mlxsw_sp_fib_node *
4962 mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
4963 size_t addr_len, unsigned char prefix_len)
4965 struct mlxsw_sp_fib_node *fib_node;
4967 fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
4971 list_add(&fib_node->list, &fib->node_list);
4972 memcpy(fib_node->key.addr, addr, addr_len);
4973 fib_node->key.prefix_len = prefix_len;
4978 static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
4980 list_del(&fib_node->list);
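/* Account the node's prefix length in the LPM tree shared by this protocol.
 * If the length is not yet used, get a tree that includes it and rebind the
 * virtual routers to that tree.
 */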
4984 static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
4985 struct mlxsw_sp_fib_node *fib_node)
4987 struct mlxsw_sp_prefix_usage req_prefix_usage;
4988 struct mlxsw_sp_fib *fib = fib_node->fib;
4989 struct mlxsw_sp_lpm_tree *lpm_tree;
4992 lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
4993 if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
4996 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
4997 mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
4998 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
5000 if (IS_ERR(lpm_tree))
5001 return PTR_ERR(lpm_tree);
5003 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
5005 goto err_lpm_tree_replace;
5008 lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
5011 err_lpm_tree_replace:
5012 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
5016 static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
5017 struct mlxsw_sp_fib_node *fib_node)
5019 struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
5020 struct mlxsw_sp_prefix_usage req_prefix_usage;
5021 struct mlxsw_sp_fib *fib = fib_node->fib;
5024 if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
5026 /* Try to construct a new LPM tree from the current prefix usage
5027 * minus the unused one. If we fail, continue using the old one.
5028 */
5029 mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
5030 mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
5031 fib_node->key.prefix_len);
5032 lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
5034 if (IS_ERR(lpm_tree))
5037 err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
5039 goto err_lpm_tree_replace;
5043 err_lpm_tree_replace:
5044 mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
5047 static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
5048 struct mlxsw_sp_fib_node *fib_node,
5049 struct mlxsw_sp_fib *fib)
5053 err = mlxsw_sp_fib_node_insert(fib, fib_node);
5056 fib_node->fib = fib;
5058 err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
5060 goto err_fib_lpm_tree_link;
5064 err_fib_lpm_tree_link:
5065 fib_node->fib = NULL;
5066 mlxsw_sp_fib_node_remove(fib, fib_node);
5070 static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
5071 struct mlxsw_sp_fib_node *fib_node)
5073 struct mlxsw_sp_fib *fib = fib_node->fib;
5075 mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
5076 fib_node->fib = NULL;
5077 mlxsw_sp_fib_node_remove(fib, fib_node);
5080 static struct mlxsw_sp_fib_node *
5081 mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
5082 size_t addr_len, unsigned char prefix_len,
5083 enum mlxsw_sp_l3proto proto)
5085 struct mlxsw_sp_fib_node *fib_node;
5086 struct mlxsw_sp_fib *fib;
5087 struct mlxsw_sp_vr *vr;
5090 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
5092 return ERR_CAST(vr);
5093 fib = mlxsw_sp_vr_fib(vr, proto);
5095 fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
5099 fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
5102 goto err_fib_node_create;
5105 err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
5107 goto err_fib_node_init;
5112 mlxsw_sp_fib_node_destroy(fib_node);
5113 err_fib_node_create:
5114 mlxsw_sp_vr_put(mlxsw_sp, vr);
5115 return ERR_PTR(err);
5118 static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
5119 struct mlxsw_sp_fib_node *fib_node)
5121 struct mlxsw_sp_vr *vr = fib_node->fib->vr;
5123 if (fib_node->fib_entry)
5125 mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
5126 mlxsw_sp_fib_node_destroy(fib_node);
5127 mlxsw_sp_vr_put(mlxsw_sp, vr);
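/* Attach the entry to its FIB node and write it to the device: a WRITE
 * operation for a new node, an UPDATE when an existing entry is replaced.
 */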
5130 static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
5131 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5132 struct mlxsw_sp_fib_entry *fib_entry)
5134 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
5135 bool is_new = !fib_node->fib_entry;
5138 fib_node->fib_entry = fib_entry;
5140 err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx, fib_entry, is_new);
5142 goto err_fib_entry_update;
5146 err_fib_entry_update:
5147 fib_node->fib_entry = NULL;
5151 static int __mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5152 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5153 struct mlxsw_sp_fib_entry *fib_entry)
5155 struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
5158 err = mlxsw_sp_fib_entry_del(mlxsw_sp, op_ctx, fib_entry);
5159 fib_node->fib_entry = NULL;
5163 static void mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
5164 struct mlxsw_sp_fib_entry *fib_entry)
5166 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
5168 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
5169 __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, fib_entry);
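/* Do not allow a route from the main table to replace a matching route
 * from the local table, which takes precedence in the kernel.
 */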
5172 static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
5174 struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
5175 struct mlxsw_sp_fib4_entry *fib4_replaced;
5177 if (!fib_node->fib_entry)
5180 fib4_replaced = container_of(fib_node->fib_entry,
5181 struct mlxsw_sp_fib4_entry, common);
5182 if (fib4_entry->tb_id == RT_TABLE_MAIN &&
5183 fib4_replaced->tb_id == RT_TABLE_LOCAL)
5190 mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
5191 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5192 const struct fib_entry_notifier_info *fen_info)
5194 struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
5195 struct mlxsw_sp_fib_entry *replaced;
5196 struct mlxsw_sp_fib_node *fib_node;
5199 if (mlxsw_sp->router->aborted)
5202 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
5203 &fen_info->dst, sizeof(fen_info->dst),
5205 MLXSW_SP_L3_PROTO_IPV4);
5206 if (IS_ERR(fib_node)) {
5207 dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
5208 return PTR_ERR(fib_node);
5211 fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
5212 if (IS_ERR(fib4_entry)) {
5213 dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
5214 err = PTR_ERR(fib4_entry);
5215 goto err_fib4_entry_create;
5218 if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
5219 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5220 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5224 replaced = fib_node->fib_entry;
5225 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib4_entry->common);
5227 dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
5228 goto err_fib_node_entry_link;
5231 /* Nothing to replace */
5235 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
5236 fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
5238 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);
5242 err_fib_node_entry_link:
5243 fib_node->fib_entry = replaced;
5244 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5245 err_fib4_entry_create:
5246 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5250 static int mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
5251 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5252 struct fib_entry_notifier_info *fen_info)
5254 struct mlxsw_sp_fib4_entry *fib4_entry;
5255 struct mlxsw_sp_fib_node *fib_node;
5258 if (mlxsw_sp->router->aborted)
5261 fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
5264 fib_node = fib4_entry->common.fib_node;
5266 err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib4_entry->common);
5267 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
5268 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5272 static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
5274 /* Multicast routes aren't supported, so ignore them. Neighbour
5275 * Discovery packets are specifically trapped.
5276 */
5277 if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
5280 /* Cloned routes are irrelevant in the forwarding path. */
5281 if (rt->fib6_flags & RTF_CACHE)
5287 static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
5289 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5291 mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
5293 return ERR_PTR(-ENOMEM);
5295 /* In case of route replace, replaced route is deleted with
5296 * no notification. Take reference to prevent accessing freed
5297 * memory.
5298 */
5299 mlxsw_sp_rt6->rt = rt;
5302 return mlxsw_sp_rt6;
5305 #if IS_ENABLED(CONFIG_IPV6)
5306 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5307 {
5308 fib6_info_release(rt);
5309 }
5310 #else
5311 static void mlxsw_sp_rt6_release(struct fib6_info *rt)
5312 {
5313 }
5314 #endif
5316 static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
5318 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
5320 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
5321 mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
5322 kfree(mlxsw_sp_rt6);
5325 static struct fib6_info *
5326 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
5328 return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
5332 static struct mlxsw_sp_rt6 *
5333 mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
5334 const struct fib6_info *rt)
5336 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5338 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
5339 if (mlxsw_sp_rt6->rt == rt)
5340 return mlxsw_sp_rt6;
5346 static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
5347 const struct fib6_info *rt,
5348 enum mlxsw_sp_ipip_type *ret)
5350 return rt->fib6_nh->fib_nh_dev &&
5351 mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
5354 static void mlxsw_sp_nexthop6_type_fini(struct mlxsw_sp *mlxsw_sp,
5355 struct mlxsw_sp_nexthop *nh)
5357 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
5360 static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
5361 struct mlxsw_sp_nexthop_group *nh_grp,
5362 struct mlxsw_sp_nexthop *nh,
5363 const struct fib6_info *rt)
5365 struct net_device *dev = rt->fib6_nh->fib_nh_dev;
5367 nh->nhgi = nh_grp->nhgi;
5368 nh->nh_weight = rt->fib6_nh->fib_nh_weight;
5369 memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
5370 #if IS_ENABLED(CONFIG_IPV6)
5371 nh->neigh_tbl = &nd_tbl;
5372 #endif
5373 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
5375 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
5379 nh->ifindex = dev->ifindex;
5381 return mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
5384 static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
5385 struct mlxsw_sp_nexthop *nh)
5387 mlxsw_sp_nexthop6_type_fini(mlxsw_sp, nh);
5388 list_del(&nh->router_list_node);
5389 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
5392 static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
5393 const struct fib6_info *rt)
5395 return rt->fib6_nh->fib_nh_gw_family ||
5396 mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
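/* Build the nexthop group info from the entry's rt6 list: one nexthop per
 * sibling fib6_info, in list order.
 */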
5400 mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
5401 struct mlxsw_sp_nexthop_group *nh_grp,
5402 struct mlxsw_sp_fib6_entry *fib6_entry)
5404 struct mlxsw_sp_nexthop_group_info *nhgi;
5405 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5406 struct mlxsw_sp_nexthop *nh;
5409 nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
5413 nh_grp->nhgi = nhgi;
5414 nhgi->nh_grp = nh_grp;
5415 mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
5416 struct mlxsw_sp_rt6, list);
5417 nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
5418 nhgi->count = fib6_entry->nrt6;
5419 for (i = 0; i < nhgi->count; i++) {
5420 struct fib6_info *rt = mlxsw_sp_rt6->rt;
5422 nh = &nhgi->nexthops[i];
5423 err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
5425 goto err_nexthop6_init;
5426 mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
5428 nh_grp->nhgi = nhgi;
5429 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5434 for (i--; i >= 0; i--) {
5435 nh = &nhgi->nexthops[i];
5436 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5443 mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
5444 struct mlxsw_sp_nexthop_group *nh_grp)
5446 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
5449 for (i = nhgi->count - 1; i >= 0; i--) {
5450 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
5452 mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
5454 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
5455 WARN_ON_ONCE(nhgi->adj_index_valid);
5459 static struct mlxsw_sp_nexthop_group *
5460 mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
5461 struct mlxsw_sp_fib6_entry *fib6_entry)
5463 struct mlxsw_sp_nexthop_group *nh_grp;
5466 nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
5468 return ERR_PTR(-ENOMEM);
5469 INIT_LIST_HEAD(&nh_grp->fib_list);
5470 nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
5472 err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
5474 goto err_nexthop_group_info_init;
5476 err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
5478 goto err_nexthop_group_insert;
5482 err_nexthop_group_insert:
5483 mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
5484 err_nexthop_group_info_init:
5486 return ERR_PTR(err);
5490 mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
5491 struct mlxsw_sp_nexthop_group *nh_grp)
5493 mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
5494 mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
5498 static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
5499 struct mlxsw_sp_fib6_entry *fib6_entry)
5501 struct mlxsw_sp_nexthop_group *nh_grp;
5503 nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
5505 nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
5507 return PTR_ERR(nh_grp);
5510 list_add_tail(&fib6_entry->common.nexthop_group_node,
5512 fib6_entry->common.nh_group = nh_grp;
5514 /* The route and the nexthop are described by the same struct, so we
5515 * need to update the nexthop offload indication for the new route.
5516 */
5517 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
5522 static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
5523 struct mlxsw_sp_fib_entry *fib_entry)
5525 struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
5527 list_del(&fib_entry->nexthop_group_node);
5528 if (!list_empty(&nh_grp->fib_list))
5530 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
5533 static int mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
5534 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5535 struct mlxsw_sp_fib6_entry *fib6_entry)
5537 struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
5540 fib6_entry->common.nh_group = NULL;
5541 list_del(&fib6_entry->common.nexthop_group_node);
5543 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5545 goto err_nexthop6_group_get;
5547 /* In case this entry is offloaded, then the adjacency index
5548 * currently associated with it in the device's table is that
5549 * of the old group. Start using the new one instead.
5550 */
5551 err = __mlxsw_sp_fib_entry_update(mlxsw_sp, op_ctx,
5552 &fib6_entry->common, false);
5554 goto err_fib_entry_update;
5556 if (list_empty(&old_nh_grp->fib_list))
5557 mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);
5561 err_fib_entry_update:
5562 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5563 err_nexthop6_group_get:
5564 list_add_tail(&fib6_entry->common.nexthop_group_node,
5565 &old_nh_grp->fib_list);
5566 fib6_entry->common.nh_group = old_nh_grp;
5571 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
5572 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5573 struct mlxsw_sp_fib6_entry *fib6_entry,
5574 struct fib6_info **rt_arr, unsigned int nrt6)
5576 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5579 for (i = 0; i < nrt6; i++) {
5580 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5581 if (IS_ERR(mlxsw_sp_rt6)) {
5582 err = PTR_ERR(mlxsw_sp_rt6);
5583 goto err_rt6_create;
5586 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5590 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
5592 goto err_nexthop6_group_update;
5596 err_nexthop6_group_update:
5599 for (i--; i >= 0; i--) {
5601 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5602 struct mlxsw_sp_rt6, list);
5603 list_del(&mlxsw_sp_rt6->list);
5604 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5610 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
5611 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5612 struct mlxsw_sp_fib6_entry *fib6_entry,
5613 struct fib6_info **rt_arr, unsigned int nrt6)
5615 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5618 for (i = 0; i < nrt6; i++) {
5619 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
5621 if (WARN_ON_ONCE(!mlxsw_sp_rt6))
5625 list_del(&mlxsw_sp_rt6->list);
5626 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5629 mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
5632 static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
5633 struct mlxsw_sp_fib_entry *fib_entry,
5634 const struct fib6_info *rt)
5636 /* Packets hitting RTF_REJECT routes need to be discarded by the
5637 * stack. We can rely on their destination device not having a
5638 * RIF (it's the loopback device) and can thus use action type
5639 * local, which will cause them to be trapped with a lower
5640 * priority than packets that need to be locally received.
5641 */
5642 if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
5643 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
5644 else if (rt->fib6_type == RTN_BLACKHOLE)
5645 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
5646 else if (rt->fib6_flags & RTF_REJECT)
5647 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
5648 else if (mlxsw_sp_rt6_is_gateway(mlxsw_sp, rt))
5649 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
5651 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
5655 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
5657 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
5659 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
5662 list_del(&mlxsw_sp_rt6->list);
5663 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5667 static struct mlxsw_sp_fib6_entry *
5668 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
5669 struct mlxsw_sp_fib_node *fib_node,
5670 struct fib6_info **rt_arr, unsigned int nrt6)
5672 struct mlxsw_sp_fib6_entry *fib6_entry;
5673 struct mlxsw_sp_fib_entry *fib_entry;
5674 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
5677 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
5679 return ERR_PTR(-ENOMEM);
5680 fib_entry = &fib6_entry->common;
5682 fib_entry->priv = mlxsw_sp_fib_entry_priv_create(fib_node->fib->ll_ops);
5683 if (IS_ERR(fib_entry->priv)) {
5684 err = PTR_ERR(fib_entry->priv);
5685 goto err_fib_entry_priv_create;
5688 INIT_LIST_HEAD(&fib6_entry->rt6_list);
5690 for (i = 0; i < nrt6; i++) {
5691 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
5692 if (IS_ERR(mlxsw_sp_rt6)) {
5693 err = PTR_ERR(mlxsw_sp_rt6);
5694 goto err_rt6_create;
5696 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
5700 mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
5702 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
5704 goto err_nexthop6_group_get;
5706 fib_entry->fib_node = fib_node;
5710 err_nexthop6_group_get:
5713 for (i--; i >= 0; i--) {
5715 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
5716 struct mlxsw_sp_rt6, list);
5717 list_del(&mlxsw_sp_rt6->list);
5718 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
5720 mlxsw_sp_fib_entry_priv_put(fib_entry->priv);
5721 err_fib_entry_priv_create:
5723 return ERR_PTR(err);
5726 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
5727 struct mlxsw_sp_fib6_entry *fib6_entry)
5729 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
5730 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
5731 WARN_ON(fib6_entry->nrt6);
5732 mlxsw_sp_fib_entry_priv_put(fib6_entry->common.priv);
5736 static struct mlxsw_sp_fib6_entry *
5737 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
5738 const struct fib6_info *rt)
5740 struct mlxsw_sp_fib6_entry *fib6_entry;
5741 struct mlxsw_sp_fib_node *fib_node;
5742 struct mlxsw_sp_fib *fib;
5743 struct fib6_info *cmp_rt;
5744 struct mlxsw_sp_vr *vr;
5746 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
5749 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
5751 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
5752 sizeof(rt->fib6_dst.addr),
5757 fib6_entry = container_of(fib_node->fib_entry,
5758 struct mlxsw_sp_fib6_entry, common);
5759 cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5760 if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
5761 rt->fib6_metric == cmp_rt->fib6_metric &&
5762 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
5768 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
5770 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
5771 struct mlxsw_sp_fib6_entry *fib6_replaced;
5772 struct fib6_info *rt, *rt_replaced;
5774 if (!fib_node->fib_entry)
5777 fib6_replaced = container_of(fib_node->fib_entry,
5778 struct mlxsw_sp_fib6_entry,
5780 rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
5781 rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
5782 if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
5783 rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
5789 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
5790 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5791 struct fib6_info **rt_arr, unsigned int nrt6)
5793 struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
5794 struct mlxsw_sp_fib_entry *replaced;
5795 struct mlxsw_sp_fib_node *fib_node;
5796 struct fib6_info *rt = rt_arr[0];
5799 if (mlxsw_sp->router->aborted)
5802 if (rt->fib6_src.plen)
5805 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5808 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5810 sizeof(rt->fib6_dst.addr),
5812 MLXSW_SP_L3_PROTO_IPV6);
5813 if (IS_ERR(fib_node))
5814 return PTR_ERR(fib_node);
5816 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
5818 if (IS_ERR(fib6_entry)) {
5819 err = PTR_ERR(fib6_entry);
5820 goto err_fib6_entry_create;
5823 if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
5824 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5825 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5829 replaced = fib_node->fib_entry;
5830 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, op_ctx, &fib6_entry->common);
5832 goto err_fib_node_entry_link;
5834 /* Nothing to replace */
5838 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
5839 fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
5841 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
5845 err_fib_node_entry_link:
5846 fib_node->fib_entry = replaced;
5847 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5848 err_fib6_entry_create:
5849 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5853 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
5854 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5855 struct fib6_info **rt_arr, unsigned int nrt6)
5857 struct mlxsw_sp_fib6_entry *fib6_entry;
5858 struct mlxsw_sp_fib_node *fib_node;
5859 struct fib6_info *rt = rt_arr[0];
5862 if (mlxsw_sp->router->aborted)
5865 if (rt->fib6_src.plen)
5868 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5871 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
5873 sizeof(rt->fib6_dst.addr),
5875 MLXSW_SP_L3_PROTO_IPV6);
5876 if (IS_ERR(fib_node))
5877 return PTR_ERR(fib_node);
5879 if (WARN_ON_ONCE(!fib_node->fib_entry)) {
5880 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5884 fib6_entry = container_of(fib_node->fib_entry,
5885 struct mlxsw_sp_fib6_entry, common);
5886 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
5888 goto err_fib6_entry_nexthop_add;
5892 err_fib6_entry_nexthop_add:
5893 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
5897 static int mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
5898 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
5899 struct fib6_info **rt_arr, unsigned int nrt6)
5901 struct mlxsw_sp_fib6_entry *fib6_entry;
5902 struct mlxsw_sp_fib_node *fib_node;
5903 struct fib6_info *rt = rt_arr[0];
5906 if (mlxsw_sp->router->aborted)
5909 if (mlxsw_sp_fib6_rt_should_ignore(rt))
5912 /* Multipath routes are first added to the FIB trie and only then
5913 * notified. If we vetoed the addition, we will get a delete
5914 * notification for a route we do not have. Therefore, do not warn if
5915 * route was not found.
5916 */
5917 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
5921 /* If not all the nexthops are deleted, then only reduce the nexthop
5922 * group.
5923 */
5924 if (nrt6 != fib6_entry->nrt6) {
5925 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, op_ctx, fib6_entry, rt_arr, nrt6);
5929 fib_node = fib6_entry->common.fib_node;
5931 err = __mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, op_ctx, &fib6_entry->common);
5932 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
5933 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
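/* In abort mode, bind a minimal LPM tree to every virtual router and install
 * a default (zero prefix length) entry that traps all packets to the CPU.
 */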
5937 static int __mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp,
5938 enum mlxsw_sp_l3proto proto,
5941 const struct mlxsw_sp_router_ll_ops *ll_ops = mlxsw_sp->router->proto_ll_ops[proto];
5942 enum mlxsw_reg_ralxx_protocol ralxx_proto =
5943 (enum mlxsw_reg_ralxx_protocol) proto;
5944 struct mlxsw_sp_fib_entry_priv *priv;
5945 char xralta_pl[MLXSW_REG_XRALTA_LEN];
5946 char xralst_pl[MLXSW_REG_XRALST_LEN];
5949 mlxsw_reg_xralta_pack(xralta_pl, true, ralxx_proto, tree_id);
5950 err = ll_ops->ralta_write(mlxsw_sp, xralta_pl);
5954 mlxsw_reg_xralst_pack(xralst_pl, 0xff, tree_id);
5955 err = ll_ops->ralst_write(mlxsw_sp, xralst_pl);
5959 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
5960 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = mlxsw_sp->router->ll_op_ctx;
5961 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
5962 char xraltb_pl[MLXSW_REG_XRALTB_LEN];
5964 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
5965 mlxsw_reg_xraltb_pack(xraltb_pl, vr->id, ralxx_proto, tree_id);
5966 err = ll_ops->raltb_write(mlxsw_sp, xraltb_pl);
5970 priv = mlxsw_sp_fib_entry_priv_create(ll_ops);
5972 return PTR_ERR(priv);
5974 ll_ops->fib_entry_pack(op_ctx, proto, MLXSW_SP_FIB_ENTRY_OP_WRITE,
5975 vr->id, 0, NULL, priv);
5976 ll_ops->fib_entry_act_ip2me_pack(op_ctx);
5977 err = ll_ops->fib_entry_commit(mlxsw_sp, op_ctx, NULL);
5978 mlxsw_sp_fib_entry_priv_put(priv);
5986 static struct mlxsw_sp_mr_table *
5987 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
5989 if (family == RTNL_FAMILY_IPMR)
5990 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
5992 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
5995 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
5996 struct mfc_entry_notifier_info *men_info,
5999 struct mlxsw_sp_mr_table *mrt;
6000 struct mlxsw_sp_vr *vr;
6002 if (mlxsw_sp->router->aborted)
6005 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
6009 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
6010 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
6013 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
6014 struct mfc_entry_notifier_info *men_info)
6016 struct mlxsw_sp_mr_table *mrt;
6017 struct mlxsw_sp_vr *vr;
6019 if (mlxsw_sp->router->aborted)
6022 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
6026 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
6027 mlxsw_sp_mr_route_del(mrt, men_info->mfc);
6028 mlxsw_sp_vr_put(mlxsw_sp, vr);
6032 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
6033 struct vif_entry_notifier_info *ven_info)
6035 struct mlxsw_sp_mr_table *mrt;
6036 struct mlxsw_sp_rif *rif;
6037 struct mlxsw_sp_vr *vr;
6039 if (mlxsw_sp->router->aborted)
6042 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
6046 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
6047 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
6048 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
6049 ven_info->vif_index,
6050 ven_info->vif_flags, rif);
6054 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
6055 struct vif_entry_notifier_info *ven_info)
6057 struct mlxsw_sp_mr_table *mrt;
6058 struct mlxsw_sp_vr *vr;
6060 if (mlxsw_sp->router->aborted)
6063 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
6067 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
6068 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
6069 mlxsw_sp_vr_put(mlxsw_sp, vr);
6072 static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp)
6074 enum mlxsw_sp_l3proto proto = MLXSW_SP_L3_PROTO_IPV4;
6077 err = __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
6078 MLXSW_SP_LPM_TREE_MIN);
6082 /* The multicast router code does not need an abort trap as by default,
6083 * packets that don't match any routes are trapped to the CPU.
6084 */
6086 proto = MLXSW_SP_L3_PROTO_IPV6;
6087 return __mlxsw_sp_router_set_abort_trap(mlxsw_sp, proto,
6088 MLXSW_SP_LPM_TREE_MIN + 1);
6091 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
6092 struct mlxsw_sp_fib_node *fib_node)
6094 struct mlxsw_sp_fib4_entry *fib4_entry;
6096 fib4_entry = container_of(fib_node->fib_entry,
6097 struct mlxsw_sp_fib4_entry, common);
6098 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
6099 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
6100 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6103 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
6104 struct mlxsw_sp_fib_node *fib_node)
6106 struct mlxsw_sp_fib6_entry *fib6_entry;
6108 fib6_entry = container_of(fib_node->fib_entry,
6109 struct mlxsw_sp_fib6_entry, common);
6110 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
6111 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
6112 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
6115 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
6116 struct mlxsw_sp_fib_node *fib_node)
6118 switch (fib_node->fib->proto) {
6119 case MLXSW_SP_L3_PROTO_IPV4:
6120 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
6122 case MLXSW_SP_L3_PROTO_IPV6:
6123 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
6128 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
6129 struct mlxsw_sp_vr *vr,
6130 enum mlxsw_sp_l3proto proto)
6132 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
6133 struct mlxsw_sp_fib_node *fib_node, *tmp;
6135 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
6136 bool do_break = &tmp->list == &fib->node_list;
6138 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
6144 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
6148 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
6149 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
6151 if (!mlxsw_sp_vr_is_used(vr))
6154 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
6155 mlxsw_sp_mr_table_flush(vr->mr_table[j]);
6156 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
6158 /* If virtual router was only used for IPv4, then it's no
6159 * longer used.
6160 */
6161 if (!mlxsw_sp_vr_is_used(vr))
6163 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
6166 /* After flushing all the routes, it is not possible anyone is still
6167 * using the adjacency index that is discarding packets, so free it in
6168 * case it was allocated.
6169 */
6170 if (!mlxsw_sp->router->adj_discard_index_valid)
6172 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
6173 mlxsw_sp->router->adj_discard_index);
6174 mlxsw_sp->router->adj_discard_index_valid = false;
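/* Abort offloading: flush everything from the device and install the abort
 * traps, leaving all routing to the kernel from this point on.
 */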
6177 static void mlxsw_sp_router_fib_abort(struct mlxsw_sp *mlxsw_sp)
6181 if (mlxsw_sp->router->aborted)
6183 dev_warn(mlxsw_sp->bus_info->dev, "FIB abort triggered. Note that FIB entries are no longer being offloaded to this device.\n");
6184 mlxsw_sp_router_fib_flush(mlxsw_sp);
6185 mlxsw_sp->router->aborted = true;
6186 err = mlxsw_sp_router_set_abort_trap(mlxsw_sp);
6188 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set abort trap.\n");
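/* FIB events arrive in atomic context and are queued on fib_event_queue;
 * mlxsw_sp_router_fib_event_work() later processes them under the router
 * lock, possibly bulking consecutive entries of the same family and event.
 */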
6191 struct mlxsw_sp_fib6_event {
6192 struct fib6_info **rt_arr;
6196 struct mlxsw_sp_fib_event {
6197 struct list_head list; /* node in fib queue */
6199 struct mlxsw_sp_fib6_event fib6_event;
6200 struct fib_entry_notifier_info fen_info;
6201 struct fib_rule_notifier_info fr_info;
6202 struct fib_nh_notifier_info fnh_info;
6203 struct mfc_entry_notifier_info men_info;
6204 struct vif_entry_notifier_info ven_info;
6206 struct mlxsw_sp *mlxsw_sp;
6207 unsigned long event;
6212 mlxsw_sp_router_fib6_event_init(struct mlxsw_sp_fib6_event *fib6_event,
6213 struct fib6_entry_notifier_info *fen6_info)
6215 struct fib6_info *rt = fen6_info->rt;
6216 struct fib6_info **rt_arr;
6217 struct fib6_info *iter;
6221 nrt6 = fen6_info->nsiblings + 1;
6223 rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
6227 fib6_event->rt_arr = rt_arr;
6228 fib6_event->nrt6 = nrt6;
6233 if (!fen6_info->nsiblings)
6236 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
6237 if (i == fen6_info->nsiblings)
6240 rt_arr[i + 1] = iter;
6241 fib6_info_hold(iter);
6244 WARN_ON_ONCE(i != fen6_info->nsiblings);
6250 mlxsw_sp_router_fib6_event_fini(struct mlxsw_sp_fib6_event *fib6_event)
6254 for (i = 0; i < fib6_event->nrt6; i++)
6255 mlxsw_sp_rt6_release(fib6_event->rt_arr[i]);
6256 kfree(fib6_event->rt_arr);
6259 static void mlxsw_sp_router_fib4_event_process(struct mlxsw_sp *mlxsw_sp,
6260 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6261 struct mlxsw_sp_fib_event *fib_event)
6265 mlxsw_sp_span_respin(mlxsw_sp);
6267 switch (fib_event->event) {
6268 case FIB_EVENT_ENTRY_REPLACE:
6269 err = mlxsw_sp_router_fib4_replace(mlxsw_sp, op_ctx, &fib_event->fen_info);
6271 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
6272 mlxsw_sp_router_fib_abort(mlxsw_sp);
6274 fib_info_put(fib_event->fen_info.fi);
6276 case FIB_EVENT_ENTRY_DEL:
6277 err = mlxsw_sp_router_fib4_del(mlxsw_sp, op_ctx, &fib_event->fen_info);
6279 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
6280 fib_info_put(fib_event->fen_info.fi);
6282 case FIB_EVENT_NH_ADD:
6283 case FIB_EVENT_NH_DEL:
6284 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_event->event, fib_event->fnh_info.fib_nh);
6285 fib_info_put(fib_event->fnh_info.fib_nh->nh_parent);
6290 static void mlxsw_sp_router_fib6_event_process(struct mlxsw_sp *mlxsw_sp,
6291 struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
6292 struct mlxsw_sp_fib_event *fib_event)
6296 mlxsw_sp_span_respin(mlxsw_sp);
6298 switch (fib_event->event) {
6299 case FIB_EVENT_ENTRY_REPLACE:
6300 err = mlxsw_sp_router_fib6_replace(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
6301 fib_event->fib6_event.nrt6);
6303 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
6304 mlxsw_sp_router_fib_abort(mlxsw_sp);
6306 mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
6308 case FIB_EVENT_ENTRY_APPEND:
6309 err = mlxsw_sp_router_fib6_append(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
6310 fib_event->fib6_event.nrt6);
6312 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
6313 mlxsw_sp_router_fib_abort(mlxsw_sp);
6315 mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
6317 case FIB_EVENT_ENTRY_DEL:
6318 err = mlxsw_sp_router_fib6_del(mlxsw_sp, op_ctx, fib_event->fib6_event.rt_arr,
6319 fib_event->fib6_event.nrt6);
6321 mlxsw_sp_fib_entry_op_ctx_priv_put_all(op_ctx);
6322 mlxsw_sp_router_fib6_event_fini(&fib_event->fib6_event);
6327 static void mlxsw_sp_router_fibmr_event_process(struct mlxsw_sp *mlxsw_sp,
6328 struct mlxsw_sp_fib_event *fib_event)
6334 mutex_lock(&mlxsw_sp->router->lock);
6335 switch (fib_event->event) {
6336 case FIB_EVENT_ENTRY_REPLACE:
6337 case FIB_EVENT_ENTRY_ADD:
6338 replace = fib_event->event == FIB_EVENT_ENTRY_REPLACE;
6340 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_event->men_info, replace);
6342 mlxsw_sp_router_fib_abort(mlxsw_sp);
6343 mr_cache_put(fib_event->men_info.mfc);
6345 case FIB_EVENT_ENTRY_DEL:
6346 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_event->men_info);
6347 mr_cache_put(fib_event->men_info.mfc);
6349 case FIB_EVENT_VIF_ADD:
6350 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
6351 &fib_event->ven_info);
6353 mlxsw_sp_router_fib_abort(mlxsw_sp);
6354 dev_put(fib_event->ven_info.dev);
6356 case FIB_EVENT_VIF_DEL:
6357 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp, &fib_event->ven_info);
6358 dev_put(fib_event->ven_info.dev);
6361 mutex_unlock(&mlxsw_sp->router->lock);
6365 static void mlxsw_sp_router_fib_event_work(struct work_struct *work)
6367 struct mlxsw_sp_router *router = container_of(work, struct mlxsw_sp_router, fib_event_work);
6368 struct mlxsw_sp_fib_entry_op_ctx *op_ctx = router->ll_op_ctx;
6369 struct mlxsw_sp *mlxsw_sp = router->mlxsw_sp;
6370 struct mlxsw_sp_fib_event *next_fib_event;
6371 struct mlxsw_sp_fib_event *fib_event;
6372 int last_family = AF_UNSPEC;
6373 LIST_HEAD(fib_event_queue);
6375 spin_lock_bh(&router->fib_event_queue_lock);
6376 list_splice_init(&router->fib_event_queue, &fib_event_queue);
6377 spin_unlock_bh(&router->fib_event_queue_lock);
6379 /* Router lock is held here to make sure per-instance
6380 * operation context is not used in between FIB4/6 events
6381 * processing.
6382 */
6383 mutex_lock(&router->lock);
6384 mlxsw_sp_fib_entry_op_ctx_clear(op_ctx);
6385 list_for_each_entry_safe(fib_event, next_fib_event,
6386 &fib_event_queue, list) {
6387 /* Check if the next entry in the queue exists and it is
6388 * of the same type (family and event) as the current one.
6389 * In that case it is permitted to do the bulking
6390 * of multiple FIB entries to a single register write.
6391 */
6392 op_ctx->bulk_ok = !list_is_last(&fib_event->list, &fib_event_queue) &&
6393 fib_event->family == next_fib_event->family &&
6394 fib_event->event == next_fib_event->event;
6396 /* In case family of this and the previous entry are different, context
6397 * reinitialization is going to be needed now, indicate that.
6398 * Note that since last_family is initialized to AF_UNSPEC, this is always
6399 * going to happen for the first entry processed in the work.
6400 */
6401 if (fib_event->family != last_family)
6402 op_ctx->initialized = false;
6404 switch (fib_event->family) {
6406 mlxsw_sp_router_fib4_event_process(mlxsw_sp, op_ctx,
6410 mlxsw_sp_router_fib6_event_process(mlxsw_sp, op_ctx,
6413 case RTNL_FAMILY_IP6MR:
6414 case RTNL_FAMILY_IPMR:
6415 /* Unlock here as inside FIBMR the lock is taken again
6416 * under RTNL. The per-instance operation context
6417 * is not used by FIBMR.
6418 */
6419 mutex_unlock(&router->lock);
6420 mlxsw_sp_router_fibmr_event_process(mlxsw_sp,
6422 mutex_lock(&router->lock);
6427 last_family = fib_event->family;
6431 WARN_ON_ONCE(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
6432 mutex_unlock(&router->lock);
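/* The following handlers run from the notifier (atomic) context. They only
 * copy the notifier info and take the needed references; the actual
 * processing is deferred to the work item above.
 */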
6435 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event *fib_event,
6436 struct fib_notifier_info *info)
6438 struct fib_entry_notifier_info *fen_info;
6439 struct fib_nh_notifier_info *fnh_info;
6441 switch (fib_event->event) {
6442 case FIB_EVENT_ENTRY_REPLACE:
6443 case FIB_EVENT_ENTRY_DEL:
6444 fen_info = container_of(info, struct fib_entry_notifier_info,
6446 fib_event->fen_info = *fen_info;
6447 /* Take reference on fib_info to prevent it from being
6448 * freed while event is queued. Release it afterwards.
6449 */
6450 fib_info_hold(fib_event->fen_info.fi);
6452 case FIB_EVENT_NH_ADD:
6453 case FIB_EVENT_NH_DEL:
6454 fnh_info = container_of(info, struct fib_nh_notifier_info,
6456 fib_event->fnh_info = *fnh_info;
6457 fib_info_hold(fib_event->fnh_info.fib_nh->nh_parent);
6462 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event *fib_event,
6463 struct fib_notifier_info *info)
6465 struct fib6_entry_notifier_info *fen6_info;
6468 switch (fib_event->event) {
6469 case FIB_EVENT_ENTRY_REPLACE:
6470 case FIB_EVENT_ENTRY_APPEND:
6471 case FIB_EVENT_ENTRY_DEL:
6472 fen6_info = container_of(info, struct fib6_entry_notifier_info,
6474 err = mlxsw_sp_router_fib6_event_init(&fib_event->fib6_event,
6485 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event *fib_event,
6486 struct fib_notifier_info *info)
6488 switch (fib_event->event) {
6489 case FIB_EVENT_ENTRY_REPLACE:
6490 case FIB_EVENT_ENTRY_ADD:
6491 case FIB_EVENT_ENTRY_DEL:
6492 memcpy(&fib_event->men_info, info, sizeof(fib_event->men_info));
6493 mr_cache_hold(fib_event->men_info.mfc);
6495 case FIB_EVENT_VIF_ADD:
6496 case FIB_EVENT_VIF_DEL:
6497 memcpy(&fib_event->ven_info, info, sizeof(fib_event->ven_info));
6498 dev_hold(fib_event->ven_info.dev);
6503 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
6504 struct fib_notifier_info *info,
6505 struct mlxsw_sp *mlxsw_sp)
6507 struct netlink_ext_ack *extack = info->extack;
6508 struct fib_rule_notifier_info *fr_info;
6509 struct fib_rule *rule;
6512 /* nothing to do at the moment */
6513 if (event == FIB_EVENT_RULE_DEL)
6516 if (mlxsw_sp->router->aborted)
6519 fr_info = container_of(info, struct fib_rule_notifier_info, info);
6520 rule = fr_info->rule;
6522 /* Rule only affects locally generated traffic */
6523 if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
6526 switch (info->family) {
6528 if (!fib4_rule_default(rule) && !rule->l3mdev)
6532 if (!fib6_rule_default(rule) && !rule->l3mdev)
6535 case RTNL_FAMILY_IPMR:
6536 if (!ipmr_rule_default(rule) && !rule->l3mdev)
6539 case RTNL_FAMILY_IP6MR:
6540 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
6546 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
6551 /* Called with rcu_read_lock() */
6552 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
6553 unsigned long event, void *ptr)
6555 struct mlxsw_sp_fib_event *fib_event;
6556 struct fib_notifier_info *info = ptr;
6557 struct mlxsw_sp_router *router;
6560 if ((info->family != AF_INET && info->family != AF_INET6 &&
6561 info->family != RTNL_FAMILY_IPMR &&
6562 info->family != RTNL_FAMILY_IP6MR))
6565 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
6568 case FIB_EVENT_RULE_ADD:
6569 case FIB_EVENT_RULE_DEL:
6570 err = mlxsw_sp_router_fib_rule_event(event, info,
6572 return notifier_from_errno(err);
6573 case FIB_EVENT_ENTRY_ADD:
6574 case FIB_EVENT_ENTRY_REPLACE:
6575 case FIB_EVENT_ENTRY_APPEND:
6576 if (router->aborted) {
6577 NL_SET_ERR_MSG_MOD(info->extack, "FIB offload was aborted. Not configuring route");
6578 return notifier_from_errno(-EINVAL);
6580 if (info->family == AF_INET) {
6581 struct fib_entry_notifier_info *fen_info = ptr;
6583 if (fen_info->fi->fib_nh_is_v6) {
6584 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
6585 return notifier_from_errno(-EINVAL);
6587 if (fen_info->fi->nh) {
6588 NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
6589 return notifier_from_errno(-EINVAL);
6591 } else if (info->family == AF_INET6) {
6592 struct fib6_entry_notifier_info *fen6_info;
6594 fen6_info = container_of(info,
6595 struct fib6_entry_notifier_info,
6597 if (fen6_info->rt->nh) {
6598 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
6599 return notifier_from_errno(-EINVAL);
6605 fib_event = kzalloc(sizeof(*fib_event), GFP_ATOMIC);
6609 fib_event->mlxsw_sp = router->mlxsw_sp;
6610 fib_event->event = event;
6611 fib_event->family = info->family;
6613 switch (info->family) {
6615 mlxsw_sp_router_fib4_event(fib_event, info);
6618 err = mlxsw_sp_router_fib6_event(fib_event, info);
6622 case RTNL_FAMILY_IP6MR:
6623 case RTNL_FAMILY_IPMR:
6624 mlxsw_sp_router_fibmr_event(fib_event, info);
6628 /* Enqueue the event and trigger the work */
6629 spin_lock_bh(&router->fib_event_queue_lock);
6630 list_add_tail(&fib_event->list, &router->fib_event_queue);
6631 spin_unlock_bh(&router->fib_event_queue_lock);
6632 mlxsw_core_schedule_work(&router->fib_event_work);
6641 static struct mlxsw_sp_rif *
6642 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
6643 const struct net_device *dev)
6647 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
6648 if (mlxsw_sp->router->rifs[i] &&
6649 mlxsw_sp->router->rifs[i]->dev == dev)
6650 return mlxsw_sp->router->rifs[i];
6655 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
6656 const struct net_device *dev)
6658 struct mlxsw_sp_rif *rif;
6660 mutex_lock(&mlxsw_sp->router->lock);
6661 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6662 mutex_unlock(&mlxsw_sp->router->lock);
6667 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
6669 struct mlxsw_sp_rif *rif;
6672 mutex_lock(&mlxsw_sp->router->lock);
6673 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6677 /* We only return the VID for VLAN RIFs. Otherwise we return an
6678 * invalid value (0). */
6680 if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
6683 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
6686 mutex_unlock(&mlxsw_sp->router->lock);
6690 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
6692 char ritr_pl[MLXSW_REG_RITR_LEN];
6695 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
6696 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6700 mlxsw_reg_ritr_enable_set(ritr_pl, false);
6701 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
6704 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
6705 struct mlxsw_sp_rif *rif)
6707 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
6708 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
6709 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
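/* Decide whether an inetaddr/inet6addr event on @dev should create or remove
 * its RIF. Removal only takes place once the netdev has neither IPv4 nor
 * IPv6 addresses left on it.
 */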
6713 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
6714 unsigned long event)
6716 struct inet6_dev *inet6_dev;
6717 bool addr_list_empty = true;
6718 struct in_device *idev;
6725 idev = __in_dev_get_rcu(dev);
6726 if (idev && idev->ifa_list)
6727 addr_list_empty = false;
6729 inet6_dev = __in6_dev_get(dev);
6730 if (addr_list_empty && inet6_dev &&
6731 !list_empty(&inet6_dev->addr_list))
6732 addr_list_empty = false;
6735 /* macvlans do not have a RIF, but rather piggy back on the
6736 * RIF of their lower device. */
6738 if (netif_is_macvlan(dev) && addr_list_empty)
6741 if (rif && addr_list_empty &&
6742 !netif_is_l3_slave(rif->dev))
6744 /* It is possible we already removed the RIF ourselves
6745 * if it was assigned to a netdev that is now a bridge or a LAG slave. */
6754 static enum mlxsw_sp_rif_type
6755 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
6756 const struct net_device *dev)
6758 enum mlxsw_sp_fid_type type;
6760 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
6761 return MLXSW_SP_RIF_TYPE_IPIP_LB;
6763 /* Otherwise RIF type is derived from the type of the underlying FID. */
6764 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
6765 type = MLXSW_SP_FID_TYPE_8021Q;
6766 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
6767 type = MLXSW_SP_FID_TYPE_8021Q;
6768 else if (netif_is_bridge_master(dev))
6769 type = MLXSW_SP_FID_TYPE_8021D;
6771 type = MLXSW_SP_FID_TYPE_RFID;
6773 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
6776 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index)
6780 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
6781 if (!mlxsw_sp->router->rifs[i]) {
6790 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
6792 struct net_device *l3_dev)
6794 struct mlxsw_sp_rif *rif;
6796 rif = kzalloc(rif_size, GFP_KERNEL);
6800 INIT_LIST_HEAD(&rif->nexthop_list);
6801 INIT_LIST_HEAD(&rif->neigh_list);
6803 ether_addr_copy(rif->addr, l3_dev->dev_addr);
6804 rif->mtu = l3_dev->mtu;
6808 rif->rif_index = rif_index;
6813 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
6816 return mlxsw_sp->router->rifs[rif_index];
6819 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
6821 return rif->rif_index;
6824 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6826 return lb_rif->common.rif_index;
6829 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6831 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
6832 struct mlxsw_sp_vr *ul_vr;
6834 ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
6835 if (WARN_ON(IS_ERR(ul_vr)))
6841 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
6843 return lb_rif->ul_rif_id;
6846 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
6848 return rif->dev->ifindex;
6851 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
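/* Create a RIF for @params->dev: bind it to a virtual router derived from the
 * netdev's FIB table, allocate a free RIF index, take a reference on the
 * backing FID, program the device through the type-specific ops and register
 * the RIF with the multicast routing tables. Errors unwind in reverse order.
 */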
6856 static struct mlxsw_sp_rif *
6857 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
6858 const struct mlxsw_sp_rif_params *params,
6859 struct netlink_ext_ack *extack)
6861 u32 tb_id = l3mdev_fib_table(params->dev);
6862 const struct mlxsw_sp_rif_ops *ops;
6863 struct mlxsw_sp_fid *fid = NULL;
6864 enum mlxsw_sp_rif_type type;
6865 struct mlxsw_sp_rif *rif;
6866 struct mlxsw_sp_vr *vr;
6870 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
6871 ops = mlxsw_sp->rif_ops_arr[type];
6873 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
6875 return ERR_CAST(vr);
6878 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
6880 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
6881 goto err_rif_index_alloc;
6884 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
6890 mlxsw_sp->router->rifs[rif_index] = rif;
6891 rif->mlxsw_sp = mlxsw_sp;
6895 fid = ops->fid_get(rif, extack);
6904 ops->setup(rif, params);
6906 err = ops->configure(rif);
6910 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
6911 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
6913 goto err_mr_rif_add;
6916 mlxsw_sp_rif_counters_alloc(rif);
6921 for (i--; i >= 0; i--)
6922 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6923 ops->deconfigure(rif);
6926 mlxsw_sp_fid_put(fid);
6928 mlxsw_sp->router->rifs[rif_index] = NULL;
6932 err_rif_index_alloc:
6934 mlxsw_sp_vr_put(mlxsw_sp, vr);
6935 return ERR_PTR(err);
6938 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
6940 const struct mlxsw_sp_rif_ops *ops = rif->ops;
6941 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
6942 struct mlxsw_sp_fid *fid = rif->fid;
6943 struct mlxsw_sp_vr *vr;
6946 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
6947 vr = &mlxsw_sp->router->vrs[rif->vr_id];
6949 mlxsw_sp_rif_counters_free(rif);
6950 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
6951 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
6952 ops->deconfigure(rif);
6954 /* Loopback RIFs are not associated with a FID. */
6955 mlxsw_sp_fid_put(fid);
6956 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
6960 mlxsw_sp_vr_put(mlxsw_sp, vr);
6963 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
6964 struct net_device *dev)
6966 struct mlxsw_sp_rif *rif;
6968 mutex_lock(&mlxsw_sp->router->lock);
6969 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
6972 mlxsw_sp_rif_destroy(rif);
6974 mutex_unlock(&mlxsw_sp->router->lock);
6978 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
6979 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
6981 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
6983 params->vid = mlxsw_sp_port_vlan->vid;
6984 params->lag = mlxsw_sp_port->lagged;
6986 params->lag_id = mlxsw_sp_port->lag_id;
6988 params->system_port = mlxsw_sp_port->local_port;
6991 static struct mlxsw_sp_rif_subport *
6992 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
6994 return container_of(rif, struct mlxsw_sp_rif_subport, common);
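/* Sub-port RIFs are shared by all {port, VID} pairs that map to the same
 * netdev, so they are reference counted: the first user creates the RIF and
 * later users only bump the reference count.
 */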
6997 static struct mlxsw_sp_rif *
6998 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
6999 const struct mlxsw_sp_rif_params *params,
7000 struct netlink_ext_ack *extack)
7002 struct mlxsw_sp_rif_subport *rif_subport;
7003 struct mlxsw_sp_rif *rif;
7005 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
7007 return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
7009 rif_subport = mlxsw_sp_rif_subport_rif(rif);
7010 refcount_inc(&rif_subport->ref_count);
7014 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
7016 struct mlxsw_sp_rif_subport *rif_subport;
7018 rif_subport = mlxsw_sp_rif_subport_rif(rif);
7019 if (!refcount_dec_and_test(&rif_subport->ref_count))
7022 mlxsw_sp_rif_destroy(rif);
7026 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
7027 struct net_device *l3_dev,
7028 struct netlink_ext_ack *extack)
7030 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
7031 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
7032 struct mlxsw_sp_rif_params params = {
7035 u16 vid = mlxsw_sp_port_vlan->vid;
7036 struct mlxsw_sp_rif *rif;
7037 struct mlxsw_sp_fid *fid;
7040 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
7041 rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
7043 return PTR_ERR(rif);
7045 /* FID was already created, just take a reference */
7046 fid = rif->ops->fid_get(rif, extack);
7047 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
7049 goto err_fid_port_vid_map;
7051 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
7053 goto err_port_vid_learning_set;
7055 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
7056 BR_STATE_FORWARDING);
7058 goto err_port_vid_stp_set;
7060 mlxsw_sp_port_vlan->fid = fid;
7064 err_port_vid_stp_set:
7065 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
7066 err_port_vid_learning_set:
7067 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
7068 err_fid_port_vid_map:
7069 mlxsw_sp_fid_put(fid);
7070 mlxsw_sp_rif_subport_put(rif);
7075 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
7077 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
7078 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
7079 struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
7080 u16 vid = mlxsw_sp_port_vlan->vid;
7082 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
7085 mlxsw_sp_port_vlan->fid = NULL;
7086 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
7087 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
7088 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
7089 mlxsw_sp_fid_put(fid);
7090 mlxsw_sp_rif_subport_put(rif);
7094 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
7096 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
7098 mutex_lock(&mlxsw_sp->router->lock);
7099 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
7100 mutex_unlock(&mlxsw_sp->router->lock);
7103 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
7104 struct net_device *port_dev,
7105 unsigned long event, u16 vid,
7106 struct netlink_ext_ack *extack)
7108 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
7109 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
7111 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
7112 if (WARN_ON(!mlxsw_sp_port_vlan))
7117 return mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
7120 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
7127 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
7128 unsigned long event,
7129 struct netlink_ext_ack *extack)
7131 if (netif_is_bridge_port(port_dev) ||
7132 netif_is_lag_port(port_dev) ||
7133 netif_is_ovs_port(port_dev))
7136 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
7137 MLXSW_SP_DEFAULT_VID, extack);
7140 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
7141 struct net_device *lag_dev,
7142 unsigned long event, u16 vid,
7143 struct netlink_ext_ack *extack)
7145 struct net_device *port_dev;
7146 struct list_head *iter;
7149 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
7150 if (mlxsw_sp_port_dev_check(port_dev)) {
7151 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
7163 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
7164 unsigned long event,
7165 struct netlink_ext_ack *extack)
7167 if (netif_is_bridge_port(lag_dev))
7170 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
7171 MLXSW_SP_DEFAULT_VID, extack);
7174 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
7175 struct net_device *l3_dev,
7176 unsigned long event,
7177 struct netlink_ext_ack *extack)
7179 struct mlxsw_sp_rif_params params = {
7182 struct mlxsw_sp_rif *rif;
7186 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
7188 return PTR_ERR(rif);
7191 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7192 mlxsw_sp_rif_destroy(rif);
7199 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
7200 struct net_device *vlan_dev,
7201 unsigned long event,
7202 struct netlink_ext_ack *extack)
7204 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
7205 u16 vid = vlan_dev_vlan_id(vlan_dev);
7207 if (netif_is_bridge_port(vlan_dev))
7210 if (mlxsw_sp_port_dev_check(real_dev))
7211 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
7212 event, vid, extack);
7213 else if (netif_is_lag_master(real_dev))
7214 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
7216 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
7217 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
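/* VRRP virtual routers use the well-known MAC prefixes 00:00:5e:00:01:XX
 * (IPv4) and 00:00:5e:00:02:XX (IPv6), where the last byte is the virtual
 * router ID. The helpers below match on the prefix; the ID itself is then
 * programmed into the RIF via the RITR register.
 */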
7223 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
7225 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
7226 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
7228 return ether_addr_equal_masked(mac, vrrp4, mask);
7231 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
7233 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
7234 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
7236 return ether_addr_equal_masked(mac, vrrp6, mask);
7239 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
7240 const u8 *mac, bool adding)
7242 char ritr_pl[MLXSW_REG_RITR_LEN];
7243 u8 vrrp_id = adding ? mac[5] : 0;
7246 if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
7247 !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
7250 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
7251 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7255 if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
7256 mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
7258 mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
7260 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7263 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
7264 const struct net_device *macvlan_dev,
7265 struct netlink_ext_ack *extack)
7267 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
7268 struct mlxsw_sp_rif *rif;
7271 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
7273 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
7277 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
7278 mlxsw_sp_fid_index(rif->fid), true);
7282 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
7283 macvlan_dev->dev_addr, true);
7285 goto err_rif_vrrp_add;
7287 /* Make sure the bridge driver does not have this MAC pointing at some other port. */
7290 if (rif->ops->fdb_del)
7291 rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
7296 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
7297 mlxsw_sp_fid_index(rif->fid), false);
7301 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
7302 const struct net_device *macvlan_dev)
7304 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
7305 struct mlxsw_sp_rif *rif;
7307 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
7308 /* If we do not have a RIF, then we already took care of
7309 * removing the macvlan's MAC during RIF deletion. */
7313 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
7315 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
7316 mlxsw_sp_fid_index(rif->fid), false);
7319 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
7320 const struct net_device *macvlan_dev)
7322 mutex_lock(&mlxsw_sp->router->lock);
7323 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
7324 mutex_unlock(&mlxsw_sp->router->lock);
7327 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
7328 struct net_device *macvlan_dev,
7329 unsigned long event,
7330 struct netlink_ext_ack *extack)
7334 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
7336 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
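/* The device requires all router interface MAC addresses to share the same
 * prefix (see mac_mask). Validate a candidate address against the addresses
 * of all existing RIFs before it is applied.
 */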
7343 static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
7344 struct net_device *dev,
7345 const unsigned char *dev_addr,
7346 struct netlink_ext_ack *extack)
7348 struct mlxsw_sp_rif *rif;
7351 /* A RIF is not created for macvlan netdevs. Their MAC is used to direct more MAC addresses to the router. */
7354 if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
7357 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
7358 rif = mlxsw_sp->router->rifs[i];
7359 if (rif && rif->ops &&
7360 rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
7362 if (rif && rif->dev && rif->dev != dev &&
7363 !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
7364 mlxsw_sp->mac_mask)) {
7365 NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
7373 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
7374 struct net_device *dev,
7375 unsigned long event,
7376 struct netlink_ext_ack *extack)
7378 if (mlxsw_sp_port_dev_check(dev))
7379 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
7380 else if (netif_is_lag_master(dev))
7381 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
7382 else if (netif_is_bridge_master(dev))
7383 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
7385 else if (is_vlan_dev(dev))
7386 return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
7388 else if (netif_is_macvlan(dev))
7389 return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
7395 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
7396 unsigned long event, void *ptr)
7398 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
7399 struct net_device *dev = ifa->ifa_dev->dev;
7400 struct mlxsw_sp_router *router;
7401 struct mlxsw_sp_rif *rif;
7404 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
7405 if (event == NETDEV_UP)
7408 router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
7409 mutex_lock(&router->lock);
7410 rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
7411 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7414 err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
7416 mutex_unlock(&router->lock);
7417 return notifier_from_errno(err);
7420 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
7421 unsigned long event, void *ptr)
7423 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
7424 struct net_device *dev = ivi->ivi_dev->dev;
7425 struct mlxsw_sp *mlxsw_sp;
7426 struct mlxsw_sp_rif *rif;
7429 mlxsw_sp = mlxsw_sp_lower_get(dev);
7433 mutex_lock(&mlxsw_sp->router->lock);
7434 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7435 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7438 err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7443 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
7445 mutex_unlock(&mlxsw_sp->router->lock);
7446 return notifier_from_errno(err);
7449 struct mlxsw_sp_inet6addr_event_work {
7450 struct work_struct work;
7451 struct mlxsw_sp *mlxsw_sp;
7452 struct net_device *dev;
7453 unsigned long event;
7456 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
7458 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
7459 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
7460 struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
7461 struct net_device *dev = inet6addr_work->dev;
7462 unsigned long event = inet6addr_work->event;
7463 struct mlxsw_sp_rif *rif;
7466 mutex_lock(&mlxsw_sp->router->lock);
7468 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7469 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7472 __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
7474 mutex_unlock(&mlxsw_sp->router->lock);
7477 kfree(inet6addr_work);
7480 /* Called with rcu_read_lock() */
7481 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
7482 unsigned long event, void *ptr)
7484 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
7485 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
7486 struct net_device *dev = if6->idev->dev;
7487 struct mlxsw_sp_router *router;
7489 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
7490 if (event == NETDEV_UP)
7493 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
7494 if (!inet6addr_work)
7497 router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
7498 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
7499 inet6addr_work->mlxsw_sp = router->mlxsw_sp;
7500 inet6addr_work->dev = dev;
7501 inet6addr_work->event = event;
7503 mlxsw_core_schedule_work(&inet6addr_work->work);
7508 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
7509 unsigned long event, void *ptr)
7511 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
7512 struct net_device *dev = i6vi->i6vi_dev->dev;
7513 struct mlxsw_sp *mlxsw_sp;
7514 struct mlxsw_sp_rif *rif;
7517 mlxsw_sp = mlxsw_sp_lower_get(dev);
7521 mutex_lock(&mlxsw_sp->router->lock);
7522 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7523 if (!mlxsw_sp_rif_should_config(rif, dev, event))
7526 err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
7531 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
7533 mutex_unlock(&mlxsw_sp->router->lock);
7534 return notifier_from_errno(err);
7537 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
7538 const char *mac, int mtu)
7540 char ritr_pl[MLXSW_REG_RITR_LEN];
7543 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
7544 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7548 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
7549 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
7550 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
7551 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7555 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
7556 struct mlxsw_sp_rif *rif)
7558 struct net_device *dev = rif->dev;
7562 fid_index = mlxsw_sp_fid_index(rif->fid);
7564 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
7568 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
7573 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
7575 goto err_rif_fdb_op;
7577 if (rif->mtu != dev->mtu) {
7578 struct mlxsw_sp_vr *vr;
7581 /* The RIF is relevant only to its mr_table instance, as unlike
7582 * unicast routing, in multicast routing a RIF cannot be shared
7583 * between several multicast routing tables. */
7585 vr = &mlxsw_sp->router->vrs[rif->vr_id];
7586 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
7587 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
7591 ether_addr_copy(rif->addr, dev->dev_addr);
7592 rif->mtu = dev->mtu;
7594 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
7599 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
7601 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
7605 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
7606 struct netdev_notifier_pre_changeaddr_info *info)
7608 struct netlink_ext_ack *extack;
7610 extack = netdev_notifier_info_to_extack(&info->info);
7611 return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
7612 info->dev_addr, extack);
7615 int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
7616 unsigned long event, void *ptr)
7618 struct mlxsw_sp *mlxsw_sp;
7619 struct mlxsw_sp_rif *rif;
7622 mlxsw_sp = mlxsw_sp_lower_get(dev);
7626 mutex_lock(&mlxsw_sp->router->lock);
7627 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7632 case NETDEV_CHANGEMTU:
7633 case NETDEV_CHANGEADDR:
7634 err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
7636 case NETDEV_PRE_CHANGEADDR:
7637 err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
7642 mutex_unlock(&mlxsw_sp->router->lock);
7646 static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
7647 struct net_device *l3_dev,
7648 struct netlink_ext_ack *extack)
7650 struct mlxsw_sp_rif *rif;
7652 /* If netdev is already associated with a RIF, then we need to
7653 * destroy it and create a new one with the new virtual router ID. */
7655 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7657 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
7660 return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
7663 static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
7664 struct net_device *l3_dev)
7666 struct mlxsw_sp_rif *rif;
7668 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
7671 __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
7674 int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
7675 struct netdev_notifier_changeupper_info *info)
7677 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
7680 /* We do not create a RIF for a macvlan, but only use it to
7681 * direct more MAC addresses to the router. */
7683 if (!mlxsw_sp || netif_is_macvlan(l3_dev))
7686 mutex_lock(&mlxsw_sp->router->lock);
7688 case NETDEV_PRECHANGEUPPER:
7690 case NETDEV_CHANGEUPPER:
7691 if (info->linking) {
7692 struct netlink_ext_ack *extack;
7694 extack = netdev_notifier_info_to_extack(&info->info);
7695 err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
7697 mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
7701 mutex_unlock(&mlxsw_sp->router->lock);
7706 static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
7707 struct netdev_nested_priv *priv)
7709 struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;
7711 if (!netif_is_macvlan(dev))
7714 return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
7715 mlxsw_sp_fid_index(rif->fid), false);
7718 static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
7720 struct netdev_nested_priv priv = {
7721 .data = (void *)rif,
7724 if (!netif_is_macvlan_port(rif->dev))
7727 netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
7728 return netdev_walk_all_upper_dev_rcu(rif->dev,
7729 __mlxsw_sp_rif_macvlan_flush, &priv);
7732 static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
7733 const struct mlxsw_sp_rif_params *params)
7735 struct mlxsw_sp_rif_subport *rif_subport;
7737 rif_subport = mlxsw_sp_rif_subport_rif(rif);
7738 refcount_set(&rif_subport->ref_count, 1);
7739 rif_subport->vid = params->vid;
7740 rif_subport->lag = params->lag;
7742 rif_subport->lag_id = params->lag_id;
7744 rif_subport->system_port = params->system_port;
7747 static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
7749 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7750 struct mlxsw_sp_rif_subport *rif_subport;
7751 char ritr_pl[MLXSW_REG_RITR_LEN];
7753 rif_subport = mlxsw_sp_rif_subport_rif(rif);
7754 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
7755 rif->rif_index, rif->vr_id, rif->dev->mtu);
7756 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7757 mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
7758 rif_subport->lag ? rif_subport->lag_id :
7759 rif_subport->system_port,
7762 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7765 static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
7769 err = mlxsw_sp_rif_subport_op(rif, true);
7773 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7774 mlxsw_sp_fid_index(rif->fid), true);
7776 goto err_rif_fdb_op;
7778 mlxsw_sp_fid_rif_set(rif->fid, rif);
7782 mlxsw_sp_rif_subport_op(rif, false);
7786 static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
7788 struct mlxsw_sp_fid *fid = rif->fid;
7790 mlxsw_sp_fid_rif_set(fid, NULL);
7791 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7792 mlxsw_sp_fid_index(fid), false);
7793 mlxsw_sp_rif_macvlan_flush(rif);
7794 mlxsw_sp_rif_subport_op(rif, false);
7797 static struct mlxsw_sp_fid *
7798 mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
7799 struct netlink_ext_ack *extack)
7801 return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
7804 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
7805 .type = MLXSW_SP_RIF_TYPE_SUBPORT,
7806 .rif_size = sizeof(struct mlxsw_sp_rif_subport),
7807 .setup = mlxsw_sp_rif_subport_setup,
7808 .configure = mlxsw_sp_rif_subport_configure,
7809 .deconfigure = mlxsw_sp_rif_subport_deconfigure,
7810 .fid_get = mlxsw_sp_rif_subport_fid_get,
7813 static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
7814 enum mlxsw_reg_ritr_if_type type,
7815 u16 vid_fid, bool enable)
7817 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7818 char ritr_pl[MLXSW_REG_RITR_LEN];
7820 mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
7822 mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
7823 mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
7825 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
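/* The "router port" is a virtual local port one above the highest physical
 * port number. It is used below when setting up FID flooding, so that
 * broadcast and unregistered multicast traffic on the FID also reaches the
 * router.
 */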
7828 u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
7830 return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
7833 static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
7835 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7836 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7839 err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
7844 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7845 mlxsw_sp_router_port(mlxsw_sp), true);
7847 goto err_fid_mc_flood_set;
7849 err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7850 mlxsw_sp_router_port(mlxsw_sp), true);
7852 goto err_fid_bc_flood_set;
7854 err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7855 mlxsw_sp_fid_index(rif->fid), true);
7857 goto err_rif_fdb_op;
7859 mlxsw_sp_fid_rif_set(rif->fid, rif);
7863 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7864 mlxsw_sp_router_port(mlxsw_sp), false);
7865 err_fid_bc_flood_set:
7866 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7867 mlxsw_sp_router_port(mlxsw_sp), false);
7868 err_fid_mc_flood_set:
7869 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7873 static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
7875 u16 fid_index = mlxsw_sp_fid_index(rif->fid);
7876 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7877 struct mlxsw_sp_fid *fid = rif->fid;
7879 mlxsw_sp_fid_rif_set(fid, NULL);
7880 mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
7881 mlxsw_sp_fid_index(fid), false);
7882 mlxsw_sp_rif_macvlan_flush(rif);
7883 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
7884 mlxsw_sp_router_port(mlxsw_sp), false);
7885 mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
7886 mlxsw_sp_router_port(mlxsw_sp), false);
7887 mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
7890 static struct mlxsw_sp_fid *
7891 mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
7892 struct netlink_ext_ack *extack)
7894 return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
7897 static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7899 struct switchdev_notifier_fdb_info info;
7900 struct net_device *dev;
7902 dev = br_fdb_find_port(rif->dev, mac, 0);
7908 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7912 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
7913 .type = MLXSW_SP_RIF_TYPE_FID,
7914 .rif_size = sizeof(struct mlxsw_sp_rif),
7915 .configure = mlxsw_sp_rif_fid_configure,
7916 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
7917 .fid_get = mlxsw_sp_rif_fid_fid_get,
7918 .fdb_del = mlxsw_sp_rif_fid_fdb_del,
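/* A VLAN RIF is backed by an 802.1Q FID: for a VLAN upper of a bridge the
 * FID is keyed by the VLAN ID, while for a VLAN-aware bridge netdev itself
 * the bridge PVID is used.
 */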
7921 static struct mlxsw_sp_fid *
7922 mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
7923 struct netlink_ext_ack *extack)
7925 struct net_device *br_dev;
7929 if (is_vlan_dev(rif->dev)) {
7930 vid = vlan_dev_vlan_id(rif->dev);
7931 br_dev = vlan_dev_real_dev(rif->dev);
7932 if (WARN_ON(!netif_is_bridge_master(br_dev)))
7933 return ERR_PTR(-EINVAL);
7935 err = br_vlan_get_pvid(rif->dev, &vid);
7936 if (err < 0 || !vid) {
7937 NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
7938 return ERR_PTR(-EINVAL);
7942 return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
7945 static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
7947 u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7948 struct switchdev_notifier_fdb_info info;
7949 struct net_device *br_dev;
7950 struct net_device *dev;
7952 br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
7953 dev = br_fdb_find_port(br_dev, mac, vid);
7959 call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
7963 static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = {
7964 .type = MLXSW_SP_RIF_TYPE_VLAN,
7965 .rif_size = sizeof(struct mlxsw_sp_rif),
7966 .configure = mlxsw_sp_rif_fid_configure,
7967 .deconfigure = mlxsw_sp_rif_fid_deconfigure,
7968 .fid_get = mlxsw_sp_rif_vlan_fid_get,
7969 .fdb_del = mlxsw_sp_rif_vlan_fdb_del,
7972 static struct mlxsw_sp_rif_ipip_lb *
7973 mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
7975 return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
7979 mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
7980 const struct mlxsw_sp_rif_params *params)
7982 struct mlxsw_sp_rif_params_ipip_lb *params_lb;
7983 struct mlxsw_sp_rif_ipip_lb *rif_lb;
7985 params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
7987 rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
7988 rif_lb->lb_config = params_lb->lb_config;
7992 mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
7994 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
7995 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
7996 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
7997 struct mlxsw_sp_vr *ul_vr;
8000 ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
8002 return PTR_ERR(ul_vr);
8004 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
8006 goto err_loopback_op;
8008 lb_rif->ul_vr_id = ul_vr->id;
8009 lb_rif->ul_rif_id = 0;
8014 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
8018 static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
8020 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
8021 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8022 struct mlxsw_sp_vr *ul_vr;
8024 ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
8025 mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);
8028 mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
8031 static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
8032 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
8033 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
8034 .setup = mlxsw_sp_rif_ipip_lb_setup,
8035 .configure = mlxsw_sp1_rif_ipip_lb_configure,
8036 .deconfigure = mlxsw_sp1_rif_ipip_lb_deconfigure,
8039 const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
8040 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
8041 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
8042 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
8043 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp1_rif_ipip_lb_ops,
8047 mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
8049 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
8050 char ritr_pl[MLXSW_REG_RITR_LEN];
8052 mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
8053 ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
8054 mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
8055 MLXSW_REG_RITR_LOOPBACK_GENERIC);
8057 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8060 static struct mlxsw_sp_rif *
8061 mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
8062 struct netlink_ext_ack *extack)
8064 struct mlxsw_sp_rif *ul_rif;
8068 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
8070 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8071 return ERR_PTR(err);
8074 ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
8076 return ERR_PTR(-ENOMEM);
8078 mlxsw_sp->router->rifs[rif_index] = ul_rif;
8079 ul_rif->mlxsw_sp = mlxsw_sp;
8080 err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
8087 mlxsw_sp->router->rifs[rif_index] = NULL;
8089 return ERR_PTR(err);
8092 static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
8094 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
8096 mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
8097 mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
8101 static struct mlxsw_sp_rif *
8102 mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
8103 struct netlink_ext_ack *extack)
8105 struct mlxsw_sp_vr *vr;
8108 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
8110 return ERR_CAST(vr);
8112 if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
8115 vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
8116 if (IS_ERR(vr->ul_rif)) {
8117 err = PTR_ERR(vr->ul_rif);
8118 goto err_ul_rif_create;
8122 refcount_set(&vr->ul_rif_refcnt, 1);
8127 mlxsw_sp_vr_put(mlxsw_sp, vr);
8128 return ERR_PTR(err);
8131 static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
8133 struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
8134 struct mlxsw_sp_vr *vr;
8136 vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];
8138 if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
8142 mlxsw_sp_ul_rif_destroy(ul_rif);
8143 mlxsw_sp_vr_put(mlxsw_sp, vr);
8146 int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
8149 struct mlxsw_sp_rif *ul_rif;
8152 mutex_lock(&mlxsw_sp->router->lock);
8153 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
8154 if (IS_ERR(ul_rif)) {
8155 err = PTR_ERR(ul_rif);
8158 *ul_rif_index = ul_rif->rif_index;
8160 mutex_unlock(&mlxsw_sp->router->lock);
8164 void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
8166 struct mlxsw_sp_rif *ul_rif;
8168 mutex_lock(&mlxsw_sp->router->lock);
8169 ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
8170 if (WARN_ON(!ul_rif))
8173 mlxsw_sp_ul_rif_put(ul_rif);
8175 mutex_unlock(&mlxsw_sp->router->lock);
8179 mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
8181 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
8182 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
8183 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8184 struct mlxsw_sp_rif *ul_rif;
8187 ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
8189 return PTR_ERR(ul_rif);
8191 err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
8193 goto err_loopback_op;
8195 lb_rif->ul_vr_id = 0;
8196 lb_rif->ul_rif_id = ul_rif->rif_index;
8201 mlxsw_sp_ul_rif_put(ul_rif);
8205 static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
8207 struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
8208 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8209 struct mlxsw_sp_rif *ul_rif;
8211 ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
8212 mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
8213 mlxsw_sp_ul_rif_put(ul_rif);
8216 static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
8217 .type = MLXSW_SP_RIF_TYPE_IPIP_LB,
8218 .rif_size = sizeof(struct mlxsw_sp_rif_ipip_lb),
8219 .setup = mlxsw_sp_rif_ipip_lb_setup,
8220 .configure = mlxsw_sp2_rif_ipip_lb_configure,
8221 .deconfigure = mlxsw_sp2_rif_ipip_lb_deconfigure,
8224 const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
8225 [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops,
8226 [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops,
8227 [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops,
8228 [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp2_rif_ipip_lb_ops,
8231 static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
8233 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
8235 mlxsw_sp->router->rifs = kcalloc(max_rifs,
8236 sizeof(struct mlxsw_sp_rif *),
8238 if (!mlxsw_sp->router->rifs)
8244 static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
8248 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
8249 WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
8251 kfree(mlxsw_sp->router->rifs);
8255 mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
8257 char tigcr_pl[MLXSW_REG_TIGCR_LEN];
8259 mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
8260 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
8263 static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
8267 mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
8268 INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
8270 err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
8273 err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
8277 return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
8280 static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
8282 WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
8285 static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
8287 struct mlxsw_sp_router *router;
8289 /* Flush pending FIB notifications and then flush the device's
8290 * table before requesting another dump. The FIB notification
8291 * block is unregistered, so no need to take RTNL. */
8293 mlxsw_core_flush_owq();
8294 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
8295 mlxsw_sp_router_fib_flush(router->mlxsw_sp);
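/* ECMP hash configuration, seeded from the switch base MAC. The enabled hash
 * fields follow the kernel's multipath hash policy: addresses only (L3) by
 * default, with the protocol and L4 ports mixed in when an L4 policy is
 * requested.
 */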
8298 #ifdef CONFIG_IP_ROUTE_MULTIPATH
8299 static void mlxsw_sp_mp_hash_header_set(char *recr2_pl, int header)
8301 mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, header, true);
8304 static void mlxsw_sp_mp_hash_field_set(char *recr2_pl, int field)
8306 mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, field, true);
8309 static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
8311 struct net *net = mlxsw_sp_net(mlxsw_sp);
8312 bool only_l3 = !net->ipv4.sysctl_fib_multipath_hash_policy;
8314 mlxsw_sp_mp_hash_header_set(recr2_pl,
8315 MLXSW_REG_RECR2_IPV4_EN_NOT_TCP_NOT_UDP);
8316 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV4_EN_TCP_UDP);
8317 mlxsw_reg_recr2_ipv4_sip_enable(recr2_pl);
8318 mlxsw_reg_recr2_ipv4_dip_enable(recr2_pl);
8321 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_EN_IPV4);
8322 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV4_PROTOCOL);
8323 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_SPORT);
8324 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_TCP_UDP_DPORT);
8327 static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, char *recr2_pl)
8329 bool only_l3 = !ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp));
8331 mlxsw_sp_mp_hash_header_set(recr2_pl,
8332 MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
8333 mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
8334 mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
8335 mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
8336 mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
8338 mlxsw_sp_mp_hash_field_set(recr2_pl,
8339 MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
8341 mlxsw_sp_mp_hash_header_set(recr2_pl,
8342 MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
8343 mlxsw_sp_mp_hash_field_set(recr2_pl,
8344 MLXSW_REG_RECR2_TCP_UDP_SPORT);
8345 mlxsw_sp_mp_hash_field_set(recr2_pl,
8346 MLXSW_REG_RECR2_TCP_UDP_DPORT);
8350 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
8352 char recr2_pl[MLXSW_REG_RECR2_LEN];
8355 seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
8356 mlxsw_reg_recr2_pack(recr2_pl, seed);
8357 mlxsw_sp_mp4_hash_init(mlxsw_sp, recr2_pl);
8358 mlxsw_sp_mp6_hash_init(mlxsw_sp, recr2_pl);
8360 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
8363 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
8369 static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
8371 char rdpm_pl[MLXSW_REG_RDPM_LEN];
8374 MLXSW_REG_ZERO(rdpm, rdpm_pl);
8376 /* HW determines switch priority based on the DSCP bits, but the
8377 * kernel still does so based on the ToS byte. Since the bit positions
8378 * differ, program the priority the kernel would derive from the
8379 * corresponding ToS, skipping the 2 least-significant ECN bits. */
8381 for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
8382 mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));
8384 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
8387 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
8389 struct net *net = mlxsw_sp_net(mlxsw_sp);
8390 bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
8391 char rgcr_pl[MLXSW_REG_RGCR_LEN];
8394 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
8396 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
8398 mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
8399 mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
8400 mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
8401 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8404 static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8406 char rgcr_pl[MLXSW_REG_RGCR_LEN];
8408 mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
8409 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
8412 static const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_basic_ops = {
8413 .ralta_write = mlxsw_sp_router_ll_basic_ralta_write,
8414 .ralst_write = mlxsw_sp_router_ll_basic_ralst_write,
8415 .raltb_write = mlxsw_sp_router_ll_basic_raltb_write,
8416 .fib_entry_op_ctx_size = sizeof(struct mlxsw_sp_fib_entry_op_ctx_basic),
8417 .fib_entry_pack = mlxsw_sp_router_ll_basic_fib_entry_pack,
8418 .fib_entry_act_remote_pack = mlxsw_sp_router_ll_basic_fib_entry_act_remote_pack,
8419 .fib_entry_act_local_pack = mlxsw_sp_router_ll_basic_fib_entry_act_local_pack,
8420 .fib_entry_act_ip2me_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_pack,
8421 .fib_entry_act_ip2me_tun_pack = mlxsw_sp_router_ll_basic_fib_entry_act_ip2me_tun_pack,
8422 .fib_entry_commit = mlxsw_sp_router_ll_basic_fib_entry_commit,
8423 .fib_entry_is_committed = mlxsw_sp_router_ll_basic_fib_entry_is_committed,
8426 static int mlxsw_sp_router_ll_op_ctx_init(struct mlxsw_sp_router *router)
8428 size_t max_size = 0;
8431 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8432 size_t size = router->proto_ll_ops[i]->fib_entry_op_ctx_size;
8434 if (size > max_size)
8437 router->ll_op_ctx = kzalloc(sizeof(*router->ll_op_ctx) + max_size,
8439 if (!router->ll_op_ctx)
8441 INIT_LIST_HEAD(&router->ll_op_ctx->fib_entry_priv_list);
8445 static void mlxsw_sp_router_ll_op_ctx_fini(struct mlxsw_sp_router *router)
8447 WARN_ON(!list_empty(&router->ll_op_ctx->fib_entry_priv_list));
8448 kfree(router->ll_op_ctx);
8451 int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
8452 struct netlink_ext_ack *extack)
8454 struct mlxsw_sp_router *router;
8457 router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
8460 mutex_init(&router->lock);
8461 mlxsw_sp->router = router;
8462 router->mlxsw_sp = mlxsw_sp;
8464 router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_router_ll_basic_ops;
8465 router->proto_ll_ops[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_router_ll_basic_ops;
8467 err = mlxsw_sp_router_ll_op_ctx_init(router);
8469 goto err_ll_op_ctx_init;
8471 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
8472 err = __mlxsw_sp_router_init(mlxsw_sp);
8474 goto err_router_init;
8476 err = mlxsw_sp_rifs_init(mlxsw_sp);
8480 err = mlxsw_sp_ipips_init(mlxsw_sp);
8482 goto err_ipips_init;
8484 err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
8485 &mlxsw_sp_nexthop_ht_params);
8487 goto err_nexthop_ht_init;
8489 err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
8490 &mlxsw_sp_nexthop_group_ht_params);
8492 goto err_nexthop_group_ht_init;
8494 INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
8495 err = mlxsw_sp_lpm_init(mlxsw_sp);
8499 err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
8503 err = mlxsw_sp_vrs_init(mlxsw_sp);
8507 err = mlxsw_sp_neigh_init(mlxsw_sp);
8509 goto err_neigh_init;
8511 err = mlxsw_sp_mp_hash_init(mlxsw_sp);
8513 goto err_mp_hash_init;
8515 err = mlxsw_sp_dscp_init(mlxsw_sp);
8519 INIT_WORK(&router->fib_event_work, mlxsw_sp_router_fib_event_work);
8520 INIT_LIST_HEAD(&router->fib_event_queue);
8521 spin_lock_init(&router->fib_event_queue_lock);
8523 router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
8524 err = register_inetaddr_notifier(&router->inetaddr_nb);
8526 goto err_register_inetaddr_notifier;
8528 router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
8529 err = register_inet6addr_notifier(&router->inet6addr_nb);
8531 goto err_register_inet6addr_notifier;
8533 mlxsw_sp->router->netevent_nb.notifier_call =
8534 mlxsw_sp_router_netevent_event;
8535 err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8537 goto err_register_netevent_notifier;
8539 mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
8540 err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8541 &mlxsw_sp->router->fib_nb,
8542 mlxsw_sp_router_fib_dump_flush, extack);
8544 goto err_register_fib_notifier;
8548 err_register_fib_notifier:
8549 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8550 err_register_netevent_notifier:
8551 unregister_inet6addr_notifier(&router->inet6addr_nb);
8552 err_register_inet6addr_notifier:
8553 unregister_inetaddr_notifier(&router->inetaddr_nb);
8554 err_register_inetaddr_notifier:
8555 mlxsw_core_flush_owq();
8556 WARN_ON(!list_empty(&router->fib_event_queue));
8559 mlxsw_sp_neigh_fini(mlxsw_sp);
8561 mlxsw_sp_vrs_fini(mlxsw_sp);
8563 mlxsw_sp_mr_fini(mlxsw_sp);
8565 mlxsw_sp_lpm_fini(mlxsw_sp);
8567 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8568 err_nexthop_group_ht_init:
8569 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8570 err_nexthop_ht_init:
8571 mlxsw_sp_ipips_fini(mlxsw_sp);
8573 mlxsw_sp_rifs_fini(mlxsw_sp);
8575 __mlxsw_sp_router_fini(mlxsw_sp);
8577 mlxsw_sp_router_ll_op_ctx_fini(router);
8579 mutex_destroy(&mlxsw_sp->router->lock);
8580 kfree(mlxsw_sp->router);
8584 void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
8586 unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
8587 &mlxsw_sp->router->fib_nb);
8588 unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
8589 unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
8590 unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
8591 mlxsw_core_flush_owq();
8592 WARN_ON(!list_empty(&mlxsw_sp->router->fib_event_queue));
8593 mlxsw_sp_neigh_fini(mlxsw_sp);
8594 mlxsw_sp_vrs_fini(mlxsw_sp);
8595 mlxsw_sp_mr_fini(mlxsw_sp);
8596 mlxsw_sp_lpm_fini(mlxsw_sp);
8597 rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
8598 rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
8599 mlxsw_sp_ipips_fini(mlxsw_sp);
8600 mlxsw_sp_rifs_fini(mlxsw_sp);
8601 __mlxsw_sp_router_fini(mlxsw_sp);
8602 mlxsw_sp_router_ll_op_ctx_fini(mlxsw_sp->router);
8603 mutex_destroy(&mlxsw_sp->router->lock);
8604 kfree(mlxsw_sp->router);