// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_bridge.h>
#include <linux/socket.h>
#include <linux/route.h>
#include <linux/gcd.h>
#include <linux/if_macvlan.h>
#include <linux/refcount.h>
#include <linux/jhash.h>
#include <linux/net_namespace.h>
#include <linux/mutex.h>
#include <linux/genalloc.h>
#include <net/netevent.h>
#include <net/neighbour.h>
#include <net/arp.h>
#include <net/inet_dscp.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/nexthop.h>
#include <net/fib_rules.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <net/addrconf.h>
#include <net/ndisc.h>
#include <net/fib_notifier.h>
#include <net/switchdev.h>

#include "spectrum.h"
#include "core.h"
#include "reg.h"
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_ipip.h"
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
#include "spectrum_span.h"
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;

struct mlxsw_sp_rif {
	struct list_head nexthop_list;
	struct list_head neigh_list;
	struct net_device *dev; /* NULL for underlay RIF */
	struct mlxsw_sp_fid *fid;
	unsigned char addr[ETH_ALEN];
	int mtu;
	u16 rif_index;
	u16 vr_id;
	const struct mlxsw_sp_rif_ops *ops;
	struct mlxsw_sp *mlxsw_sp;

	unsigned int counter_ingress;
	bool counter_ingress_valid;
	unsigned int counter_egress;
	bool counter_egress_valid;
};
struct mlxsw_sp_rif_params {
	struct net_device *dev;
	bool lag;
	bool double_entry;
};
struct mlxsw_sp_rif_subport {
	struct mlxsw_sp_rif common;
};
struct mlxsw_sp_rif_ipip_lb {
	struct mlxsw_sp_rif common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
	u16 ul_vr_id;	/* Reserved for Spectrum-2. */
	u16 ul_rif_id;	/* Reserved for Spectrum. */
};

struct mlxsw_sp_rif_params_ipip_lb {
	struct mlxsw_sp_rif_params common;
	struct mlxsw_sp_rif_ipip_lb_config lb_config;
};
struct mlxsw_sp_rif_ops {
	enum mlxsw_sp_rif_type type;

	void (*setup)(struct mlxsw_sp_rif *rif,
		      const struct mlxsw_sp_rif_params *params);
	int (*configure)(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack);
	void (*deconfigure)(struct mlxsw_sp_rif *rif);
	struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
					 struct netlink_ext_ack *extack);
	void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
struct mlxsw_sp_rif_mac_profile {
	unsigned char mac_prefix[ETH_ALEN];
	refcount_t ref_count;
	u8 id;
};
struct mlxsw_sp_router_ops {
	int (*init)(struct mlxsw_sp *mlxsw_sp);
	int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};
static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
			 const struct net_device *dev);
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree);
static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree);
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib,
				     u8 tree_id);
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib);
static unsigned int *
mlxsw_sp_rif_p_counter_get(struct mlxsw_sp_rif *rif,
			   enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return &rif->counter_egress;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return &rif->counter_ingress;
	}
	return NULL;
}
static bool
mlxsw_sp_rif_counter_valid_get(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		return rif->counter_egress_valid;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		return rif->counter_ingress_valid;
	}
	return false;
}
static void
mlxsw_sp_rif_counter_valid_set(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir,
			       bool valid)
{
	switch (dir) {
	case MLXSW_SP_RIF_COUNTER_EGRESS:
		rif->counter_egress_valid = valid;
		break;
	case MLXSW_SP_RIF_COUNTER_INGRESS:
		rif->counter_ingress_valid = valid;
		break;
	}
}
static int mlxsw_sp_rif_counter_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				     unsigned int counter_index, bool enable,
				     enum mlxsw_sp_rif_counter_dir dir)
{
	char ritr_pl[MLXSW_REG_RITR_LEN];
	bool is_egress = false;
	int err;

	if (dir == MLXSW_SP_RIF_COUNTER_EGRESS)
		is_egress = true;
	mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
	if (err)
		return err;

	mlxsw_reg_ritr_counter_pack(ritr_pl, counter_index, enable,
				    is_egress);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
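/* Illustrative sketch, not part of the driver: how a caller could use the
 * helper above to bind and later unbind an already-allocated counter on the
 * egress direction of a RIF. The counter_index is assumed to come from
 * mlxsw_sp_counter_alloc() on the RIF sub-pool.
 */
static inline int __maybe_unused
mlxsw_sp_rif_counter_edit_example(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
				  unsigned int counter_index)
{
	int err;

	/* Bind the counter and start counting egress packets. */
	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif_index, counter_index,
					true, MLXSW_SP_RIF_COUNTER_EGRESS);
	if (err)
		return err;
	/* Unbind it again; the RITR register is re-read and re-written. */
	return mlxsw_sp_rif_counter_edit(mlxsw_sp, rif_index, counter_index,
					 false, MLXSW_SP_RIF_COUNTER_EGRESS);
}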
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_rif *rif,
				   enum mlxsw_sp_rif_counter_dir dir, u64 *cnt)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	bool valid;
	int err;

	valid = mlxsw_sp_rif_counter_valid_get(rif, dir);
	if (!valid)
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;
	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_NOP);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;
	*cnt = mlxsw_reg_ricnt_good_unicast_packets_get(ricnt_pl);
	return 0;
}
struct mlxsw_sp_rif_counter_set_basic {
	u64 good_unicast_packets;
	u64 good_multicast_packets;
	u64 good_broadcast_packets;
	u64 good_unicast_bytes;
	u64 good_multicast_bytes;
	u64 good_broadcast_bytes;
	u64 error_packets;
	u64 discard_packets;
	u64 error_bytes;
	u64 discard_bytes;
};
static int
mlxsw_sp_rif_counter_fetch_clear(struct mlxsw_sp_rif *rif,
				 enum mlxsw_sp_rif_counter_dir dir,
				 struct mlxsw_sp_rif_counter_set_basic *set)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ricnt_pl[MLXSW_REG_RICNT_LEN];
	unsigned int *p_counter_index;
	int err;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return -EINVAL;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	mlxsw_reg_ricnt_pack(ricnt_pl, *p_counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
	if (err)
		return err;

	if (set) {
#define MLXSW_SP_RIF_COUNTER_EXTRACT(NAME) \
		(set->NAME = mlxsw_reg_ricnt_ ## NAME ## _get(ricnt_pl))

		MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_packets);
		MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_packets);
		MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_packets);
		MLXSW_SP_RIF_COUNTER_EXTRACT(good_unicast_bytes);
		MLXSW_SP_RIF_COUNTER_EXTRACT(good_multicast_bytes);
		MLXSW_SP_RIF_COUNTER_EXTRACT(good_broadcast_bytes);
		MLXSW_SP_RIF_COUNTER_EXTRACT(error_packets);
		MLXSW_SP_RIF_COUNTER_EXTRACT(discard_packets);
		MLXSW_SP_RIF_COUNTER_EXTRACT(error_bytes);
		MLXSW_SP_RIF_COUNTER_EXTRACT(discard_bytes);
#undef MLXSW_SP_RIF_COUNTER_EXTRACT
	}

	return 0;
}
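/* Illustrative sketch, not part of the driver: the X-macro above copies every
 * field of the RICNT payload into the basic set. A caller could then derive
 * aggregates, e.g. all good packets, like so.
 */
static inline u64 __maybe_unused
mlxsw_sp_rif_counter_good_packets_example(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_counter_set_basic set;

	/* Fetch-and-clear the egress set; on error report zero. */
	if (mlxsw_sp_rif_counter_fetch_clear(rif, MLXSW_SP_RIF_COUNTER_EGRESS,
					     &set))
		return 0;
	return set.good_unicast_packets + set.good_multicast_packets +
	       set.good_broadcast_packets;
}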
static int mlxsw_sp_rif_counter_clear(struct mlxsw_sp *mlxsw_sp,
				      unsigned int counter_index)
{
	char ricnt_pl[MLXSW_REG_RICNT_LEN];

	mlxsw_reg_ricnt_pack(ricnt_pl, counter_index,
			     MLXSW_REG_RICNT_OPCODE_CLEAR);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ricnt), ricnt_pl);
}
int mlxsw_sp_rif_counter_alloc(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;
	int err;

	if (mlxsw_sp_rif_counter_valid_get(rif, dir))
		return 0;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (!p_counter_index)
		return -EINVAL;

	err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
				     p_counter_index);
	if (err)
		return err;

	err = mlxsw_sp_rif_counter_clear(mlxsw_sp, *p_counter_index);
	if (err)
		goto err_counter_clear;

	err = mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
					*p_counter_index, true, dir);
	if (err)
		goto err_counter_edit;
	mlxsw_sp_rif_counter_valid_set(rif, dir, true);
	return 0;

err_counter_edit:
err_counter_clear:
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	return err;
}
void mlxsw_sp_rif_counter_free(struct mlxsw_sp_rif *rif,
			       enum mlxsw_sp_rif_counter_dir dir)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	unsigned int *p_counter_index;

	if (!mlxsw_sp_rif_counter_valid_get(rif, dir))
		return;

	p_counter_index = mlxsw_sp_rif_p_counter_get(rif, dir);
	if (WARN_ON(!p_counter_index))
		return;
	mlxsw_sp_rif_counter_edit(mlxsw_sp, rif->rif_index,
				  *p_counter_index, false, dir);
	mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_RIF,
			      *p_counter_index);
	mlxsw_sp_rif_counter_valid_set(rif, dir, false);
}
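/* Illustrative sketch, not part of the driver: the full lifecycle of a RIF
 * counter using the exported helpers — allocate, read the good-unicast
 * packet count once, and free.
 */
static inline int __maybe_unused
mlxsw_sp_rif_counter_lifecycle_example(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_rif *rif)
{
	u64 cnt;
	int err;

	err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
	if (err)
		return err;
	err = mlxsw_sp_rif_counter_value_get(mlxsw_sp, rif,
					     MLXSW_SP_RIF_COUNTER_INGRESS,
					     &cnt);
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
	return err;
}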
static void mlxsw_sp_rif_counters_alloc(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct devlink *devlink;

	devlink = priv_to_devlink(mlxsw_sp->core);
	if (!devlink_dpipe_table_counter_enabled(devlink,
						 MLXSW_SP_DPIPE_TABLE_NAME_ERIF))
		return;
	mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
static void mlxsw_sp_rif_counters_free(struct mlxsw_sp_rif *rif)
{
	mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
}
#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE + 1)

struct mlxsw_sp_prefix_usage {
	DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
};

#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
	for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)

static bool
mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
			 struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
			  struct mlxsw_sp_prefix_usage *prefix_usage2)
{
	memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
}

static void
mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
			  unsigned char prefix_len)
{
	set_bit(prefix_len, prefix_usage->b);
}

static void
mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
			    unsigned char prefix_len)
{
	clear_bit(prefix_len, prefix_usage->b);
}
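/* Illustrative sketch, not part of the driver: a prefix-usage bitmap simply
 * records which prefix lengths (0..128) a FIB currently uses. Setting /0,
 * /16 and /24 and iterating visits them in ascending order.
 */
static inline void __maybe_unused
mlxsw_sp_prefix_usage_example(void)
{
	struct mlxsw_sp_prefix_usage usage = {{ 0 } };
	u8 prefix;

	mlxsw_sp_prefix_usage_set(&usage, 0);
	mlxsw_sp_prefix_usage_set(&usage, 16);
	mlxsw_sp_prefix_usage_set(&usage, 24);
	mlxsw_sp_prefix_usage_for_each(prefix, &usage)
		pr_debug("prefix length /%d in use\n", prefix);
	mlxsw_sp_prefix_usage_clear(&usage, 16);
}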
struct mlxsw_sp_fib_key {
	unsigned char addr[sizeof(struct in6_addr)];
	unsigned char prefix_len;
};
enum mlxsw_sp_fib_entry_type {
	MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
	MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
	MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
	MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE,
	MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE,

	/* This is a special case of local delivery, where a packet should be
	 * decapsulated on reception. Note that there is no corresponding ENCAP,
	 * because that's a type of next hop, not of FIB entry. (There can be
	 * several next hops in a REMOTE entry, and some of them may be
	 * encapsulating entries.)
	 */
	MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP,
	MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP,
};
struct mlxsw_sp_nexthop_group_info;
struct mlxsw_sp_nexthop_group;
struct mlxsw_sp_fib_entry;
struct mlxsw_sp_fib_node {
	struct mlxsw_sp_fib_entry *fib_entry;
	struct list_head list;
	struct rhash_head ht_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_fib_key key;
};
struct mlxsw_sp_fib_entry_decap {
	struct mlxsw_sp_ipip_entry *ipip_entry;
	u32 tunnel_index;
};
struct mlxsw_sp_fib_entry {
	struct mlxsw_sp_fib_node *fib_node;
	enum mlxsw_sp_fib_entry_type type;
	struct list_head nexthop_group_node;
	struct mlxsw_sp_nexthop_group *nh_group;
	struct mlxsw_sp_fib_entry_decap decap; /* Valid for decap entries. */
};
struct mlxsw_sp_fib4_entry {
	struct mlxsw_sp_fib_entry common;
};
struct mlxsw_sp_fib6_entry {
	struct mlxsw_sp_fib_entry common;
	struct list_head rt6_list;
};
struct mlxsw_sp_rt6 {
	struct list_head list;
	struct fib6_info *rt;
};
struct mlxsw_sp_lpm_tree {
	u8 id; /* tree ID */
	unsigned int ref_count;
	enum mlxsw_sp_l3proto proto;
	unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
	struct mlxsw_sp_prefix_usage prefix_usage;
};
struct mlxsw_sp_fib {
	struct rhashtable ht;
	struct list_head node_list;
	struct mlxsw_sp_vr *vr;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	enum mlxsw_sp_l3proto proto;
};
struct mlxsw_sp_vr {
	u16 id; /* virtual router ID */
	u32 tb_id; /* kernel fib table id */
	unsigned int rif_count;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_mr_table *mr_table[MLXSW_SP_L3_PROTO_MAX];
	struct mlxsw_sp_rif *ul_rif;
	refcount_t ul_rif_refcnt;
};
static const struct rhashtable_params mlxsw_sp_fib_ht_params;
static struct mlxsw_sp_fib *mlxsw_sp_fib_create(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_vr *vr,
						enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	struct mlxsw_sp_fib *fib;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	fib = kzalloc(sizeof(*fib), GFP_KERNEL);
	if (!fib)
		return ERR_PTR(-ENOMEM);
	err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
	if (err)
		goto err_rhashtable_init;
	INIT_LIST_HEAD(&fib->node_list);
	fib->proto = proto;
	fib->vr = vr;
	fib->lpm_tree = lpm_tree;
	mlxsw_sp_lpm_tree_hold(lpm_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, lpm_tree->id);
	if (err)
		goto err_lpm_tree_bind;
	return fib;

err_lpm_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_rhashtable_init:
	kfree(fib);
	return ERR_PTR(err);
}
static void mlxsw_sp_fib_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib *fib)
{
	mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib);
	mlxsw_sp_lpm_tree_put(mlxsw_sp, fib->lpm_tree);
	WARN_ON(!list_empty(&fib->node_list));
	rhashtable_destroy(&fib->ht);
	kfree(fib);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count == 0)
			return lpm_tree;
	}
	return NULL;
}
static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, true,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static void mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralta_pl[MLXSW_REG_RALTA_LEN];

	mlxsw_reg_ralta_pack(ralta_pl, false,
			     (enum mlxsw_reg_ralxx_protocol) lpm_tree->proto,
			     lpm_tree->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
}
static int
mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_prefix_usage *prefix_usage,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	char ralst_pl[MLXSW_REG_RALST_LEN];
	u8 root_bin = 0;
	u8 prefix;
	u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;

	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
		root_bin = prefix;

	mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
	mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
		if (prefix == 0)
			continue;
		mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		last_prefix = prefix;
	}
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_prefix_usage *prefix_usage,
			 enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp);
	if (!lpm_tree)
		return ERR_PTR(-EBUSY);
	lpm_tree->proto = proto;
	err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
	if (err)
		return ERR_PTR(err);

	err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
						lpm_tree);
	if (err)
		goto err_left_struct_set;
	memcpy(&lpm_tree->prefix_usage, prefix_usage,
	       sizeof(lpm_tree->prefix_usage));
	memset(&lpm_tree->prefix_ref_count, 0,
	       sizeof(lpm_tree->prefix_ref_count));
	lpm_tree->ref_count = 1;
	return lpm_tree;

err_left_struct_set:
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
	return ERR_PTR(err);
}
static void mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_lpm_tree *lpm_tree)
{
	mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
}
static struct mlxsw_sp_lpm_tree *
mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
		      struct mlxsw_sp_prefix_usage *prefix_usage,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int i;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		if (lpm_tree->ref_count != 0 &&
		    lpm_tree->proto == proto &&
		    mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
					     prefix_usage)) {
			mlxsw_sp_lpm_tree_hold(lpm_tree);
			return lpm_tree;
		}
	}
	return mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage, proto);
}
static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree)
{
	lpm_tree->ref_count++;
}

static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_lpm_tree *lpm_tree)
{
	if (--lpm_tree->ref_count == 0)
		mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
}
#define MLXSW_SP_LPM_TREE_MIN 1 /* tree 0 is reserved */
static int mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } };
	struct mlxsw_sp_lpm_tree *lpm_tree;
	u64 max_trees;
	int err, i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LPM_TREES))
		return -EIO;

	max_trees = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LPM_TREES);
	mlxsw_sp->router->lpm.tree_count = max_trees - MLXSW_SP_LPM_TREE_MIN;
	mlxsw_sp->router->lpm.trees = kcalloc(mlxsw_sp->router->lpm.tree_count,
					      sizeof(struct mlxsw_sp_lpm_tree),
					      GFP_KERNEL);
	if (!mlxsw_sp->router->lpm.trees)
		return -ENOMEM;

	for (i = 0; i < mlxsw_sp->router->lpm.tree_count; i++) {
		lpm_tree = &mlxsw_sp->router->lpm.trees[i];
		lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
	}

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv4_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4] = lpm_tree;

	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(lpm_tree)) {
		err = PTR_ERR(lpm_tree);
		goto err_ipv6_tree_get;
	}
	mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6] = lpm_tree;

	return 0;

err_ipv6_tree_get:
	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
err_ipv4_tree_get:
	kfree(mlxsw_sp->router->lpm.trees);
	return err;
}
static void mlxsw_sp_lpm_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_lpm_tree *lpm_tree;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV6];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[MLXSW_SP_L3_PROTO_IPV4];
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);

	kfree(mlxsw_sp->router->lpm.trees);
}
static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
{
	return !!vr->fib4 || !!vr->fib6 ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] ||
	       !!vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_is_used(vr))
			return vr;
	}
	return NULL;
}
static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
				     const struct mlxsw_sp_fib *fib, u8 tree_id)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto,
			     tree_id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
				       const struct mlxsw_sp_fib *fib)
{
	char raltb_pl[MLXSW_REG_RALTB_LEN];

	/* Bind to tree 0 which is default */
	mlxsw_reg_raltb_pack(raltb_pl, fib->vr->id,
			     (enum mlxsw_reg_ralxx_protocol) fib->proto, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
}
static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
{
	/* For our purpose, squash main, default and local tables into one */
	if (tb_id == RT_TABLE_LOCAL || tb_id == RT_TABLE_DEFAULT)
		tb_id = RT_TABLE_MAIN;
	return tb_id;
}
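/* Illustrative sketch, not part of the driver: the local, default and main
 * kernel tables all collapse onto one virtual router, while other table IDs
 * are kept as-is.
 */
static inline void __maybe_unused mlxsw_sp_fix_tb_id_example(void)
{
	WARN_ON(mlxsw_sp_fix_tb_id(RT_TABLE_LOCAL) != RT_TABLE_MAIN);
	WARN_ON(mlxsw_sp_fix_tb_id(RT_TABLE_DEFAULT) != RT_TABLE_MAIN);
	WARN_ON(mlxsw_sp_fix_tb_id(100) != 100);
}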
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
					    u32 tb_id)
{
	struct mlxsw_sp_vr *vr;
	int i;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
			return vr;
	}
	return NULL;
}
int mlxsw_sp_router_tb_id_vr_id(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				u16 *vr_id)
{
	struct mlxsw_sp_vr *vr;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr) {
		err = -ESRCH;
		goto out;
	}
	*vr_id = vr->id;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}
static struct mlxsw_sp_fib *mlxsw_sp_vr_fib(const struct mlxsw_sp_vr *vr,
					    enum mlxsw_sp_l3proto proto)
{
	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		return vr->fib4;
	case MLXSW_SP_L3_PROTO_IPV6:
		return vr->fib6;
	}
	return NULL;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
					      u32 tb_id,
					      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_mr_table *mr4_table, *mr6_table;
	struct mlxsw_sp_fib *fib4;
	struct mlxsw_sp_fib *fib6;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
	if (!vr) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
		return ERR_PTR(-EBUSY);
	}
	fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}
	mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(mr4_table)) {
		err = PTR_ERR(mr4_table);
		goto err_mr4_table_create;
	}
	mr6_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
					     MLXSW_SP_L3_PROTO_IPV6);
	if (IS_ERR(mr6_table)) {
		err = PTR_ERR(mr6_table);
		goto err_mr6_table_create;
	}

	vr->fib4 = fib4;
	vr->fib6 = fib6;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = mr4_table;
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = mr6_table;
	vr->tb_id = tb_id;
	return vr;

err_mr6_table_create:
	mlxsw_sp_mr_table_destroy(mr4_table);
err_mr4_table_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
	mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
	return ERR_PTR(err);
}
static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_vr *vr)
{
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV6] = NULL;
	mlxsw_sp_mr_table_destroy(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]);
	vr->mr_table[MLXSW_SP_L3_PROTO_IPV4] = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
	vr->fib6 = NULL;
	mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
	vr->fib4 = NULL;
}
static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
					   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;

	tb_id = mlxsw_sp_fix_tb_id(tb_id);
	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		vr = mlxsw_sp_vr_create(mlxsw_sp, tb_id, extack);
	return vr;
}
static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
{
	if (!vr->rif_count && list_empty(&vr->fib4->node_list) &&
	    list_empty(&vr->fib6->node_list) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV4]) &&
	    mlxsw_sp_mr_table_empty(vr->mr_table[MLXSW_SP_L3_PROTO_IPV6]))
		mlxsw_sp_vr_destroy(mlxsw_sp, vr);
}
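/* Illustrative sketch, not part of the driver: virtual routers are
 * demand-allocated. mlxsw_sp_vr_get() finds or creates the VR backing a
 * kernel table, and mlxsw_sp_vr_put() destroys it once nothing uses it.
 */
static inline int __maybe_unused
mlxsw_sp_vr_get_put_example(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_get(mlxsw_sp, RT_TABLE_MAIN, NULL);
	if (IS_ERR(vr))
		return PTR_ERR(vr);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return 0;
}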
static bool
mlxsw_sp_vr_lpm_tree_should_replace(struct mlxsw_sp_vr *vr,
				    enum mlxsw_sp_l3proto proto, u8 tree_id)
{
	struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);

	if (!mlxsw_sp_vr_is_used(vr))
		return false;
	if (fib->lpm_tree->id == tree_id)
		return true;
	return false;
}
static int mlxsw_sp_vr_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib *fib,
					struct mlxsw_sp_lpm_tree *new_tree)
{
	struct mlxsw_sp_lpm_tree *old_tree = fib->lpm_tree;
	int err;

	fib->lpm_tree = new_tree;
	mlxsw_sp_lpm_tree_hold(new_tree);
	err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, fib, new_tree->id);
	if (err)
		goto err_tree_bind;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);
	return 0;

err_tree_bind:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, new_tree);
	fib->lpm_tree = old_tree;
	return err;
}
static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib *fib,
					 struct mlxsw_sp_lpm_tree *new_tree)
{
	enum mlxsw_sp_l3proto proto = fib->proto;
	struct mlxsw_sp_lpm_tree *old_tree;
	u8 old_id, new_id = new_tree->id;
	struct mlxsw_sp_vr *vr;
	int i, err;

	old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
	old_id = old_tree->id;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
			continue;
		err = mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
						   mlxsw_sp_vr_fib(vr, proto),
						   new_tree);
		if (err)
			goto err_tree_replace;
	}

	memcpy(new_tree->prefix_ref_count, old_tree->prefix_ref_count,
	       sizeof(new_tree->prefix_ref_count));
	mlxsw_sp->router->lpm.proto_trees[proto] = new_tree;
	mlxsw_sp_lpm_tree_put(mlxsw_sp, old_tree);

	return 0;

err_tree_replace:
	for (i--; i >= 0; i--) {
		vr = &mlxsw_sp->router->vrs[i];
		if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, new_id))
			continue;
		mlxsw_sp_vr_lpm_tree_replace(mlxsw_sp,
					     mlxsw_sp_vr_fib(vr, proto),
					     old_tree);
	}
	return err;
}
static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_vr *vr;
	u64 max_vrs;
	int i;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_VRS))
		return -EIO;

	max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
	mlxsw_sp->router->vrs = kcalloc(max_vrs, sizeof(struct mlxsw_sp_vr),
					GFP_KERNEL);
	if (!mlxsw_sp->router->vrs)
		return -ENOMEM;

	for (i = 0; i < max_vrs; i++) {
		vr = &mlxsw_sp->router->vrs[i];
		vr->id = i;
	}

	return 0;
}
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp);
static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
{
	/* At this stage we're guaranteed not to have new incoming
	 * FIB notifications and the work queue is free from FIBs
	 * sitting on top of mlxsw netdevs. However, we can still
	 * have other FIBs queued. Flush the queue before flushing
	 * the device's tables. No need for locks, as we're the only
	 * writer.
	 */
	mlxsw_core_flush_owq();
	mlxsw_sp_router_fib_flush(mlxsw_sp);
	kfree(mlxsw_sp->router->vrs);
}
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
	struct net_device *d;
	u32 tb_id;

	rcu_read_lock();
	d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (d)
		tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
	else
		tb_id = RT_TABLE_MAIN;
	rcu_read_unlock();

	return tb_id;
}
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
		    const struct mlxsw_sp_rif_params *params,
		    struct netlink_ext_ack *extack);
static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_ipip_ol_ipip_lb_create(struct mlxsw_sp *mlxsw_sp,
				enum mlxsw_sp_ipip_type ipipt,
				struct net_device *ol_dev,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_params_ipip_lb lb_params;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_rif *rif;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	lb_params = (struct mlxsw_sp_rif_params_ipip_lb) {
		.common.dev = ol_dev,
		.common.lag = false,
		.common.double_entry = ipip_ops->double_rif_entry,
		.lb_config = ipip_ops->ol_loopback_config(mlxsw_sp, ol_dev),
	};

	rif = mlxsw_sp_rif_create(mlxsw_sp, &lb_params.common, extack);
	if (IS_ERR(rif))
		return ERR_CAST(rif);
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
			  enum mlxsw_sp_ipip_type ipipt,
			  struct net_device *ol_dev)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_ipip_entry *ret = NULL;
	int err;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
	ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
	if (!ipip_entry)
		return ERR_PTR(-ENOMEM);

	ipip_entry->ol_lb = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp, ipipt,
							    ol_dev, NULL);
	if (IS_ERR(ipip_entry->ol_lb)) {
		ret = ERR_CAST(ipip_entry->ol_lb);
		goto err_ol_ipip_lb_create;
	}

	ipip_entry->ipipt = ipipt;
	ipip_entry->ol_dev = ol_dev;
	ipip_entry->parms = ipip_ops->parms_init(ol_dev);

	err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
	if (err) {
		ret = ERR_PTR(err);
		goto err_rem_ip_addr_set;
	}

	return ipip_entry;

err_rem_ip_addr_set:
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
	kfree(ipip_entry);
	return ret;
}
static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
	mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
	kfree(ipip_entry);
}
static bool
mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
				  const enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr saddr,
				  u32 ul_tb_id,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tun_ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;
	union mlxsw_sp_l3addr tun_saddr;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	tun_saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	return tun_ul_tb_id == ul_tb_id &&
	       mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}
static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
						 enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];

	/* Not all tunnels require to increase the default parsing depth
	 * (96 bytes).
	 */
	if (ipip_ops->inc_parsing_depth)
		return mlxsw_sp_parsing_depth_inc(mlxsw_sp);

	return 0;
}
static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
						  enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops =
		mlxsw_sp->router->ipip_ops_arr[ipipt];

	if (ipip_ops->inc_parsing_depth)
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
}
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry,
			      struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 tunnel_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
				  1, &tunnel_index);
	if (err)
		return err;

	err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
						    ipip_entry->ipipt);
	if (err)
		goto err_parsing_depth_inc;

	ipip_entry->decap_fib_entry = fib_entry;
	fib_entry->decap.ipip_entry = ipip_entry;
	fib_entry->decap.tunnel_index = tunnel_index;

	return 0;

err_parsing_depth_inc:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   fib_entry->decap.tunnel_index);
	return err;
}
static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_fib_entry *fib_entry)
{
	enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;

	/* Unlink this node from the IPIP entry that it's the decap entry of. */
	fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
	fib_entry->decap.ipip_entry = NULL;
	mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
			   1, fib_entry->decap.tunnel_index);
}
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len);
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry);
static void
mlxsw_sp_ipip_entry_demote_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *fib_entry = ipip_entry->decap_fib_entry;

	mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
}
static void
mlxsw_sp_ipip_entry_promote_decap(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry,
				  struct mlxsw_sp_fib_entry *decap_fib_entry)
{
	if (mlxsw_sp_fib_entry_decap_init(mlxsw_sp, decap_fib_entry,
					  ipip_entry))
		return;
	decap_fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;

	if (mlxsw_sp_fib_entry_update(mlxsw_sp, decap_fib_entry))
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}
static struct mlxsw_sp_fib_entry *
mlxsw_sp_router_ip2me_fib_entry_find(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
				     enum mlxsw_sp_l3proto proto,
				     const union mlxsw_sp_l3addr *addr,
				     enum mlxsw_sp_fib_entry_type type)
{
	struct mlxsw_sp_fib_node *fib_node;
	unsigned char addr_prefix_len;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	const void *addrp;
	size_t addr_len;
	u32 addr4;

	vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, proto);

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		addr4 = be32_to_cpu(addr->addr4);
		addrp = &addr4;
		addr_len = 4;
		addr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		addrp = &addr->addr6;
		addr_len = 16;
		addr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(fib, addrp, addr_len,
					    addr_prefix_len);
	if (!fib_node || fib_node->fib_entry->type != type)
		return NULL;

	return fib_node->fib_entry;
}
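/* Illustrative sketch, not part of the driver: looking up the trap (local)
 * route for 192.0.2.1 in the main table. The address literal here is purely
 * hypothetical.
 */
static inline struct mlxsw_sp_fib_entry * __maybe_unused
mlxsw_sp_ip2me_find_example(struct mlxsw_sp *mlxsw_sp)
{
	union mlxsw_sp_l3addr addr = {
		.addr4 = cpu_to_be32(0xc0000201), /* 192.0.2.1 */
	};

	return mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, RT_TABLE_MAIN,
						    MLXSW_SP_L3_PROTO_IPV4,
						    &addr,
						    MLXSW_SP_FIB_ENTRY_TYPE_TRAP);
}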
/* Given an IPIP entry, find the corresponding decap route. */
static struct mlxsw_sp_fib_entry *
mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_node *fib_node;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	unsigned char saddr_prefix_len;
	union mlxsw_sp_l3addr saddr;
	struct mlxsw_sp_fib *ul_fib;
	struct mlxsw_sp_vr *ul_vr;
	const void *saddrp;
	size_t saddr_len;
	u32 ul_tb_id;
	u32 saddr4;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];

	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
	ul_vr = mlxsw_sp_vr_find(mlxsw_sp, ul_tb_id);
	if (!ul_vr)
		return NULL;

	ul_fib = mlxsw_sp_vr_fib(ul_vr, ipip_ops->ul_proto);
	saddr = mlxsw_sp_ipip_netdev_saddr(ipip_ops->ul_proto,
					   ipip_entry->ol_dev);

	switch (ipip_ops->ul_proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(saddr.addr4);
		saddrp = &saddr4;
		saddr_len = 4;
		saddr_prefix_len = 32;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		saddrp = &saddr.addr6;
		saddr_len = 16;
		saddr_prefix_len = 128;
		break;
	default:
		WARN_ON(1);
		return NULL;
	}

	fib_node = mlxsw_sp_fib_node_lookup(ul_fib, saddrp, saddr_len,
					    saddr_prefix_len);
	if (!fib_node ||
	    fib_node->fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_TRAP)
		return NULL;

	return fib_node->fib_entry;
}
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_create(struct mlxsw_sp *mlxsw_sp,
			   enum mlxsw_sp_ipip_type ipipt,
			   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_alloc(mlxsw_sp, ipipt, ol_dev);
	if (IS_ERR(ipip_entry))
		return ipip_entry;

	list_add_tail(&ipip_entry->ipip_list_node,
		      &mlxsw_sp->router->ipip_list);

	return ipip_entry;
}
static void
mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_ipip_entry *ipip_entry)
{
	list_del(&ipip_entry->ipip_list_node);
	mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}
static bool
mlxsw_sp_ipip_entry_matches_decap(struct mlxsw_sp *mlxsw_sp,
				  const struct net_device *ul_dev,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_ipip_type ipipt = ipip_entry->ipipt;

	if (mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto != ul_proto)
		return false;

	return mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, ul_dip,
						 ul_tb_id, ipip_entry);
}
/* Given decap parameters, find the corresponding IPIP entry. */
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_decap(struct mlxsw_sp *mlxsw_sp, int ul_dev_ifindex,
				  enum mlxsw_sp_l3proto ul_proto,
				  union mlxsw_sp_l3addr ul_dip)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	struct net_device *ul_dev;

	rcu_read_lock();
	ul_dev = dev_get_by_index_rcu(mlxsw_sp_net(mlxsw_sp), ul_dev_ifindex);
	if (!ul_dev)
		goto out_unlock;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (mlxsw_sp_ipip_entry_matches_decap(mlxsw_sp, ul_dev,
						      ul_proto, ul_dip,
						      ipip_entry))
			goto out_unlock;

	rcu_read_unlock();

	return NULL;

out_unlock:
	rcu_read_unlock();
	return ipip_entry;
}
static bool mlxsw_sp_netdev_ipip_type(const struct mlxsw_sp *mlxsw_sp,
				      const struct net_device *dev,
				      enum mlxsw_sp_ipip_type *p_type)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	enum mlxsw_sp_ipip_type ipipt;

	for (ipipt = 0; ipipt < MLXSW_SP_IPIP_TYPE_MAX; ++ipipt) {
		ipip_ops = router->ipip_ops_arr[ipipt];
		if (dev->type == ipip_ops->dev_type) {
			if (p_type)
				*p_type = ipipt;
			return true;
		}
	}
	return false;
}

static bool mlxsw_sp_netdev_is_ipip_ol(const struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *dev)
{
	return mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
}
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ol_dev(struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	list_for_each_entry(ipip_entry, &mlxsw_sp->router->ipip_list,
			    ipip_list_node)
		if (ipip_entry->ol_dev == ol_dev)
			return ipip_entry;

	return NULL;
}
static struct mlxsw_sp_ipip_entry *
mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
				   const struct net_device *ul_dev,
				   struct mlxsw_sp_ipip_entry *start)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = list_prepare_entry(start, &mlxsw_sp->router->ipip_list,
					ipip_list_node);
	list_for_each_entry_continue(ipip_entry, &mlxsw_sp->router->ipip_list,
				     ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			return ipip_entry;
	}

	return NULL;
}

static bool mlxsw_sp_netdev_is_ipip_ul(struct mlxsw_sp *mlxsw_sp,
				       const struct net_device *dev)
{
	return mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp, dev, NULL);
}
static bool mlxsw_sp_netdevice_ipip_can_offload(struct mlxsw_sp *mlxsw_sp,
						const struct net_device *ol_dev,
						enum mlxsw_sp_ipip_type ipipt)
{
	const struct mlxsw_sp_ipip_ops *ops
		= mlxsw_sp->router->ipip_ops_arr[ipipt];

	return ops->can_offload(mlxsw_sp, ol_dev);
}
static int mlxsw_sp_netdevice_ipip_ol_reg_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	enum mlxsw_sp_ipip_type ipipt = MLXSW_SP_IPIP_TYPE_MAX;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;
	u32 ul_tb_id;

	mlxsw_sp_netdev_ipip_type(mlxsw_sp, ol_dev, &ipipt);
	if (mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev, ipipt)) {
		ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
		ul_proto = mlxsw_sp->router->ipip_ops_arr[ipipt]->ul_proto;
		saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
		if (!mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
							  saddr, ul_tb_id,
							  NULL)) {
			ipip_entry = mlxsw_sp_ipip_entry_create(mlxsw_sp, ipipt,
								ol_dev);
			if (IS_ERR(ipip_entry))
				return PTR_ERR(ipip_entry);
		}
	}

	return 0;
}
static void mlxsw_sp_netdevice_ipip_ol_unreg_event(struct mlxsw_sp *mlxsw_sp,
						   struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}
static void
mlxsw_sp_ipip_entry_ol_up_event(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct mlxsw_sp_fib_entry *decap_fib_entry;

	decap_fib_entry = mlxsw_sp_ipip_entry_find_decap(mlxsw_sp, ipip_entry);
	if (decap_fib_entry)
		mlxsw_sp_ipip_entry_promote_decap(mlxsw_sp, ipip_entry,
						  decap_fib_entry);
}
static int
mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
			u16 ul_rif_id, bool enable)
{
	struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
	enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
	struct mlxsw_sp_rif *rif = &lb_rif->common;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	struct in6_addr *saddr6;
	u32 saddr4;

	ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
	switch (lb_cf.ul_protocol) {
	case MLXSW_SP_L3_PROTO_IPV4:
		saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr4,
						   lb_cf.okey);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		saddr6 = &lb_cf.saddr.addr6;
		mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
				    rif->rif_index, rif->vr_id, rif->dev->mtu);
		mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
						   ipip_options, ul_vr_id,
						   ul_rif_id, saddr6,
						   lb_cf.okey);
		break;
	}

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
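/* Illustrative sketch, not part of the driver: re-issuing RITR for a
 * loopback RIF with its current underlay IDs, e.g. after the overlay
 * device changed, mirroring what the MTU-update path below does.
 */
static inline int __maybe_unused
mlxsw_sp_rif_ipip_lb_refresh_example(struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
	return mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
				       lb_rif->ul_rif_id, true);
}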
static int mlxsw_sp_netdevice_ipip_ol_update_mtu(struct mlxsw_sp *mlxsw_sp,
						 struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;
	struct mlxsw_sp_rif_ipip_lb *lb_rif;
	int err = 0;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry) {
		lb_rif = ipip_entry->ol_lb;
		err = mlxsw_sp_rif_ipip_lb_op(lb_rif, lb_rif->ul_vr_id,
					      lb_rif->ul_rif_id, true);
		if (err)
			goto out;
		lb_rif->common.mtu = ol_dev->mtu;
	}

out:
	return err;
}
static void mlxsw_sp_netdevice_ipip_ol_up_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);
}
static void
mlxsw_sp_ipip_entry_ol_down_event(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_ipip_entry *ipip_entry)
{
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);
}
static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
						  struct net_device *ol_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (ipip_entry)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}
static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_rif *old_rif,
					 struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_ipip_entry *ipip_entry,
				 bool keep_encap,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *old_lb_rif = ipip_entry->ol_lb;
	struct mlxsw_sp_rif_ipip_lb *new_lb_rif;

	new_lb_rif = mlxsw_sp_ipip_ol_ipip_lb_create(mlxsw_sp,
						     ipip_entry->ipipt,
						     ipip_entry->ol_dev,
						     extack);
	if (IS_ERR(new_lb_rif))
		return PTR_ERR(new_lb_rif);
	ipip_entry->ol_lb = new_lb_rif;

	if (keep_encap)
		mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
					     &new_lb_rif->common);

	mlxsw_sp_rif_destroy(&old_lb_rif->common);

	return 0;
}
static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_rif *rif);
/**
 * __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
 * @mlxsw_sp: mlxsw_sp.
 * @ipip_entry: IPIP entry.
 * @recreate_loopback: Recreates the associated loopback RIF.
 * @keep_encap: Updates next hops that use the tunnel netdevice. This is only
 *              relevant when recreate_loopback is true.
 * @update_nexthops: Updates next hops, keeping the current loopback RIF. This
 *                   is only relevant when recreate_loopback is false.
 * @extack: extack.
 *
 * Return: Non-zero value on failure.
 */
int __mlxsw_sp_ipip_entry_update_tunnel(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ipip_entry *ipip_entry,
					bool recreate_loopback,
					bool keep_encap,
					bool update_nexthops,
					struct netlink_ext_ack *extack)
{
	int err;

	/* RIFs can't be edited, so to update loopback, we need to destroy and
	 * recreate it. That creates a window of opportunity where RALUE and
	 * RATR registers end up referencing a RIF that's already gone. RATRs
	 * are handled in mlxsw_sp_ipip_entry_ol_lb_update(), and to take care
	 * of RALUE, demote the decap route back.
	 */
	if (ipip_entry->decap_fib_entry)
		mlxsw_sp_ipip_entry_demote_decap(mlxsw_sp, ipip_entry);

	if (recreate_loopback) {
		err = mlxsw_sp_ipip_entry_ol_lb_update(mlxsw_sp, ipip_entry,
						       keep_encap, extack);
		if (err)
			return err;
	} else if (update_nexthops) {
		mlxsw_sp_nexthop_rif_update(mlxsw_sp,
					    &ipip_entry->ol_lb->common);
	}

	if (ipip_entry->ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_up_event(mlxsw_sp, ipip_entry);

	return 0;
}
static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
						struct net_device *ol_dev,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_ipip_entry *ipip_entry =
		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);

	if (!ipip_entry)
		return 0;

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, false, false, extack);
}
static int
mlxsw_sp_netdevice_ipip_ul_vrf_event(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_ipip_entry *ipip_entry,
				     struct net_device *ul_dev,
				     bool *demote_this,
				     struct netlink_ext_ack *extack)
{
	u32 ul_tb_id = l3mdev_fib_table(ul_dev) ? : RT_TABLE_MAIN;
	enum mlxsw_sp_l3proto ul_proto;
	union mlxsw_sp_l3addr saddr;

	/* Moving underlay to a different VRF might cause local address
	 * conflict, and the conflicting tunnels need to be demoted.
	 */
	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ipip_entry->ol_dev);
	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
						 saddr, ul_tb_id,
						 ipip_entry)) {
		*demote_this = true;
		return 0;
	}

	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   true, true, false, extack);
}
static int
mlxsw_sp_netdevice_ipip_ul_up_event(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipip_entry *ipip_entry,
				    struct net_device *ul_dev)
{
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}
static int
mlxsw_sp_netdevice_ipip_ul_down_event(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_ipip_entry *ipip_entry,
				      struct net_device *ul_dev)
{
	/* A down underlay device causes encapsulated packets to not be
	 * forwarded, but decap still works. So refresh next hops without
	 * touching anything else.
	 */
	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
						   false, false, true, NULL);
}
static int
mlxsw_sp_netdevice_ipip_ol_change_event(struct mlxsw_sp *mlxsw_sp,
					struct net_device *ol_dev,
					struct netlink_ext_ack *extack)
{
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	struct mlxsw_sp_ipip_entry *ipip_entry;
	int err;

	ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
	if (!ipip_entry)
		/* A change might make a tunnel eligible for offloading, but
		 * that is currently not implemented. What falls to slow path
		 * stays there.
		 */
		return 0;

	/* A change might make a tunnel not eligible for offloading. */
	if (!mlxsw_sp_netdevice_ipip_can_offload(mlxsw_sp, ol_dev,
						 ipip_entry->ipipt)) {
		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
		return 0;
	}

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->ol_netdev_change(mlxsw_sp, ipip_entry, extack);
	return err;
}
void mlxsw_sp_ipip_entry_demote_tunnel(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_ipip_entry *ipip_entry)
{
	struct net_device *ol_dev = ipip_entry->ol_dev;

	if (ol_dev->flags & IFF_UP)
		mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
	mlxsw_sp_ipip_entry_destroy(mlxsw_sp, ipip_entry);
}
/* The configuration where several tunnels have the same local address in the
 * same underlay table needs special treatment in the HW. That is currently not
 * implemented in the driver. This function finds and demotes the first tunnel
 * with a given source address, except the one passed in the argument
 * `except'.
 */
bool
mlxsw_sp_ipip_demote_tunnel_by_saddr(struct mlxsw_sp *mlxsw_sp,
				     enum mlxsw_sp_l3proto ul_proto,
				     union mlxsw_sp_l3addr saddr,
				     u32 ul_tb_id,
				     const struct mlxsw_sp_ipip_entry *except)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		if (ipip_entry != except &&
		    mlxsw_sp_ipip_entry_saddr_matches(mlxsw_sp, ul_proto, saddr,
						      ul_tb_id, ipip_entry)) {
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			return true;
		}
	}

	return false;
}
static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
						     struct net_device *ul_dev)
{
	struct mlxsw_sp_ipip_entry *ipip_entry, *tmp;

	list_for_each_entry_safe(ipip_entry, tmp, &mlxsw_sp->router->ipip_list,
				 ipip_list_node) {
		struct net_device *ol_dev = ipip_entry->ol_dev;
		struct net_device *ipip_ul_dev;

		rcu_read_lock();
		ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
		rcu_read_unlock();

		if (ipip_ul_dev == ul_dev)
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
	}
}
static int mlxsw_sp_netdevice_ipip_ol_event(struct mlxsw_sp *mlxsw_sp,
					    struct net_device *ol_dev,
					    unsigned long event,
					    struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;
	int err = 0;

	switch (event) {
	case NETDEV_REGISTER:
		err = mlxsw_sp_netdevice_ipip_ol_reg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UNREGISTER:
		mlxsw_sp_netdevice_ipip_ol_unreg_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_UP:
		mlxsw_sp_netdevice_ipip_ol_up_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_DOWN:
		mlxsw_sp_netdevice_ipip_ol_down_event(mlxsw_sp, ol_dev);
		break;
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			err = mlxsw_sp_netdevice_ipip_ol_vrf_event(mlxsw_sp,
								   ol_dev,
								   extack);
		break;
	case NETDEV_CHANGE:
		extack = info->extack;
		err = mlxsw_sp_netdevice_ipip_ol_change_event(mlxsw_sp,
							      ol_dev, extack);
		break;
	case NETDEV_CHANGEMTU:
		err = mlxsw_sp_netdevice_ipip_ol_update_mtu(mlxsw_sp, ol_dev);
		break;
	}
	return err;
}
static int
__mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_ipip_entry *ipip_entry,
				   struct net_device *ul_dev,
				   bool *demote_this,
				   unsigned long event,
				   struct netdev_notifier_info *info)
{
	struct netdev_notifier_changeupper_info *chup;
	struct netlink_ext_ack *extack;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		chup = container_of(info, typeof(*chup), info);
		extack = info->extack;
		if (netif_is_l3_master(chup->upper_dev))
			return mlxsw_sp_netdevice_ipip_ul_vrf_event(mlxsw_sp,
								    ipip_entry,
								    ul_dev,
								    demote_this,
								    extack);
		break;
	case NETDEV_UP:
		return mlxsw_sp_netdevice_ipip_ul_up_event(mlxsw_sp, ipip_entry,
							   ul_dev);
	case NETDEV_DOWN:
		return mlxsw_sp_netdevice_ipip_ul_down_event(mlxsw_sp,
							     ipip_entry,
							     ul_dev);
	}

	return 0;
}
static int
mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp,
				 struct net_device *ul_dev,
				 unsigned long event,
				 struct netdev_notifier_info *info)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = NULL;
	int err = 0;

	while ((ipip_entry = mlxsw_sp_ipip_entry_find_by_ul_dev(mlxsw_sp,
								ul_dev,
								ipip_entry))) {
		struct mlxsw_sp_ipip_entry *prev;
		bool demote_this = false;

		err = __mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, ipip_entry,
							 ul_dev, &demote_this,
							 event, info);
		if (err) {
			mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(mlxsw_sp,
								 ul_dev);
			break;
		}

		if (demote_this) {
			if (list_is_first(&ipip_entry->ipip_list_node,
					  &mlxsw_sp->router->ipip_list))
				prev = NULL;
			else
				/* This can't be cached from previous iteration,
				 * because that entry could be gone now.
				 */
				prev = list_prev_entry(ipip_entry,
						       ipip_list_node);
			mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
			ipip_entry = prev;
		}
	}

	return err;
}
int mlxsw_sp_router_nve_promote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip,
				      u32 tunnel_index)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(router->nve_decap_config.valid)) {
		err = -EINVAL;
		goto out;
	}

	router->nve_decap_config.ul_tb_id = ul_tb_id;
	router->nve_decap_config.tunnel_index = tunnel_index;
	router->nve_decap_config.ul_proto = ul_proto;
	router->nve_decap_config.ul_sip = *ul_sip;
	router->nve_decap_config.valid = true;

	/* It is valid to create a tunnel with a local IP and only later
	 * assign this IP address to a local interface
	 */
	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->decap.tunnel_index = tunnel_index;
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;

	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_update;

	goto out;

err_fib_entry_update:
	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}
void mlxsw_sp_router_nve_demote_decap(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
				      enum mlxsw_sp_l3proto ul_proto,
				      const union mlxsw_sp_l3addr *ul_sip)
{
	enum mlxsw_sp_fib_entry_type type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	struct mlxsw_sp_fib_entry *fib_entry;

	mutex_lock(&mlxsw_sp->router->lock);

	if (WARN_ON_ONCE(!router->nve_decap_config.valid))
		goto out;

	router->nve_decap_config.valid = false;

	fib_entry = mlxsw_sp_router_ip2me_fib_entry_find(mlxsw_sp, ul_tb_id,
							 ul_proto, ul_sip,
							 type);
	if (!fib_entry)
		goto out;

	fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
	mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}
static bool mlxsw_sp_router_nve_is_decap(struct mlxsw_sp *mlxsw_sp,
					 u32 ul_tb_id,
					 enum mlxsw_sp_l3proto ul_proto,
					 const union mlxsw_sp_l3addr *ul_sip)
{
	struct mlxsw_sp_router *router = mlxsw_sp->router;

	return router->nve_decap_config.valid &&
	       router->nve_decap_config.ul_tb_id == ul_tb_id &&
	       router->nve_decap_config.ul_proto == ul_proto &&
	       !memcmp(&router->nve_decap_config.ul_sip, ul_sip,
		       sizeof(*ul_sip));
}
struct mlxsw_sp_neigh_key {
	struct neighbour *n;
};
struct mlxsw_sp_neigh_entry {
	struct list_head rif_list_node;
	struct rhash_head ht_node;
	struct mlxsw_sp_neigh_key key;
	u16 rif;
	bool connected;
	unsigned char ha[ETH_ALEN];
	struct list_head nexthop_list; /* list of nexthops using
					* this neigh entry
					*/
	struct list_head nexthop_neighs_list_node;
	unsigned int counter_index;
	bool counter_valid;
};
static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_neigh_key),
};
struct mlxsw_sp_neigh_entry *
mlxsw_sp_rif_neigh_next(struct mlxsw_sp_rif *rif,
			struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry) {
		if (list_empty(&rif->neigh_list))
			return NULL;
		else
			return list_first_entry(&rif->neigh_list,
						typeof(*neigh_entry),
						rif_list_node);
	}
	if (list_is_last(&neigh_entry->rif_list_node, &rif->neigh_list))
		return NULL;
	return list_next_entry(neigh_entry, rif_list_node);
}
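/* Illustrative sketch, not part of the driver: walking every neighbour
 * entry hanging off a RIF with the iterator above, starting from NULL.
 */
static inline void __maybe_unused
mlxsw_sp_rif_neigh_walk_example(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry = NULL;

	while ((neigh_entry = mlxsw_sp_rif_neigh_next(rif, neigh_entry)))
		; /* inspect neigh_entry here */
}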
int mlxsw_sp_neigh_entry_type(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->key.n->tbl->family;
}
unsigned char *
mlxsw_sp_neigh_entry_ha(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return neigh_entry->ha;
}
u32 mlxsw_sp_neigh4_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return ntohl(*((__be32 *) n->primary_key));
}
struct in6_addr *
mlxsw_sp_neigh6_entry_dip(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct neighbour *n;

	n = neigh_entry->key.n;
	return (struct in6_addr *) &n->primary_key;
}
int mlxsw_sp_neigh_counter_get(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_neigh_entry *neigh_entry,
			       u64 *p_counter)
{
	if (!neigh_entry->counter_valid)
		return -EINVAL;

	return mlxsw_sp_flow_counter_get(mlxsw_sp, neigh_entry->counter_index,
					 p_counter, NULL);
}
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_alloc(struct mlxsw_sp *mlxsw_sp, struct neighbour *n,
			   u16 rif)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;

	neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_KERNEL);
	if (!neigh_entry)
		return NULL;

	neigh_entry->key.n = n;
	neigh_entry->rif = rif;
	INIT_LIST_HEAD(&neigh_entry->nexthop_list);

	return neigh_entry;
}
static void mlxsw_sp_neigh_entry_free(struct mlxsw_sp_neigh_entry *neigh_entry)
{
	kfree(neigh_entry);
}
static int
mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	return rhashtable_insert_fast(&mlxsw_sp->router->neigh_ht,
				      &neigh_entry->ht_node,
				      mlxsw_sp_neigh_ht_params);
}
static void
mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	rhashtable_remove_fast(&mlxsw_sp->router->neigh_ht,
			       &neigh_entry->ht_node,
			       mlxsw_sp_neigh_ht_params);
}
static bool
mlxsw_sp_neigh_counter_should_alloc(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	struct devlink *devlink;
	const char *table_name;

	switch (mlxsw_sp_neigh_entry_type(neigh_entry)) {
	case AF_INET:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST4;
		break;
	case AF_INET6:
		table_name = MLXSW_SP_DPIPE_TABLE_NAME_HOST6;
		break;
	default:
		WARN_ON(1);
		return false;
	}

	devlink = priv_to_devlink(mlxsw_sp->core);
	return devlink_dpipe_table_counter_enabled(devlink, table_name);
}
static void
mlxsw_sp_neigh_counter_alloc(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!mlxsw_sp_neigh_counter_should_alloc(mlxsw_sp, neigh_entry))
		return;

	if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &neigh_entry->counter_index))
		return;

	neigh_entry->counter_valid = true;
}
static void
mlxsw_sp_neigh_counter_free(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_neigh_entry *neigh_entry)
{
	if (!neigh_entry->counter_valid)
		return;
	mlxsw_sp_flow_counter_free(mlxsw_sp,
				   neigh_entry->counter_index);
	neigh_entry->counter_valid = false;
}
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_create(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_entry *neigh_entry;
	struct mlxsw_sp_rif *rif;
	int err;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
	if (!rif)
		return ERR_PTR(-EINVAL);

	neigh_entry = mlxsw_sp_neigh_entry_alloc(mlxsw_sp, n, rif->rif_index);
	if (!neigh_entry)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
	if (err)
		goto err_neigh_entry_insert;

	mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
	atomic_inc(&mlxsw_sp->router->neighs_update.neigh_count);
	list_add(&neigh_entry->rif_list_node, &rif->neigh_list);

	return neigh_entry;

err_neigh_entry_insert:
	mlxsw_sp_neigh_entry_free(neigh_entry);
	return ERR_PTR(err);
}
static void
mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_neigh_entry *neigh_entry)
{
	list_del(&neigh_entry->rif_list_node);
	atomic_dec(&mlxsw_sp->router->neighs_update.neigh_count);
	mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
	mlxsw_sp_neigh_entry_free(neigh_entry);
}
static struct mlxsw_sp_neigh_entry *
mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n)
{
	struct mlxsw_sp_neigh_key key;

	key.n = n;
	return rhashtable_lookup_fast(&mlxsw_sp->router->neigh_ht,
				      &key, mlxsw_sp_neigh_ht_params);
}
2297 mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
2299 unsigned long interval;
2301 #if IS_ENABLED(CONFIG_IPV6)
2302 interval = min_t(unsigned long,
2303 NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME),
2304 NEIGH_VAR(&nd_tbl.parms, DELAY_PROBE_TIME));
2306 interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
2308 mlxsw_sp->router->neighs_update.interval = jiffies_to_msecs(interval);
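/* For example, with the default ARP DELAY_PROBE_TIME of 5 seconds and an
 * ND DELAY_PROBE_TIME tuned down to 1 second, the resulting polling
 * interval is min(5s, 1s) converted to milliseconds, i.e. 1000.
 */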
2311 static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2315 u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
2316 struct net_device *dev;
2317 struct neighbour *n;
2322 mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
2324 if (WARN_ON_ONCE(rif >= max_rifs))
2326 if (!mlxsw_sp->router->rifs[rif]) {
2327 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2332 dev = mlxsw_sp->router->rifs[rif]->dev;
2333 n = neigh_lookup(&arp_tbl, &dipn, dev);
2337 netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
2338 neigh_event_send(n, NULL);
2342 #if IS_ENABLED(CONFIG_IPV6)
2343 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2347 struct net_device *dev;
2348 struct neighbour *n;
2349 struct in6_addr dip;
2352 mlxsw_reg_rauhtd_ent_ipv6_unpack(rauhtd_pl, rec_index, &rif,
2355 if (!mlxsw_sp->router->rifs[rif]) {
2356 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
2360 dev = mlxsw_sp->router->rifs[rif]->dev;
2361 n = neigh_lookup(&nd_tbl, &dip, dev);
2365 netdev_dbg(dev, "Updating neighbour with IP=%pI6c\n", &dip);
2366 neigh_event_send(n, NULL);
2370 static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2377 static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
2384 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2386 /* Hardware starts counting at 0, so add 1. */
2389 /* Each record consists of several neighbour entries. */
2390 for (i = 0; i < num_entries; i++) {
2393 ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
2394 mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
2400 static void mlxsw_sp_router_neigh_rec_ipv6_process(struct mlxsw_sp *mlxsw_sp,
2404 /* One record contains one entry. */
2405 mlxsw_sp_router_neigh_ent_ipv6_process(mlxsw_sp, rauhtd_pl,
2409 static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
2410 char *rauhtd_pl, int rec_index)
2412 switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
2413 case MLXSW_REG_RAUHTD_TYPE_IPV4:
2414 mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
2417 case MLXSW_REG_RAUHTD_TYPE_IPV6:
2418 mlxsw_sp_router_neigh_rec_ipv6_process(mlxsw_sp, rauhtd_pl,
2424 static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl)
2426 u8 num_rec, last_rec_index, num_entries;
2428 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2429 last_rec_index = num_rec - 1;
2431 if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM)
2433 if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) ==
2434 MLXSW_REG_RAUHTD_TYPE_IPV6)
2437 num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
2439 if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC)
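/* In other words, the dump is considered full - and another query is
 * issued - only when the maximum number of records was returned and the
 * last record cannot hold more entries: an IPv6 record always holds a
 * single entry, while an IPv4 record is full only when all of its
 * entries are in use. The increment above compensates for the hardware
 * counting entries from zero.
 */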
2445 __mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp,
2447 enum mlxsw_reg_rauhtd_type type)
2452 /* Ensure the RIF we read from the device does not change mid-dump. */
2453 mutex_lock(&mlxsw_sp->router->lock);
2455 mlxsw_reg_rauhtd_pack(rauhtd_pl, type);
2456 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
2459 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
2462 num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
2463 for (i = 0; i < num_rec; i++)
2464 mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
2466 } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));
2467 mutex_unlock(&mlxsw_sp->router->lock);
2472 static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
2474 enum mlxsw_reg_rauhtd_type type;
2478 if (!atomic_read(&mlxsw_sp->router->neighs_update.neigh_count))
2481 rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
2485 type = MLXSW_REG_RAUHTD_TYPE_IPV4;
2486 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2490 type = MLXSW_REG_RAUHTD_TYPE_IPV6;
2491 err = __mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp, rauhtd_pl, type);
2497 static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
2499 struct mlxsw_sp_neigh_entry *neigh_entry;
2501 mutex_lock(&mlxsw_sp->router->lock);
2502 list_for_each_entry(neigh_entry, &mlxsw_sp->router->nexthop_neighs_list,
2503 nexthop_neighs_list_node)
2504 /* If this neigh has nexthops, make the kernel think it
2505 * is active regardless of the traffic.
2507 neigh_event_send(neigh_entry->key.n, NULL);
2508 mutex_unlock(&mlxsw_sp->router->lock);
2512 mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
2514 unsigned long interval = mlxsw_sp->router->neighs_update.interval;
2516 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw,
2517 msecs_to_jiffies(interval));
2520 static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
2522 struct mlxsw_sp_router *router;
2525 router = container_of(work, struct mlxsw_sp_router,
2526 neighs_update.dw.work);
2527 err = mlxsw_sp_router_neighs_update_rauhtd(router->mlxsw_sp);
2529 dev_err(router->mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
2531 mlxsw_sp_router_neighs_update_nh(router->mlxsw_sp);
2533 mlxsw_sp_router_neighs_update_work_schedule(router->mlxsw_sp);
2536 static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
2538 struct mlxsw_sp_neigh_entry *neigh_entry;
2539 struct mlxsw_sp_router *router;
2541 router = container_of(work, struct mlxsw_sp_router,
2542 nexthop_probe_dw.work);
2543 /* Iterate over nexthop neighbours, find those that are unresolved and
2544 * send ARP on them. This solves the chicken-and-egg problem where a
2545 * nexthop would not get offloaded until its neighbour is resolved, but
2546 * the neighbour would never get resolved as long as traffic is flowing
2547 * in hardware via a different nexthop.
2549 mutex_lock(&router->lock);
2550 list_for_each_entry(neigh_entry, &router->nexthop_neighs_list,
2551 nexthop_neighs_list_node)
2552 if (!neigh_entry->connected)
2553 neigh_event_send(neigh_entry->key.n, NULL);
2554 mutex_unlock(&router->lock);
2556 mlxsw_core_schedule_dw(&router->nexthop_probe_dw,
2557 MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
2561 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
2562 struct mlxsw_sp_neigh_entry *neigh_entry,
2563 bool removing, bool dead);
2565 static enum mlxsw_reg_rauht_op mlxsw_sp_rauht_op(bool adding)
2567 return adding ? MLXSW_REG_RAUHT_OP_WRITE_ADD :
2568 MLXSW_REG_RAUHT_OP_WRITE_DELETE;
2572 mlxsw_sp_router_neigh_entry_op4(struct mlxsw_sp *mlxsw_sp,
2573 struct mlxsw_sp_neigh_entry *neigh_entry,
2574 enum mlxsw_reg_rauht_op op)
2576 struct neighbour *n = neigh_entry->key.n;
2577 u32 dip = ntohl(*((__be32 *) n->primary_key));
2578 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2580 mlxsw_reg_rauht_pack4(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2582 if (neigh_entry->counter_valid)
2583 mlxsw_reg_rauht_pack_counter(rauht_pl,
2584 neigh_entry->counter_index);
2585 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2589 mlxsw_sp_router_neigh_entry_op6(struct mlxsw_sp *mlxsw_sp,
2590 struct mlxsw_sp_neigh_entry *neigh_entry,
2591 enum mlxsw_reg_rauht_op op)
2593 struct neighbour *n = neigh_entry->key.n;
2594 char rauht_pl[MLXSW_REG_RAUHT_LEN];
2595 const char *dip = n->primary_key;
2597 mlxsw_reg_rauht_pack6(rauht_pl, op, neigh_entry->rif, neigh_entry->ha,
2599 if (neigh_entry->counter_valid)
2600 mlxsw_reg_rauht_pack_counter(rauht_pl,
2601 neigh_entry->counter_index);
2602 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht), rauht_pl);
2605 bool mlxsw_sp_neigh_ipv6_ignore(struct mlxsw_sp_neigh_entry *neigh_entry)
2607 struct neighbour *n = neigh_entry->key.n;
2609 /* Packets with a link-local destination address are trapped
2610 * after LPM lookup and never reach the neighbour table, so
2611 * there is no need to program such neighbours to the device.
2613 if (ipv6_addr_type((struct in6_addr *) &n->primary_key) &
2614 IPV6_ADDR_LINKLOCAL)
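/* For example, a neighbour for fe80::1 (a link-local destination) is
 * ignored, since such packets are trapped after the LPM lookup anyway.
 */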
2620 mlxsw_sp_neigh_entry_update(struct mlxsw_sp *mlxsw_sp,
2621 struct mlxsw_sp_neigh_entry *neigh_entry,
2624 enum mlxsw_reg_rauht_op op = mlxsw_sp_rauht_op(adding);
2627 if (!adding && !neigh_entry->connected)
2629 neigh_entry->connected = adding;
2630 if (neigh_entry->key.n->tbl->family == AF_INET) {
2631 err = mlxsw_sp_router_neigh_entry_op4(mlxsw_sp, neigh_entry,
2635 } else if (neigh_entry->key.n->tbl->family == AF_INET6) {
2636 if (mlxsw_sp_neigh_ipv6_ignore(neigh_entry))
2638 err = mlxsw_sp_router_neigh_entry_op6(mlxsw_sp, neigh_entry,
2648 neigh_entry->key.n->flags |= NTF_OFFLOADED;
2650 neigh_entry->key.n->flags &= ~NTF_OFFLOADED;
2654 mlxsw_sp_neigh_entry_counter_update(struct mlxsw_sp *mlxsw_sp,
2655 struct mlxsw_sp_neigh_entry *neigh_entry,
2659 mlxsw_sp_neigh_counter_alloc(mlxsw_sp, neigh_entry);
2661 mlxsw_sp_neigh_counter_free(mlxsw_sp, neigh_entry);
2662 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, true);
2665 struct mlxsw_sp_netevent_work {
2666 struct work_struct work;
2667 struct mlxsw_sp *mlxsw_sp;
2668 struct neighbour *n;
2671 static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
2673 struct mlxsw_sp_netevent_work *net_work =
2674 container_of(work, struct mlxsw_sp_netevent_work, work);
2675 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2676 struct mlxsw_sp_neigh_entry *neigh_entry;
2677 struct neighbour *n = net_work->n;
2678 unsigned char ha[ETH_ALEN];
2679 bool entry_connected;
2682 /* If these parameters are changed after we release the lock,
2683 * then we are guaranteed to receive another event letting us know about it.
2686 read_lock_bh(&n->lock);
2687 memcpy(ha, n->ha, ETH_ALEN);
2688 nud_state = n->nud_state;
2690 read_unlock_bh(&n->lock);
2692 mutex_lock(&mlxsw_sp->router->lock);
2693 mlxsw_sp_span_respin(mlxsw_sp);
2695 entry_connected = nud_state & NUD_VALID && !dead;
2696 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
2697 if (!entry_connected && !neigh_entry)
2700 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
2701 if (IS_ERR(neigh_entry))
2705 if (neigh_entry->connected && entry_connected &&
2706 !memcmp(neigh_entry->ha, ha, ETH_ALEN))
2709 memcpy(neigh_entry->ha, ha, ETH_ALEN);
2710 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, entry_connected);
2711 mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, !entry_connected,
2714 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
2715 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2718 mutex_unlock(&mlxsw_sp->router->lock);
2723 static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp);
2725 static void mlxsw_sp_router_mp_hash_event_work(struct work_struct *work)
2727 struct mlxsw_sp_netevent_work *net_work =
2728 container_of(work, struct mlxsw_sp_netevent_work, work);
2729 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2731 mlxsw_sp_mp_hash_init(mlxsw_sp);
2735 static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
2737 static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
2739 struct mlxsw_sp_netevent_work *net_work =
2740 container_of(work, struct mlxsw_sp_netevent_work, work);
2741 struct mlxsw_sp *mlxsw_sp = net_work->mlxsw_sp;
2743 __mlxsw_sp_router_init(mlxsw_sp);
2747 static int mlxsw_sp_router_schedule_work(struct net *net,
2748 struct notifier_block *nb,
2749 void (*cb)(struct work_struct *))
2751 struct mlxsw_sp_netevent_work *net_work;
2752 struct mlxsw_sp_router *router;
2754 router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
2755 if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
2758 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2762 INIT_WORK(&net_work->work, cb);
2763 net_work->mlxsw_sp = router->mlxsw_sp;
2764 mlxsw_core_schedule_work(&net_work->work);
2768 static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
2769 unsigned long event, void *ptr)
2771 struct mlxsw_sp_netevent_work *net_work;
2772 struct mlxsw_sp_port *mlxsw_sp_port;
2773 struct mlxsw_sp *mlxsw_sp;
2774 unsigned long interval;
2775 struct neigh_parms *p;
2776 struct neighbour *n;
2779 case NETEVENT_DELAY_PROBE_TIME_UPDATE:
2782 /* We don't care about changes in the default table. */
2783 if (!p->dev || (p->tbl->family != AF_INET &&
2784 p->tbl->family != AF_INET6))
2787 /* We are in atomic context and can't take RTNL mutex,
2788 * so use RCU variant to walk the device chain.
2790 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
2794 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2795 interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
2796 mlxsw_sp->router->neighs_update.interval = interval;
2798 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2800 case NETEVENT_NEIGH_UPDATE:
2803 if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
2806 mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
2810 net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
2812 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2816 INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
2817 net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2820 /* Take a reference to ensure the neighbour won't be
2821 * destructed until we drop the reference in the delayed work.
2825 mlxsw_core_schedule_work(&net_work->work);
2826 mlxsw_sp_port_dev_put(mlxsw_sp_port);
2828 case NETEVENT_IPV4_MPATH_HASH_UPDATE:
2829 case NETEVENT_IPV6_MPATH_HASH_UPDATE:
2830 return mlxsw_sp_router_schedule_work(ptr, nb,
2831 mlxsw_sp_router_mp_hash_event_work);
2833 case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
2834 return mlxsw_sp_router_schedule_work(ptr, nb,
2835 mlxsw_sp_router_update_priority_work);
2841 static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
2845 err = rhashtable_init(&mlxsw_sp->router->neigh_ht,
2846 &mlxsw_sp_neigh_ht_params);
2850 /* Initialize the polling interval according to the default table. */
2853 mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
2855 /* Create the delayed works for neighbour activity update and unresolved nexthop probing. */
2856 INIT_DELAYED_WORK(&mlxsw_sp->router->neighs_update.dw,
2857 mlxsw_sp_router_neighs_update_work);
2858 INIT_DELAYED_WORK(&mlxsw_sp->router->nexthop_probe_dw,
2859 mlxsw_sp_router_probe_unresolved_nexthops);
2860 atomic_set(&mlxsw_sp->router->neighs_update.neigh_count, 0);
2861 mlxsw_core_schedule_dw(&mlxsw_sp->router->neighs_update.dw, 0);
2862 mlxsw_core_schedule_dw(&mlxsw_sp->router->nexthop_probe_dw, 0);
2866 static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
2868 cancel_delayed_work_sync(&mlxsw_sp->router->neighs_update.dw);
2869 cancel_delayed_work_sync(&mlxsw_sp->router->nexthop_probe_dw);
2870 rhashtable_destroy(&mlxsw_sp->router->neigh_ht);
2873 static void mlxsw_sp_neigh_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
2874 struct mlxsw_sp_rif *rif)
2876 struct mlxsw_sp_neigh_entry *neigh_entry, *tmp;
2878 list_for_each_entry_safe(neigh_entry, tmp, &rif->neigh_list,
2880 mlxsw_sp_neigh_entry_update(mlxsw_sp, neigh_entry, false);
2881 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
2885 enum mlxsw_sp_nexthop_type {
2886 MLXSW_SP_NEXTHOP_TYPE_ETH,
2887 MLXSW_SP_NEXTHOP_TYPE_IPIP,
2890 enum mlxsw_sp_nexthop_action {
2891 /* Nexthop forwards packets to an egress RIF */
2892 MLXSW_SP_NEXTHOP_ACTION_FORWARD,
2893 /* Nexthop discards packets */
2894 MLXSW_SP_NEXTHOP_ACTION_DISCARD,
2895 /* Nexthop traps packets */
2896 MLXSW_SP_NEXTHOP_ACTION_TRAP,
2899 struct mlxsw_sp_nexthop_key {
2900 struct fib_nh *fib_nh;
2903 struct mlxsw_sp_nexthop {
2904 struct list_head neigh_list_node; /* member of neigh entry list */
2905 struct list_head rif_list_node;
2906 struct list_head router_list_node;
2907 struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
2908 * this nexthop belongs to
2910 struct rhash_head ht_node;
2911 struct neigh_table *neigh_tbl;
2912 struct mlxsw_sp_nexthop_key key;
2913 unsigned char gw_addr[sizeof(struct in6_addr)];
2917 int num_adj_entries;
2918 struct mlxsw_sp_rif *rif;
2919 u8 should_offload:1, /* set indicates this nexthop should be written
2920 * to the adjacency table.
2922 offloaded:1, /* set indicates this nexthop was written to the adjacency table.
2925 update:1; /* set indicates this nexthop should be updated in the
2926 * adjacency table (e.g., its MAC changed).
2928 enum mlxsw_sp_nexthop_action action;
2929 enum mlxsw_sp_nexthop_type type;
2931 struct mlxsw_sp_neigh_entry *neigh_entry;
2932 struct mlxsw_sp_ipip_entry *ipip_entry;
2934 unsigned int counter_index;
2938 enum mlxsw_sp_nexthop_group_type {
2939 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
2940 MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
2941 MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ,
2944 struct mlxsw_sp_nexthop_group_info {
2945 struct mlxsw_sp_nexthop_group *nh_grp;
2949 int sum_norm_weight;
2950 u8 adj_index_valid:1,
2951 gateway:1, /* routes using the group use a gateway */
2953 struct list_head list; /* member in nh_res_grp_list */
2954 struct mlxsw_sp_nexthop nexthops[];
2955 #define nh_rif nexthops[0].rif
2958 struct mlxsw_sp_nexthop_group_vr_key {
2960 enum mlxsw_sp_l3proto proto;
2963 struct mlxsw_sp_nexthop_group_vr_entry {
2964 struct list_head list; /* member in vr_list */
2965 struct rhash_head ht_node; /* member in vr_ht */
2966 refcount_t ref_count;
2967 struct mlxsw_sp_nexthop_group_vr_key key;
2970 struct mlxsw_sp_nexthop_group {
2971 struct rhash_head ht_node;
2972 struct list_head fib_list; /* list of fib entries that use this group */
2975 struct fib_info *fi;
2981 struct mlxsw_sp_nexthop_group_info *nhgi;
2982 struct list_head vr_list;
2983 struct rhashtable vr_ht;
2984 enum mlxsw_sp_nexthop_group_type type;
2988 void mlxsw_sp_nexthop_counter_alloc(struct mlxsw_sp *mlxsw_sp,
2989 struct mlxsw_sp_nexthop *nh)
2991 struct devlink *devlink;
2993 devlink = priv_to_devlink(mlxsw_sp->core);
2994 if (!devlink_dpipe_table_counter_enabled(devlink,
2995 MLXSW_SP_DPIPE_TABLE_NAME_ADJ))
2998 if (mlxsw_sp_flow_counter_alloc(mlxsw_sp, &nh->counter_index))
3001 nh->counter_valid = true;
3004 void mlxsw_sp_nexthop_counter_free(struct mlxsw_sp *mlxsw_sp,
3005 struct mlxsw_sp_nexthop *nh)
3007 if (!nh->counter_valid)
3009 mlxsw_sp_flow_counter_free(mlxsw_sp, nh->counter_index);
3010 nh->counter_valid = false;
3013 int mlxsw_sp_nexthop_counter_get(struct mlxsw_sp *mlxsw_sp,
3014 struct mlxsw_sp_nexthop *nh, u64 *p_counter)
3016 if (!nh->counter_valid)
3019 return mlxsw_sp_flow_counter_get(mlxsw_sp, nh->counter_index,
3023 struct mlxsw_sp_nexthop *mlxsw_sp_nexthop_next(struct mlxsw_sp_router *router,
3024 struct mlxsw_sp_nexthop *nh)
3027 if (list_empty(&router->nexthop_list))
3030 return list_first_entry(&router->nexthop_list,
3031 typeof(*nh), router_list_node);
3033 if (list_is_last(&nh->router_list_node, &router->nexthop_list))
3035 return list_next_entry(nh, router_list_node);
3038 bool mlxsw_sp_nexthop_is_forward(const struct mlxsw_sp_nexthop *nh)
3040 return nh->offloaded && nh->action == MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3043 unsigned char *mlxsw_sp_nexthop_ha(struct mlxsw_sp_nexthop *nh)
3045 if (nh->type != MLXSW_SP_NEXTHOP_TYPE_ETH ||
3046 !mlxsw_sp_nexthop_is_forward(nh))
3048 return nh->neigh_entry->ha;
3051 int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
3052 u32 *p_adj_size, u32 *p_adj_hash_index)
3054 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3055 u32 adj_hash_index = 0;
3058 if (!nh->offloaded || !nhgi->adj_index_valid)
3061 *p_adj_index = nhgi->adj_index;
3062 *p_adj_size = nhgi->ecmp_size;
3064 for (i = 0; i < nhgi->count; i++) {
3065 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3069 if (nh_iter->offloaded)
3070 adj_hash_index += nh_iter->num_adj_entries;
3073 *p_adj_hash_index = adj_hash_index;
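/* For example, in a group whose nexthops were assigned 2, 3 and 1
 * adjacency entries respectively, the third nexthop is reported with
 * adj_hash_index 5 (2 + 3), i.e. its entries start at adj_index + 5.
 */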
3077 struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
3082 bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
3084 struct mlxsw_sp_nexthop_group_info *nhgi = nh->nhgi;
3087 for (i = 0; i < nhgi->count; i++) {
3088 struct mlxsw_sp_nexthop *nh_iter = &nhgi->nexthops[i];
3090 if (nh_iter->type == MLXSW_SP_NEXTHOP_TYPE_IPIP)
3096 static const struct rhashtable_params mlxsw_sp_nexthop_group_vr_ht_params = {
3097 .key_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, key),
3098 .head_offset = offsetof(struct mlxsw_sp_nexthop_group_vr_entry, ht_node),
3099 .key_len = sizeof(struct mlxsw_sp_nexthop_group_vr_key),
3100 .automatic_shrinking = true,
3103 static struct mlxsw_sp_nexthop_group_vr_entry *
3104 mlxsw_sp_nexthop_group_vr_entry_lookup(struct mlxsw_sp_nexthop_group *nh_grp,
3105 const struct mlxsw_sp_fib *fib)
3107 struct mlxsw_sp_nexthop_group_vr_key key;
3109 memset(&key, 0, sizeof(key));
3110 key.vr_id = fib->vr->id;
3111 key.proto = fib->proto;
3112 return rhashtable_lookup_fast(&nh_grp->vr_ht, &key,
3113 mlxsw_sp_nexthop_group_vr_ht_params);
3117 mlxsw_sp_nexthop_group_vr_entry_create(struct mlxsw_sp_nexthop_group *nh_grp,
3118 const struct mlxsw_sp_fib *fib)
3120 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3123 vr_entry = kzalloc(sizeof(*vr_entry), GFP_KERNEL);
3127 vr_entry->key.vr_id = fib->vr->id;
3128 vr_entry->key.proto = fib->proto;
3129 refcount_set(&vr_entry->ref_count, 1);
3131 err = rhashtable_insert_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3132 mlxsw_sp_nexthop_group_vr_ht_params);
3134 goto err_hashtable_insert;
3136 list_add(&vr_entry->list, &nh_grp->vr_list);
3140 err_hashtable_insert:
3146 mlxsw_sp_nexthop_group_vr_entry_destroy(struct mlxsw_sp_nexthop_group *nh_grp,
3147 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry)
3149 list_del(&vr_entry->list);
3150 rhashtable_remove_fast(&nh_grp->vr_ht, &vr_entry->ht_node,
3151 mlxsw_sp_nexthop_group_vr_ht_params);
3156 mlxsw_sp_nexthop_group_vr_link(struct mlxsw_sp_nexthop_group *nh_grp,
3157 const struct mlxsw_sp_fib *fib)
3159 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3161 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3163 refcount_inc(&vr_entry->ref_count);
3167 return mlxsw_sp_nexthop_group_vr_entry_create(nh_grp, fib);
3171 mlxsw_sp_nexthop_group_vr_unlink(struct mlxsw_sp_nexthop_group *nh_grp,
3172 const struct mlxsw_sp_fib *fib)
3174 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3176 vr_entry = mlxsw_sp_nexthop_group_vr_entry_lookup(nh_grp, fib);
3177 if (WARN_ON_ONCE(!vr_entry))
3180 if (!refcount_dec_and_test(&vr_entry->ref_count))
3183 mlxsw_sp_nexthop_group_vr_entry_destroy(nh_grp, vr_entry);
3186 struct mlxsw_sp_nexthop_group_cmp_arg {
3187 enum mlxsw_sp_nexthop_group_type type;
3189 struct fib_info *fi;
3190 struct mlxsw_sp_fib6_entry *fib6_entry;
3196 mlxsw_sp_nexthop6_group_has_nexthop(const struct mlxsw_sp_nexthop_group *nh_grp,
3197 const struct in6_addr *gw, int ifindex,
3202 for (i = 0; i < nh_grp->nhgi->count; i++) {
3203 const struct mlxsw_sp_nexthop *nh;
3205 nh = &nh_grp->nhgi->nexthops[i];
3206 if (nh->ifindex == ifindex && nh->nh_weight == weight &&
3207 ipv6_addr_equal(gw, (struct in6_addr *) nh->gw_addr))
3215 mlxsw_sp_nexthop6_group_cmp(const struct mlxsw_sp_nexthop_group *nh_grp,
3216 const struct mlxsw_sp_fib6_entry *fib6_entry)
3218 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3220 if (nh_grp->nhgi->count != fib6_entry->nrt6)
3223 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3224 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3225 struct in6_addr *gw;
3226 int ifindex, weight;
3228 ifindex = fib6_nh->fib_nh_dev->ifindex;
3229 weight = fib6_nh->fib_nh_weight;
3230 gw = &fib6_nh->fib_nh_gw6;
3231 if (!mlxsw_sp_nexthop6_group_has_nexthop(nh_grp, gw, ifindex,
3240 mlxsw_sp_nexthop_group_cmp(struct rhashtable_compare_arg *arg, const void *ptr)
3242 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = arg->key;
3243 const struct mlxsw_sp_nexthop_group *nh_grp = ptr;
3245 if (nh_grp->type != cmp_arg->type)
3248 switch (cmp_arg->type) {
3249 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3250 return cmp_arg->fi != nh_grp->ipv4.fi;
3251 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3252 return !mlxsw_sp_nexthop6_group_cmp(nh_grp,
3253 cmp_arg->fib6_entry);
3254 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3255 return cmp_arg->id != nh_grp->obj.id;
3262 static u32 mlxsw_sp_nexthop_group_hash_obj(const void *data, u32 len, u32 seed)
3264 const struct mlxsw_sp_nexthop_group *nh_grp = data;
3265 const struct mlxsw_sp_nexthop *nh;
3266 struct fib_info *fi;
3270 switch (nh_grp->type) {
3271 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3272 fi = nh_grp->ipv4.fi;
3273 return jhash(&fi, sizeof(fi), seed);
3274 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3275 val = nh_grp->nhgi->count;
3276 for (i = 0; i < nh_grp->nhgi->count; i++) {
3277 nh = &nh_grp->nhgi->nexthops[i];
3278 val ^= jhash(&nh->ifindex, sizeof(nh->ifindex), seed);
3279 val ^= jhash(&nh->gw_addr, sizeof(nh->gw_addr), seed);
3281 return jhash(&val, sizeof(val), seed);
3282 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3283 return jhash(&nh_grp->obj.id, sizeof(nh_grp->obj.id), seed);
3291 mlxsw_sp_nexthop6_group_hash(struct mlxsw_sp_fib6_entry *fib6_entry, u32 seed)
3293 unsigned int val = fib6_entry->nrt6;
3294 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3296 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3297 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3298 struct net_device *dev = fib6_nh->fib_nh_dev;
3299 struct in6_addr *gw = &fib6_nh->fib_nh_gw6;
3301 val ^= jhash(&dev->ifindex, sizeof(dev->ifindex), seed);
3302 val ^= jhash(gw, sizeof(*gw), seed);
3305 return jhash(&val, sizeof(val), seed);
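/* Since XOR is commutative, the resulting hash does not depend on the
 * order of the routes in the list - two IPv6 entries with the same set
 * of {ifindex, gateway} pairs hash identically.
 */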
3309 mlxsw_sp_nexthop_group_hash(const void *data, u32 len, u32 seed)
3311 const struct mlxsw_sp_nexthop_group_cmp_arg *cmp_arg = data;
3313 switch (cmp_arg->type) {
3314 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3315 return jhash(&cmp_arg->fi, sizeof(cmp_arg->fi), seed);
3316 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3317 return mlxsw_sp_nexthop6_group_hash(cmp_arg->fib6_entry, seed);
3318 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3319 return jhash(&cmp_arg->id, sizeof(cmp_arg->id), seed);
3326 static const struct rhashtable_params mlxsw_sp_nexthop_group_ht_params = {
3327 .head_offset = offsetof(struct mlxsw_sp_nexthop_group, ht_node),
3328 .hashfn = mlxsw_sp_nexthop_group_hash,
3329 .obj_hashfn = mlxsw_sp_nexthop_group_hash_obj,
3330 .obj_cmpfn = mlxsw_sp_nexthop_group_cmp,
3333 static int mlxsw_sp_nexthop_group_insert(struct mlxsw_sp *mlxsw_sp,
3334 struct mlxsw_sp_nexthop_group *nh_grp)
3336 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3337 !nh_grp->nhgi->gateway)
3340 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_group_ht,
3342 mlxsw_sp_nexthop_group_ht_params);
3345 static void mlxsw_sp_nexthop_group_remove(struct mlxsw_sp *mlxsw_sp,
3346 struct mlxsw_sp_nexthop_group *nh_grp)
3348 if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6 &&
3349 !nh_grp->nhgi->gateway)
3352 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_group_ht,
3354 mlxsw_sp_nexthop_group_ht_params);
3357 static struct mlxsw_sp_nexthop_group *
3358 mlxsw_sp_nexthop4_group_lookup(struct mlxsw_sp *mlxsw_sp,
3359 struct fib_info *fi)
3361 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3363 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
3365 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3367 mlxsw_sp_nexthop_group_ht_params);
3370 static struct mlxsw_sp_nexthop_group *
3371 mlxsw_sp_nexthop6_group_lookup(struct mlxsw_sp *mlxsw_sp,
3372 struct mlxsw_sp_fib6_entry *fib6_entry)
3374 struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;
3376 cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;
3377 cmp_arg.fib6_entry = fib6_entry;
3378 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
3380 mlxsw_sp_nexthop_group_ht_params);
3383 static const struct rhashtable_params mlxsw_sp_nexthop_ht_params = {
3384 .key_offset = offsetof(struct mlxsw_sp_nexthop, key),
3385 .head_offset = offsetof(struct mlxsw_sp_nexthop, ht_node),
3386 .key_len = sizeof(struct mlxsw_sp_nexthop_key),
3389 static int mlxsw_sp_nexthop_insert(struct mlxsw_sp *mlxsw_sp,
3390 struct mlxsw_sp_nexthop *nh)
3392 return rhashtable_insert_fast(&mlxsw_sp->router->nexthop_ht,
3393 &nh->ht_node, mlxsw_sp_nexthop_ht_params);
3396 static void mlxsw_sp_nexthop_remove(struct mlxsw_sp *mlxsw_sp,
3397 struct mlxsw_sp_nexthop *nh)
3399 rhashtable_remove_fast(&mlxsw_sp->router->nexthop_ht, &nh->ht_node,
3400 mlxsw_sp_nexthop_ht_params);
3403 static struct mlxsw_sp_nexthop *
3404 mlxsw_sp_nexthop_lookup(struct mlxsw_sp *mlxsw_sp,
3405 struct mlxsw_sp_nexthop_key key)
3407 return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_ht, &key,
3408 mlxsw_sp_nexthop_ht_params);
3411 static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
3412 enum mlxsw_sp_l3proto proto,
3414 u32 adj_index, u16 ecmp_size,
3418 char raleu_pl[MLXSW_REG_RALEU_LEN];
3420 mlxsw_reg_raleu_pack(raleu_pl,
3421 (enum mlxsw_reg_ralxx_protocol) proto, vr_id,
3422 adj_index, ecmp_size, new_adj_index,
3424 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
3427 static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
3428 struct mlxsw_sp_nexthop_group *nh_grp,
3429 u32 old_adj_index, u16 old_ecmp_size)
3431 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3432 struct mlxsw_sp_nexthop_group_vr_entry *vr_entry;
3435 list_for_each_entry(vr_entry, &nh_grp->vr_list, list) {
3436 err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp,
3437 vr_entry->key.proto,
3438 vr_entry->key.vr_id,
3444 goto err_mass_update_vr;
3449 list_for_each_entry_continue_reverse(vr_entry, &nh_grp->vr_list, list)
3450 mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr_entry->key.proto,
3451 vr_entry->key.vr_id,
3454 old_adj_index, old_ecmp_size);
3458 static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
3460 struct mlxsw_sp_nexthop *nh,
3461 bool force, char *ratr_pl)
3463 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
3464 enum mlxsw_reg_ratr_op op;
3467 rif_index = nh->rif ? nh->rif->rif_index :
3468 mlxsw_sp->router->lb_rif_index;
3469 op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
3470 MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
3471 mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
3472 adj_index, rif_index);
3473 switch (nh->action) {
3474 case MLXSW_SP_NEXTHOP_ACTION_FORWARD:
3475 mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
3477 case MLXSW_SP_NEXTHOP_ACTION_DISCARD:
3478 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3479 MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS);
3481 case MLXSW_SP_NEXTHOP_ACTION_TRAP:
3482 mlxsw_reg_ratr_trap_action_set(ratr_pl,
3483 MLXSW_REG_RATR_TRAP_ACTION_TRAP);
3484 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
3490 if (nh->counter_valid)
3491 mlxsw_reg_ratr_counter_pack(ratr_pl, nh->counter_index, true);
3493 mlxsw_reg_ratr_counter_pack(ratr_pl, 0, false);
3495 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
3498 int mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3499 struct mlxsw_sp_nexthop *nh, bool force,
3504 for (i = 0; i < nh->num_adj_entries; i++) {
3507 err = __mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index + i,
3508 nh, force, ratr_pl);
3516 static int __mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3518 struct mlxsw_sp_nexthop *nh,
3519 bool force, char *ratr_pl)
3521 const struct mlxsw_sp_ipip_ops *ipip_ops;
3523 ipip_ops = mlxsw_sp->router->ipip_ops_arr[nh->ipip_entry->ipipt];
3524 return ipip_ops->nexthop_update(mlxsw_sp, adj_index, nh->ipip_entry,
3528 static int mlxsw_sp_nexthop_ipip_update(struct mlxsw_sp *mlxsw_sp,
3530 struct mlxsw_sp_nexthop *nh, bool force,
3535 for (i = 0; i < nh->num_adj_entries; i++) {
3538 err = __mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index + i,
3539 nh, force, ratr_pl);
3547 static int mlxsw_sp_nexthop_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
3548 struct mlxsw_sp_nexthop *nh, bool force,
3551 /* When action is discard or trap, the nexthop must be
3552 * programmed as an Ethernet nexthop.
3554 if (nh->type == MLXSW_SP_NEXTHOP_TYPE_ETH ||
3555 nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD ||
3556 nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3557 return mlxsw_sp_nexthop_eth_update(mlxsw_sp, adj_index, nh,
3560 return mlxsw_sp_nexthop_ipip_update(mlxsw_sp, adj_index, nh,
3565 mlxsw_sp_nexthop_group_update(struct mlxsw_sp *mlxsw_sp,
3566 struct mlxsw_sp_nexthop_group_info *nhgi,
3569 char ratr_pl[MLXSW_REG_RATR_LEN];
3570 u32 adj_index = nhgi->adj_index; /* base */
3571 struct mlxsw_sp_nexthop *nh;
3574 for (i = 0; i < nhgi->count; i++) {
3575 nh = &nhgi->nexthops[i];
3577 if (!nh->should_offload) {
3582 if (nh->update || reallocate) {
3585 err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh,
3592 adj_index += nh->num_adj_entries;
3598 mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
3599 struct mlxsw_sp_nexthop_group *nh_grp)
3601 struct mlxsw_sp_fib_entry *fib_entry;
3604 list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
3605 err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
3612 struct mlxsw_sp_adj_grp_size_range {
3613 u16 start; /* Inclusive */
3614 u16 end; /* Inclusive */
3617 /* Ordered by range start value */
3618 static const struct mlxsw_sp_adj_grp_size_range
3619 mlxsw_sp1_adj_grp_size_ranges[] = {
3620 { .start = 1, .end = 64 },
3621 { .start = 512, .end = 512 },
3622 { .start = 1024, .end = 1024 },
3623 { .start = 2048, .end = 2048 },
3624 { .start = 4096, .end = 4096 },
3627 /* Ordered by range start value */
3628 static const struct mlxsw_sp_adj_grp_size_range
3629 mlxsw_sp2_adj_grp_size_ranges[] = {
3630 { .start = 1, .end = 128 },
3631 { .start = 256, .end = 256 },
3632 { .start = 512, .end = 512 },
3633 { .start = 1024, .end = 1024 },
3634 { .start = 2048, .end = 2048 },
3635 { .start = 4096, .end = 4096 },
3638 static void mlxsw_sp_adj_grp_size_round_up(const struct mlxsw_sp *mlxsw_sp,
3639 u16 *p_adj_grp_size)
3643 for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
3644 const struct mlxsw_sp_adj_grp_size_range *size_range;
3646 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3648 if (*p_adj_grp_size >= size_range->start &&
3649 *p_adj_grp_size <= size_range->end)
3652 if (*p_adj_grp_size <= size_range->end) {
3653 *p_adj_grp_size = size_range->end;
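/* For example, on Spectrum-1 a requested size of 70 falls in none of
 * the ranges above, so it is rounded up to the end of the next range,
 * 512. A requested size of 20 lies inside the 1-64 range and is kept
 * as is.
 */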
3659 static void mlxsw_sp_adj_grp_size_round_down(const struct mlxsw_sp *mlxsw_sp,
3660 u16 *p_adj_grp_size,
3661 unsigned int alloc_size)
3665 for (i = mlxsw_sp->router->adj_grp_size_ranges_count - 1; i >= 0; i--) {
3666 const struct mlxsw_sp_adj_grp_size_range *size_range;
3668 size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];
3670 if (alloc_size >= size_range->end) {
3671 *p_adj_grp_size = size_range->end;
3677 static int mlxsw_sp_fix_adj_grp_size(struct mlxsw_sp *mlxsw_sp,
3678 u16 *p_adj_grp_size)
3680 unsigned int alloc_size;
3683 /* Round up the requested group size to the next size supported
3684 * by the device and make sure the request can be satisfied.
3686 mlxsw_sp_adj_grp_size_round_up(mlxsw_sp, p_adj_grp_size);
3687 err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
3688 MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3689 *p_adj_grp_size, &alloc_size);
3692 /* It is possible the allocation results in more allocated
3693 * entries than requested. Try to use as many of them as possible.
3696 mlxsw_sp_adj_grp_size_round_down(mlxsw_sp, p_adj_grp_size, alloc_size);
3702 mlxsw_sp_nexthop_group_normalize(struct mlxsw_sp_nexthop_group_info *nhgi)
3704 int i, g = 0, sum_norm_weight = 0;
3705 struct mlxsw_sp_nexthop *nh;
3707 for (i = 0; i < nhgi->count; i++) {
3708 nh = &nhgi->nexthops[i];
3710 if (!nh->should_offload)
3713 g = gcd(nh->nh_weight, g);
3718 for (i = 0; i < nhgi->count; i++) {
3719 nh = &nhgi->nexthops[i];
3721 if (!nh->should_offload)
3723 nh->norm_nh_weight = nh->nh_weight / g;
3724 sum_norm_weight += nh->norm_nh_weight;
3727 nhgi->sum_norm_weight = sum_norm_weight;
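/* For example, offloadable nexthops with weights {2, 4, 6} have a GCD
 * of 2 and are normalized to {1, 2, 3}, giving a sum_norm_weight of 6.
 */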
3731 mlxsw_sp_nexthop_group_rebalance(struct mlxsw_sp_nexthop_group_info *nhgi)
3733 int i, weight = 0, lower_bound = 0;
3734 int total = nhgi->sum_norm_weight;
3735 u16 ecmp_size = nhgi->ecmp_size;
3737 for (i = 0; i < nhgi->count; i++) {
3738 struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
3741 if (!nh->should_offload)
3743 weight += nh->norm_nh_weight;
3744 upper_bound = DIV_ROUND_CLOSEST(ecmp_size * weight, total);
3745 nh->num_adj_entries = upper_bound - lower_bound;
3746 lower_bound = upper_bound;
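/* Continuing the example above: with normalized weights {1, 2, 3} and
 * ecmp_size 6, the running upper bounds are DIV_ROUND_CLOSEST(6 * 1, 6),
 * DIV_ROUND_CLOSEST(6 * 3, 6) and DIV_ROUND_CLOSEST(6 * 6, 6), i.e.
 * 1, 3 and 6, so the nexthops receive 1, 2 and 3 adjacency entries.
 */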
3750 static struct mlxsw_sp_nexthop *
3751 mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
3752 const struct mlxsw_sp_rt6 *mlxsw_sp_rt6);
3755 mlxsw_sp_nexthop4_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3756 struct mlxsw_sp_nexthop_group *nh_grp)
3760 for (i = 0; i < nh_grp->nhgi->count; i++) {
3761 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3764 nh->key.fib_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3766 nh->key.fib_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3771 __mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp_nexthop_group *nh_grp,
3772 struct mlxsw_sp_fib6_entry *fib6_entry)
3774 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
3776 list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
3777 struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;
3778 struct mlxsw_sp_nexthop *nh;
3780 nh = mlxsw_sp_rt6_nexthop(nh_grp, mlxsw_sp_rt6);
3781 if (nh && nh->offloaded)
3782 fib6_nh->fib_nh_flags |= RTNH_F_OFFLOAD;
3784 fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
3789 mlxsw_sp_nexthop6_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3790 struct mlxsw_sp_nexthop_group *nh_grp)
3792 struct mlxsw_sp_fib6_entry *fib6_entry;
3794 /* Unfortunately, in IPv6 the route and the nexthop are described by
3795 * the same struct, so we need to iterate over all the routes using the
3796 * nexthop group and set / clear the offload indication for them.
3798 list_for_each_entry(fib6_entry, &nh_grp->fib_list,
3799 common.nexthop_group_node)
3800 __mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);
3804 mlxsw_sp_nexthop_bucket_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3805 const struct mlxsw_sp_nexthop *nh,
3808 struct mlxsw_sp_nexthop_group *nh_grp = nh->nhgi->nh_grp;
3809 bool offload = false, trap = false;
3811 if (nh->offloaded) {
3812 if (nh->action == MLXSW_SP_NEXTHOP_ACTION_TRAP)
3817 nexthop_bucket_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3818 bucket_index, offload, trap);
3822 mlxsw_sp_nexthop_obj_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3823 struct mlxsw_sp_nexthop_group *nh_grp)
3827 /* Do not update the flags if the nexthop group is being destroyed
3829 * 1. The nexthop object is being deleted, in which case the flags are
3831 * 2. The nexthop group was replaced by a newer group, in which case
3832 * the flags of the nexthop object were already updated based on the
3835 if (nh_grp->can_destroy)
3838 nexthop_set_hw_flags(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
3839 nh_grp->nhgi->adj_index_valid, false);
3841 /* Update flags of individual nexthop buckets in case of a resilient nexthop group. */
3844 if (!nh_grp->nhgi->is_resilient)
3847 for (i = 0; i < nh_grp->nhgi->count; i++) {
3848 struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
3850 mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, i);
3855 mlxsw_sp_nexthop_group_offload_refresh(struct mlxsw_sp *mlxsw_sp,
3856 struct mlxsw_sp_nexthop_group *nh_grp)
3858 switch (nh_grp->type) {
3859 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4:
3860 mlxsw_sp_nexthop4_group_offload_refresh(mlxsw_sp, nh_grp);
3862 case MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6:
3863 mlxsw_sp_nexthop6_group_offload_refresh(mlxsw_sp, nh_grp);
3865 case MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ:
3866 mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, nh_grp);
3872 mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
3873 struct mlxsw_sp_nexthop_group *nh_grp)
3875 struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
3876 u16 ecmp_size, old_ecmp_size;
3877 struct mlxsw_sp_nexthop *nh;
3878 bool offload_change = false;
3880 bool old_adj_index_valid;
3885 return mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3887 for (i = 0; i < nhgi->count; i++) {
3888 nh = &nhgi->nexthops[i];
3890 if (nh->should_offload != nh->offloaded) {
3891 offload_change = true;
3892 if (nh->should_offload)
3896 if (!offload_change) {
3897 /* Nothing was added or removed, so no need to reallocate. Just
3898 * update MAC on existing adjacency indexes.
3900 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, false);
3902 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3905 /* Flags of individual nexthop buckets might need to be updated. */
3908 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3911 mlxsw_sp_nexthop_group_normalize(nhgi);
3912 if (!nhgi->sum_norm_weight) {
3913 /* No neigh of this group is connected, so we just set
3914 * the trap and let everything flow through the kernel.
3920 ecmp_size = nhgi->sum_norm_weight;
3921 err = mlxsw_sp_fix_adj_grp_size(mlxsw_sp, &ecmp_size);
3923 /* No valid allocation size available. */
3926 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3927 ecmp_size, &adj_index);
3929 /* We ran out of KVD linear space, just set the
3930 * trap and let everything flow through the kernel.
3932 dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
3935 old_adj_index_valid = nhgi->adj_index_valid;
3936 old_adj_index = nhgi->adj_index;
3937 old_ecmp_size = nhgi->ecmp_size;
3938 nhgi->adj_index_valid = 1;
3939 nhgi->adj_index = adj_index;
3940 nhgi->ecmp_size = ecmp_size;
3941 mlxsw_sp_nexthop_group_rebalance(nhgi);
3942 err = mlxsw_sp_nexthop_group_update(mlxsw_sp, nhgi, true);
3944 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
3948 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3950 if (!old_adj_index_valid) {
3951 /* The trap was set for fib entries, so we have to call
3952 * fib entry update to unset it and use adjacency index.
3954 err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3956 dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
3962 err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
3963 old_adj_index, old_ecmp_size);
3964 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3965 old_ecmp_size, old_adj_index);
3967 dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
3974 old_adj_index_valid = nhgi->adj_index_valid;
3975 nhgi->adj_index_valid = 0;
3976 for (i = 0; i < nhgi->count; i++) {
3977 nh = &nhgi->nexthops[i];
3980 err2 = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
3982 dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
3983 mlxsw_sp_nexthop_group_offload_refresh(mlxsw_sp, nh_grp);
3984 if (old_adj_index_valid)
3985 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
3986 nhgi->ecmp_size, nhgi->adj_index);
3990 static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
3994 nh->action = MLXSW_SP_NEXTHOP_ACTION_FORWARD;
3995 nh->should_offload = 1;
3996 } else if (nh->nhgi->is_resilient) {
3997 nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
3998 nh->should_offload = 1;
4000 nh->should_offload = 0;
4006 mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
4007 struct mlxsw_sp_neigh_entry *neigh_entry)
4009 struct neighbour *n, *old_n = neigh_entry->key.n;
4010 struct mlxsw_sp_nexthop *nh;
4011 bool entry_connected;
4015 nh = list_first_entry(&neigh_entry->nexthop_list,
4016 struct mlxsw_sp_nexthop, neigh_list_node);
4018 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4020 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4023 neigh_event_send(n, NULL);
4026 mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
4027 neigh_entry->key.n = n;
4028 err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4030 goto err_neigh_entry_insert;
4032 read_lock_bh(&n->lock);
4033 nud_state = n->nud_state;
4035 read_unlock_bh(&n->lock);
4036 entry_connected = nud_state & NUD_VALID && !dead;
4038 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4040 neigh_release(old_n);
4042 __mlxsw_sp_nexthop_neigh_update(nh, !entry_connected);
4043 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4050 err_neigh_entry_insert:
4051 neigh_entry->key.n = old_n;
4052 mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
4058 mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
4059 struct mlxsw_sp_neigh_entry *neigh_entry,
4060 bool removing, bool dead)
4062 struct mlxsw_sp_nexthop *nh;
4064 if (list_empty(&neigh_entry->nexthop_list))
4070 err = mlxsw_sp_nexthop_dead_neigh_replace(mlxsw_sp,
4073 dev_err(mlxsw_sp->bus_info->dev, "Failed to replace dead neigh\n");
4077 list_for_each_entry(nh, &neigh_entry->nexthop_list,
4079 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4080 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4084 static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
4085 struct mlxsw_sp_rif *rif)
4091 list_add(&nh->rif_list_node, &rif->nexthop_list);
4094 static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
4099 list_del(&nh->rif_list_node);
4103 static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
4104 struct mlxsw_sp_nexthop *nh)
4106 struct mlxsw_sp_neigh_entry *neigh_entry;
4107 struct neighbour *n;
4111 if (!nh->nhgi->gateway || nh->neigh_entry)
4114 /* Take a reference on the neighbour here, ensuring that it will
4115 * not be destructed before the nexthop entry is finished.
4116 * The reference is taken either in neigh_lookup() or
4117 * in neigh_create() in case n is not found.
4119 n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4121 n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
4124 neigh_event_send(n, NULL);
4126 neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
4128 neigh_entry = mlxsw_sp_neigh_entry_create(mlxsw_sp, n);
4129 if (IS_ERR(neigh_entry)) {
4131 goto err_neigh_entry_create;
4135 /* If that is the first nexthop connected to that neigh, add to
4136 * nexthop_neighs_list
4138 if (list_empty(&neigh_entry->nexthop_list))
4139 list_add_tail(&neigh_entry->nexthop_neighs_list_node,
4140 &mlxsw_sp->router->nexthop_neighs_list);
4142 nh->neigh_entry = neigh_entry;
4143 list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
4144 read_lock_bh(&n->lock);
4145 nud_state = n->nud_state;
4147 read_unlock_bh(&n->lock);
4148 __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID && !dead));
4152 err_neigh_entry_create:
4157 static void mlxsw_sp_nexthop_neigh_fini(struct mlxsw_sp *mlxsw_sp,
4158 struct mlxsw_sp_nexthop *nh)
4160 struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
4161 struct neighbour *n;
4165 n = neigh_entry->key.n;
4167 __mlxsw_sp_nexthop_neigh_update(nh, true);
4168 list_del(&nh->neigh_list_node);
4169 nh->neigh_entry = NULL;
4171 /* If that is the last nexthop connected to that neigh, remove from
4172 * nexthop_neighs_list
4174 if (list_empty(&neigh_entry->nexthop_list))
4175 list_del(&neigh_entry->nexthop_neighs_list_node);
4177 if (!neigh_entry->connected && list_empty(&neigh_entry->nexthop_list))
4178 mlxsw_sp_neigh_entry_destroy(mlxsw_sp, neigh_entry);
4183 static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
4185 struct net_device *ul_dev;
4189 ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
4190 is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
4196 static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
4197 struct mlxsw_sp_nexthop *nh,
4198 struct mlxsw_sp_ipip_entry *ipip_entry)
4202 if (!nh->nhgi->gateway || nh->ipip_entry)
4205 nh->ipip_entry = ipip_entry;
4206 removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
4207 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4208 mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
4211 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
4212 struct mlxsw_sp_nexthop *nh)
4214 struct mlxsw_sp_ipip_entry *ipip_entry = nh->ipip_entry;
4219 __mlxsw_sp_nexthop_neigh_update(nh, true);
4220 nh->ipip_entry = NULL;
4223 static bool mlxsw_sp_nexthop4_ipip_type(const struct mlxsw_sp *mlxsw_sp,
4224 const struct fib_nh *fib_nh,
4225 enum mlxsw_sp_ipip_type *p_ipipt)
4227 struct net_device *dev = fib_nh->fib_nh_dev;
4230 fib_nh->nh_parent->fib_type == RTN_UNICAST &&
4231 mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, p_ipipt);
4234 static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
4235 struct mlxsw_sp_nexthop *nh,
4236 const struct net_device *dev)
4238 const struct mlxsw_sp_ipip_ops *ipip_ops;
4239 struct mlxsw_sp_ipip_entry *ipip_entry;
4240 struct mlxsw_sp_rif *rif;
4243 ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
4245 ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
4246 if (ipip_ops->can_offload(mlxsw_sp, dev)) {
4247 nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
4248 mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
4253 nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
4254 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
4258 mlxsw_sp_nexthop_rif_init(nh, rif);
4259 err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
4261 goto err_neigh_init;
4266 mlxsw_sp_nexthop_rif_fini(nh);
4270 static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
4271 struct mlxsw_sp_nexthop *nh)
4274 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4275 mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
4276 mlxsw_sp_nexthop_rif_fini(nh);
4278 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4279 mlxsw_sp_nexthop_rif_fini(nh);
4280 mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
4285 static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
4286 struct mlxsw_sp_nexthop_group *nh_grp,
4287 struct mlxsw_sp_nexthop *nh,
4288 struct fib_nh *fib_nh)
4290 struct net_device *dev = fib_nh->fib_nh_dev;
4291 struct in_device *in_dev;
4294 nh->nhgi = nh_grp->nhgi;
4295 nh->key.fib_nh = fib_nh;
4296 #ifdef CONFIG_IP_ROUTE_MULTIPATH
4297 nh->nh_weight = fib_nh->fib_nh_weight;
4301 memcpy(&nh->gw_addr, &fib_nh->fib_nh_gw4, sizeof(fib_nh->fib_nh_gw4));
4302 nh->neigh_tbl = &arp_tbl;
4303 err = mlxsw_sp_nexthop_insert(mlxsw_sp, nh);
4307 mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
4308 list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
4312 nh->ifindex = dev->ifindex;
4315 in_dev = __in_dev_get_rcu(dev);
4316 if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
4317 fib_nh->fib_nh_flags & RTNH_F_LINKDOWN) {
4323 err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
4325 goto err_nexthop_neigh_init;
4329 err_nexthop_neigh_init:
4330 list_del(&nh->router_list_node);
4331 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4332 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4336 static void mlxsw_sp_nexthop4_fini(struct mlxsw_sp *mlxsw_sp,
4337 struct mlxsw_sp_nexthop *nh)
4339 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4340 list_del(&nh->router_list_node);
4341 mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
4342 mlxsw_sp_nexthop_remove(mlxsw_sp, nh);
4345 static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
4346 unsigned long event, struct fib_nh *fib_nh)
4348 struct mlxsw_sp_nexthop_key key;
4349 struct mlxsw_sp_nexthop *nh;
4351 key.fib_nh = fib_nh;
4352 nh = mlxsw_sp_nexthop_lookup(mlxsw_sp, key);
4357 case FIB_EVENT_NH_ADD:
4358 mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, fib_nh->fib_nh_dev);
4360 case FIB_EVENT_NH_DEL:
4361 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4365 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4368 static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
4369 struct mlxsw_sp_rif *rif)
4371 struct mlxsw_sp_nexthop *nh;
4374 list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
4376 case MLXSW_SP_NEXTHOP_TYPE_ETH:
4379 case MLXSW_SP_NEXTHOP_TYPE_IPIP:
4380 removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
4387 __mlxsw_sp_nexthop_neigh_update(nh, removing);
4388 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4392 static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
4393 struct mlxsw_sp_rif *old_rif,
4394 struct mlxsw_sp_rif *new_rif)
4396 struct mlxsw_sp_nexthop *nh;
4398 list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
4399 list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
4401 mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
4404 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
4405 struct mlxsw_sp_rif *rif)
4407 struct mlxsw_sp_nexthop *nh, *tmp;
4409 list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
4410 mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
4411 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
4415 static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
4417 enum mlxsw_reg_ratr_trap_action trap_action;
4418 char ratr_pl[MLXSW_REG_RATR_LEN];
4421 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4422 &mlxsw_sp->router->adj_trap_index);
4426 trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
4427 mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
4428 MLXSW_REG_RATR_TYPE_ETHERNET,
4429 mlxsw_sp->router->adj_trap_index,
4430 mlxsw_sp->router->lb_rif_index);
4431 mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
4432 mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
4433 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
4435 goto err_ratr_write;
4440 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4441 mlxsw_sp->router->adj_trap_index);
4445 static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
4447 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
4448 mlxsw_sp->router->adj_trap_index);
4451 static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
4455 if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
4458 err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
4462 refcount_set(&mlxsw_sp->router->num_groups, 1);
4467 static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
4469 if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
4472 mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
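/* The trap adjacency entry is thus shared by all nexthop groups: the
 * first group to be created allocates and programs it, subsequent
 * groups only bump the reference count, and it is freed again when the
 * last group is destroyed.
 */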
4476 mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
4477 const struct mlxsw_sp_nexthop_group *nh_grp,
4478 unsigned long *activity)
4483 ratrad_pl = kmalloc(MLXSW_REG_RATRAD_LEN, GFP_KERNEL);
4487 mlxsw_reg_ratrad_pack(ratrad_pl, nh_grp->nhgi->adj_index,
4488 nh_grp->nhgi->count);
4489 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratrad), ratrad_pl);
4493 for (i = 0; i < nh_grp->nhgi->count; i++) {
4494 if (!mlxsw_reg_ratrad_activity_vector_get(ratrad_pl, i))
4496 bitmap_set(activity, i, 1);
4503 #define MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL 1000 /* ms */
4506 mlxsw_sp_nh_grp_activity_update(struct mlxsw_sp *mlxsw_sp,
4507 const struct mlxsw_sp_nexthop_group *nh_grp)
4509 unsigned long *activity;
4511 activity = bitmap_zalloc(nh_grp->nhgi->count, GFP_KERNEL);
4515 mlxsw_sp_nh_grp_activity_get(mlxsw_sp, nh_grp, activity);
4516 nexthop_res_grp_activity_update(mlxsw_sp_net(mlxsw_sp), nh_grp->obj.id,
4517 nh_grp->nhgi->count, activity);
4519 bitmap_free(activity);
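/* Illustratively, if buckets 0 and 7 of a 32-bucket resilient group saw
 * traffic since the last query, the RATRAD activity vector has those
 * two bits set and the kernel is notified so that it can refresh the
 * idle timers of the corresponding buckets.
 */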
4523 mlxsw_sp_nh_grp_activity_work_schedule(struct mlxsw_sp *mlxsw_sp)
4525 unsigned int interval = MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL;
4527 mlxsw_core_schedule_dw(&mlxsw_sp->router->nh_grp_activity_dw,
4528 msecs_to_jiffies(interval));
4531 static void mlxsw_sp_nh_grp_activity_work(struct work_struct *work)
4533 struct mlxsw_sp_nexthop_group_info *nhgi;
4534 struct mlxsw_sp_router *router;
4535 bool reschedule = false;
4537 router = container_of(work, struct mlxsw_sp_router,
4538 nh_grp_activity_dw.work);
4540 mutex_lock(&router->lock);
4542 list_for_each_entry(nhgi, &router->nh_res_grp_list, list) {
4543 mlxsw_sp_nh_grp_activity_update(router->mlxsw_sp, nhgi->nh_grp);
4547 mutex_unlock(&router->lock);
4551 mlxsw_sp_nh_grp_activity_work_schedule(router->mlxsw_sp);
4555 mlxsw_sp_nexthop_obj_single_validate(struct mlxsw_sp *mlxsw_sp,
4556 const struct nh_notifier_single_info *nh,
4557 struct netlink_ext_ack *extack)
4562 NL_SET_ERR_MSG_MOD(extack, "FDB nexthops are not supported");
4563 else if (nh->has_encap)
4564 NL_SET_ERR_MSG_MOD(extack, "Encapsulating nexthops are not supported");
4572 mlxsw_sp_nexthop_obj_group_entry_validate(struct mlxsw_sp *mlxsw_sp,
4573 const struct nh_notifier_single_info *nh,
4574 struct netlink_ext_ack *extack)
4578 err = mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, nh, extack);
4582 /* Device-only nexthops with an IPIP device are programmed as
4583 * encapsulating adjacency entries.
4585 if (!nh->gw_family && !nh->is_reject &&
4586 !mlxsw_sp_netdev_ipip_type(mlxsw_sp, nh->dev, NULL)) {
4587 NL_SET_ERR_MSG_MOD(extack, "Nexthop group entry does not have a gateway");
static int
mlxsw_sp_nexthop_obj_group_validate(struct mlxsw_sp *mlxsw_sp,
				    const struct nh_notifier_grp_info *nh_grp,
				    struct netlink_ext_ack *extack)
{
	int i;

	if (nh_grp->is_fdb) {
		NL_SET_ERR_MSG_MOD(extack, "FDB nexthop groups are not supported");
		return -EINVAL;
	}

	for (i = 0; i < nh_grp->num_nh; i++) {
		const struct nh_notifier_single_info *nh;
		int err;

		nh = &nh_grp->nh_entries[i].nh;
		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
								extack);
		if (err)
			return err;
	}

	return 0;
}
static int
mlxsw_sp_nexthop_obj_res_group_size_validate(struct mlxsw_sp *mlxsw_sp,
					     const struct nh_notifier_res_table_info *nh_res_table,
					     struct netlink_ext_ack *extack)
{
	unsigned int alloc_size;
	bool valid_size = false;
	int err, i;

	if (nh_res_table->num_nh_buckets < 32) {
		NL_SET_ERR_MSG_MOD(extack, "Minimum number of buckets is 32");
		return -EINVAL;
	}

	for (i = 0; i < mlxsw_sp->router->adj_grp_size_ranges_count; i++) {
		const struct mlxsw_sp_adj_grp_size_range *size_range;

		size_range = &mlxsw_sp->router->adj_grp_size_ranges[i];

		if (nh_res_table->num_nh_buckets >= size_range->start &&
		    nh_res_table->num_nh_buckets <= size_range->end) {
			valid_size = true;
			break;
		}
	}

	if (!valid_size) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid number of buckets");
		return -EINVAL;
	}

	err = mlxsw_sp_kvdl_alloc_count_query(mlxsw_sp,
					      MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
					      nh_res_table->num_nh_buckets,
					      &alloc_size);
	if (err || nh_res_table->num_nh_buckets != alloc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Number of buckets does not fit allocation size of any KVDL partition");
		return -EINVAL;
	}

	return 0;
}
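
/* For example (illustrative, assuming the ASIC's adjacency group size
 * ranges), a request such as:
 *
 *	# ip nexthop add id 10 group 1/2 type resilient buckets 16
 *
 * is rejected because of the 32 bucket minimum, while a bucket count that
 * does not exactly match the allocation size of a KVDL partition is
 * rejected by the KVDL query above.
 */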
static int
mlxsw_sp_nexthop_obj_res_group_validate(struct mlxsw_sp *mlxsw_sp,
					const struct nh_notifier_res_table_info *nh_res_table,
					struct netlink_ext_ack *extack)
{
	int err;
	u16 i;

	err = mlxsw_sp_nexthop_obj_res_group_size_validate(mlxsw_sp,
							   nh_res_table,
							   extack);
	if (err)
		return err;

	for (i = 0; i < nh_res_table->num_nh_buckets; i++) {
		const struct nh_notifier_single_info *nh;
		int err;

		nh = &nh_res_table->nhs[i];
		err = mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
								extack);
		if (err)
			return err;
	}

	return 0;
}
static int mlxsw_sp_nexthop_obj_validate(struct mlxsw_sp *mlxsw_sp,
					 unsigned long event,
					 struct nh_notifier_info *info)
{
	struct nh_notifier_single_info *nh;

	if (event != NEXTHOP_EVENT_REPLACE &&
	    event != NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE &&
	    event != NEXTHOP_EVENT_BUCKET_REPLACE)
		return 0;

	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		return mlxsw_sp_nexthop_obj_single_validate(mlxsw_sp, info->nh,
							    info->extack);
	case NH_NOTIFIER_INFO_TYPE_GRP:
		return mlxsw_sp_nexthop_obj_group_validate(mlxsw_sp,
							   info->nh_grp,
							   info->extack);
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		return mlxsw_sp_nexthop_obj_res_group_validate(mlxsw_sp,
							       info->nh_res_table,
							       info->extack);
	case NH_NOTIFIER_INFO_TYPE_RES_BUCKET:
		nh = &info->nh_res_bucket->new_nh;
		return mlxsw_sp_nexthop_obj_group_entry_validate(mlxsw_sp, nh,
								 info->extack);
	default:
		NL_SET_ERR_MSG_MOD(info->extack, "Unsupported nexthop type");
		return -EOPNOTSUPP;
	}
}
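
/* Examples of notifications handled by the validation above (illustrative
 * iproute2 commands, not part of the driver):
 *
 *	# ip nexthop add id 1 via 192.0.2.2 dev swp1         (accepted)
 *	# ip nexthop add id 2 encap mpls 200 via 192.0.2.2 dev swp1
 *	                                   (rejected: encapsulating nexthop)
 *	# ip nexthop add id 3 dev swp1     (rejected as a group entry: no
 *	                                    gateway and not an IPIP device)
 */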
static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
					    const struct nh_notifier_info *info)
{
	const struct net_device *dev;

	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		dev = info->nh->dev;
		return info->nh->gw_family || info->nh->is_reject ||
		       mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL);
	case NH_NOTIFIER_INFO_TYPE_GRP:
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		/* Already validated earlier. */
		return true;
	default:
		return false;
	}
}
static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_nexthop *nh)
{
	u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;

	nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
	nh->should_offload = 1;
	/* While nexthops that discard packets do not forward packets
	 * via an egress RIF, they still need to be programmed using a
	 * valid RIF, so use the loopback RIF created during init.
	 */
	nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
}

static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
						struct mlxsw_sp_nexthop *nh)
{
	nh->rif = NULL;
	nh->should_offload = 0;
}
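
/* A blackhole nexthop is created from user space with, e.g.
 * (illustrative):
 *
 *	# ip nexthop add id 5 blackhole
 *
 * Such a nexthop has no netdevice, so it is bound to the loopback RIF
 * above purely to satisfy the hardware's requirement for a valid RIF in
 * the adjacency entry; the discard action ensures no packet ever egresses
 * through it.
 */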
static int
mlxsw_sp_nexthop_obj_init(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_nexthop_group *nh_grp,
			  struct mlxsw_sp_nexthop *nh,
			  struct nh_notifier_single_info *nh_obj, int weight)
{
	struct net_device *dev = nh_obj->dev;
	int err;

	nh->nhgi = nh_grp->nhgi;
	nh->nh_weight = weight;

	switch (nh_obj->gw_family) {
	case AF_INET:
		memcpy(&nh->gw_addr, &nh_obj->ipv4, sizeof(nh_obj->ipv4));
		nh->neigh_tbl = &arp_tbl;
		break;
	case AF_INET6:
		memcpy(&nh->gw_addr, &nh_obj->ipv6, sizeof(nh_obj->ipv6));
#if IS_ENABLED(CONFIG_IPV6)
		nh->neigh_tbl = &nd_tbl;
#endif
		break;
	}

	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);
	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);
	nh->ifindex = dev->ifindex;

	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
	if (err)
		goto err_type_init;

	if (nh_obj->is_reject)
		mlxsw_sp_nexthop_obj_blackhole_init(mlxsw_sp, nh);

	/* In a resilient nexthop group, all the nexthops must be written to
	 * the adjacency table, even if they do not have a valid neighbour
	 * or RIF.
	 */
	if (nh_grp->nhgi->is_resilient && !nh->should_offload) {
		nh->action = MLXSW_SP_NEXTHOP_ACTION_TRAP;
		nh->should_offload = 1;
	}

	return 0;

err_type_init:
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	return err;
}
static void mlxsw_sp_nexthop_obj_fini(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_nexthop *nh)
{
	if (nh->action == MLXSW_SP_NEXTHOP_ACTION_DISCARD)
		mlxsw_sp_nexthop_obj_blackhole_fini(mlxsw_sp, nh);
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	nh->should_offload = 0;
}
static int
mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_nexthop_group *nh_grp,
				     struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop *nh;
	bool is_resilient = false;
	unsigned int nhs;
	int err, i;

	switch (info->type) {
	case NH_NOTIFIER_INFO_TYPE_SINGLE:
		nhs = 1;
		break;
	case NH_NOTIFIER_INFO_TYPE_GRP:
		nhs = info->nh_grp->num_nh;
		break;
	case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
		nhs = info->nh_res_table->num_nh_buckets;
		is_resilient = true;
		break;
	default:
		return -EINVAL;
	}

	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	nhgi->gateway = mlxsw_sp_nexthop_obj_is_gateway(mlxsw_sp, info);
	nhgi->is_resilient = is_resilient;
	nhgi->count = nhs;
	for (i = 0; i < nhgi->count; i++) {
		struct nh_notifier_single_info *nh_obj;
		int weight;

		nh = &nhgi->nexthops[i];
		switch (info->type) {
		case NH_NOTIFIER_INFO_TYPE_SINGLE:
			nh_obj = info->nh;
			weight = 1;
			break;
		case NH_NOTIFIER_INFO_TYPE_GRP:
			nh_obj = &info->nh_grp->nh_entries[i].nh;
			weight = info->nh_grp->nh_entries[i].weight;
			break;
		case NH_NOTIFIER_INFO_TYPE_RES_TABLE:
			nh_obj = &info->nh_res_table->nhs[i];
			weight = 1;
			break;
		default:
			err = -EINVAL;
			goto err_nexthop_obj_init;
		}
		err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj,
						weight);
		if (err)
			goto err_nexthop_obj_init;
	}
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err) {
		NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
		goto err_group_refresh;
	}

	/* Add resilient nexthop groups to a list so that the activity of their
	 * nexthop buckets will be periodically queried and cleared.
	 */
	if (nhgi->is_resilient) {
		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
			mlxsw_sp_nh_grp_activity_work_schedule(mlxsw_sp);
		list_add(&nhgi->list, &mlxsw_sp->router->nh_res_grp_list);
	}

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop_obj_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}
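
/* The notifier info is normalized above into one nexthop group info,
 * regardless of its source (illustrative mapping):
 *
 *	NH_NOTIFIER_INFO_TYPE_SINGLE    -> one nexthop, weight 1
 *	NH_NOTIFIER_INFO_TYPE_GRP       -> num_nh nexthops, user weights
 *	NH_NOTIFIER_INFO_TYPE_RES_TABLE -> one nexthop per bucket, weight 1
 */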
static void
mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	int i;

	if (nhgi->is_resilient) {
		list_del(&nhgi->list);
		if (list_empty(&mlxsw_sp->router->nh_res_grp_list))
			cancel_delayed_work(&router->nh_grp_activity_dw);
	}

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_obj_group_create(struct mlxsw_sp *mlxsw_sp,
				  struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
	nh_grp->obj.id = info->id;

	err = mlxsw_sp_nexthop_obj_group_info_init(mlxsw_sp, nh_grp, info);
	if (err)
		goto err_nexthop_group_info_init;

	nh_grp->can_destroy = false;

	return nh_grp;

err_nexthop_group_info_init:
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop_obj_group_destroy(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_obj_group_info_fini(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(!list_empty(&nh_grp->fib_list));
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop_obj_group_lookup(struct mlxsw_sp *mlxsw_sp, u32 id)
{
	struct mlxsw_sp_nexthop_group_cmp_arg cmp_arg;

	cmp_arg.type = MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ;
	cmp_arg.id = id;
	return rhashtable_lookup_fast(&mlxsw_sp->router->nexthop_group_ht,
				      &cmp_arg,
				      mlxsw_sp_nexthop_group_ht_params);
}

static int mlxsw_sp_nexthop_obj_group_add(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_nexthop_group *nh_grp)
{
	return mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
}
static int
mlxsw_sp_nexthop_obj_group_replace(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop_group *nh_grp,
				   struct mlxsw_sp_nexthop_group *old_nh_grp,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_nexthop_group_info *old_nhgi = old_nh_grp->nhgi;
	struct mlxsw_sp_nexthop_group_info *new_nhgi = nh_grp->nhgi;
	int err;

	old_nh_grp->nhgi = new_nhgi;
	new_nhgi->nh_grp = old_nh_grp;
	nh_grp->nhgi = old_nhgi;
	old_nhgi->nh_grp = nh_grp;

	if (old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
		/* Both the old adjacency index and the new one are valid.
		 * Routes are currently using the old one. Tell the device to
		 * replace the old adjacency index with the new one.
		 */
		err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, old_nh_grp,
						     old_nhgi->adj_index,
						     old_nhgi->ecmp_size);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to replace old adjacency index with new one");
			goto err_out;
		}
	} else if (old_nhgi->adj_index_valid && !new_nhgi->adj_index_valid) {
		/* The old adjacency index is valid, while the new one is not.
		 * Iterate over all the routes using the group and change them
		 * to trap packets to the CPU.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to trap packets");
			goto err_out;
		}
	} else if (!old_nhgi->adj_index_valid && new_nhgi->adj_index_valid) {
		/* The old adjacency index is invalid, while the new one is.
		 * Iterate over all the routes using the group and change them
		 * to forward packets using the new valid index.
		 */
		err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, old_nh_grp);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to update routes to forward packets");
			goto err_out;
		}
	}

	/* Make sure the flags are set / cleared based on the new nexthop group
	 * information.
	 */
	mlxsw_sp_nexthop_obj_group_offload_refresh(mlxsw_sp, old_nh_grp);

	/* At this point 'nh_grp' is just a shell that is not used by anyone
	 * and its nexthop group info is the old info that was just replaced
	 * with the new one. Remove it.
	 */
	nh_grp->can_destroy = true;
	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);

	return 0;

err_out:
	old_nhgi->nh_grp = old_nh_grp;
	nh_grp->nhgi = new_nhgi;
	new_nhgi->nh_grp = nh_grp;
	old_nh_grp->nhgi = old_nhgi;
	return err;
}
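
/* Replacement is make-before-break: the new group info is fully written to
 * the adjacency table before routes are flipped to it, e.g. (illustrative):
 *
 *	# ip nexthop add id 10 group 1/2
 *	# ip nexthop replace id 10 group 1,2/2,1
 *
 * Between the two commands routes keep forwarding via the old adjacency
 * entries; only after the new entries are programmed are the group infos
 * swapped, and the old info is then freed through the 'nh_grp' shell
 * described above.
 */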
static int mlxsw_sp_nexthop_obj_new(struct mlxsw_sp *mlxsw_sp,
				    struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp, *old_nh_grp;
	struct netlink_ext_ack *extack = info->extack;
	int err;

	nh_grp = mlxsw_sp_nexthop_obj_group_create(mlxsw_sp, info);
	if (IS_ERR(nh_grp))
		return PTR_ERR(nh_grp);

	old_nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!old_nh_grp)
		err = mlxsw_sp_nexthop_obj_group_add(mlxsw_sp, nh_grp);
	else
		err = mlxsw_sp_nexthop_obj_group_replace(mlxsw_sp, nh_grp,
							 old_nh_grp, extack);

	if (err) {
		nh_grp->can_destroy = true;
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
	}

	return err;
}

static void mlxsw_sp_nexthop_obj_del(struct mlxsw_sp *mlxsw_sp,
				     struct nh_notifier_info *info)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp)
		return;

	nh_grp->can_destroy = true;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);

	/* If the group still has routes using it, then defer the delete
	 * operation until the last route using it is deleted.
	 */
	if (!list_empty(&nh_grp->fib_list))
		return;
	mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
}
static int mlxsw_sp_nexthop_obj_bucket_query(struct mlxsw_sp *mlxsw_sp,
					     u32 adj_index, char *ratr_pl)
{
	MLXSW_REG_ZERO(ratr, ratr_pl);
	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_adjacency_index_low_set(ratr_pl, adj_index);
	mlxsw_reg_ratr_adjacency_index_high_set(ratr_pl, adj_index >> 16);

	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
}

static int mlxsw_sp_nexthop_obj_bucket_compare(char *ratr_pl, char *ratr_pl_new)
{
	/* Clear the opcode and activity on both the old and new payload as
	 * they are irrelevant for the comparison.
	 */
	mlxsw_reg_ratr_op_set(ratr_pl, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_a_set(ratr_pl, 0);
	mlxsw_reg_ratr_op_set(ratr_pl_new, MLXSW_REG_RATR_OP_QUERY_READ);
	mlxsw_reg_ratr_a_set(ratr_pl_new, 0);

	/* If the contents of the adjacency entry are consistent with the
	 * replacement request, then replacement was successful.
	 */
	if (!memcmp(ratr_pl, ratr_pl_new, MLXSW_REG_RATR_LEN))
		return 0;

	return -EINVAL;
}
static int
mlxsw_sp_nexthop_obj_bucket_adj_update(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nexthop *nh,
				       struct nh_notifier_info *info)
{
	u16 bucket_index = info->nh_res_bucket->bucket_index;
	struct netlink_ext_ack *extack = info->extack;
	bool force = info->nh_res_bucket->force;
	char ratr_pl_new[MLXSW_REG_RATR_LEN];
	char ratr_pl[MLXSW_REG_RATR_LEN];
	u32 adj_index;
	int err;

	/* No point in trying an atomic replacement if the idle timer interval
	 * is smaller than the interval in which we query and clear activity.
	 */
	if (!force && info->nh_res_bucket->idle_timer_ms <
	    MLXSW_SP_NH_GRP_ACTIVITY_UPDATE_INTERVAL)
		force = true;

	adj_index = nh->nhgi->adj_index + bucket_index;
	err = mlxsw_sp_nexthop_update(mlxsw_sp, adj_index, nh, force, ratr_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to overwrite nexthop bucket");
		return err;
	}

	if (!force) {
		err = mlxsw_sp_nexthop_obj_bucket_query(mlxsw_sp, adj_index,
							ratr_pl_new);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to query nexthop bucket state after replacement. State might be inconsistent");
			return err;
		}

		err = mlxsw_sp_nexthop_obj_bucket_compare(ratr_pl, ratr_pl_new);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket was not replaced because it was active during replacement");
			return err;
		}
	}

	nh->update = 0;
	nh->offloaded = 1;
	mlxsw_sp_nexthop_bucket_offload_refresh(mlxsw_sp, nh, bucket_index);

	return 0;
}
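
/* Non-forced ("atomic") bucket replacement sketch (illustrative summary):
 *
 *	1. Write the new bucket with RATR, asking the device to only
 *	   overwrite an inactive entry (mlxsw_sp_nexthop_update() above).
 *	2. Read the entry back with a RATR query.
 *	3. memcmp() the requested and actual payloads; a mismatch means the
 *	   bucket was active and was therefore not replaced.
 *
 * A forced replacement skips steps 2-3 and overwrites unconditionally.
 */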
static int mlxsw_sp_nexthop_obj_bucket_replace(struct mlxsw_sp *mlxsw_sp,
					       struct nh_notifier_info *info)
{
	u16 bucket_index = info->nh_res_bucket->bucket_index;
	struct netlink_ext_ack *extack = info->extack;
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct nh_notifier_single_info *nh_obj;
	struct mlxsw_sp_nexthop_group *nh_grp;
	struct mlxsw_sp_nexthop *nh;
	int err;

	nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, info->id);
	if (!nh_grp) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop group was not found");
		return -EINVAL;
	}

	nhgi = nh_grp->nhgi;

	if (bucket_index >= nhgi->count) {
		NL_SET_ERR_MSG_MOD(extack, "Nexthop bucket index out of range");
		return -EINVAL;
	}

	nh = &nhgi->nexthops[bucket_index];
	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);

	nh_obj = &info->nh_res_bucket->new_nh;
	err = mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize nexthop object for nexthop bucket replacement");
		goto err_nexthop_obj_init;
	}

	err = mlxsw_sp_nexthop_obj_bucket_adj_update(mlxsw_sp, nh, info);
	if (err)
		goto err_nexthop_obj_bucket_adj_update;

	return 0;

err_nexthop_obj_bucket_adj_update:
	mlxsw_sp_nexthop_obj_fini(mlxsw_sp, nh);
err_nexthop_obj_init:
	nh_obj = &info->nh_res_bucket->old_nh;
	mlxsw_sp_nexthop_obj_init(mlxsw_sp, nh_grp, nh, nh_obj, 1);
	/* The old adjacency entry was not overwritten */
	nh->update = 0;
	nh->offloaded = 1;
	return err;
}
static int mlxsw_sp_nexthop_obj_event(struct notifier_block *nb,
				      unsigned long event, void *ptr)
{
	struct nh_notifier_info *info = ptr;
	struct mlxsw_sp_router *router;
	int err = 0;

	router = container_of(nb, struct mlxsw_sp_router, nexthop_nb);
	err = mlxsw_sp_nexthop_obj_validate(router->mlxsw_sp, event, info);
	if (err)
		goto out;

	mutex_lock(&router->lock);

	switch (event) {
	case NEXTHOP_EVENT_REPLACE:
		err = mlxsw_sp_nexthop_obj_new(router->mlxsw_sp, info);
		break;
	case NEXTHOP_EVENT_DEL:
		mlxsw_sp_nexthop_obj_del(router->mlxsw_sp, info);
		break;
	case NEXTHOP_EVENT_BUCKET_REPLACE:
		err = mlxsw_sp_nexthop_obj_bucket_replace(router->mlxsw_sp,
							  info);
		break;
	default:
		break;
	}

	mutex_unlock(&router->lock);

out:
	return notifier_from_errno(err);
}
static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				   struct fib_info *fi)
{
	const struct fib_nh *nh = fib_info_nh(fi, 0);

	return nh->fib_nh_gw_family ||
	       mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL);
}
static int
mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	unsigned int nhs = fib_info_num_path(nh_grp->ipv4.fi);
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_nexthop *nh;
	int err, i;

	nhgi = kzalloc(struct_size(nhgi, nexthops, nhs), GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	nhgi->gateway = mlxsw_sp_fi_is_gateway(mlxsw_sp, nh_grp->ipv4.fi);
	nhgi->count = nhs;
	for (i = 0; i < nhgi->count; i++) {
		struct fib_nh *fib_nh;

		nh = &nhgi->nexthops[i];
		fib_nh = fib_info_nh(nh_grp->ipv4.fi, i);
		err = mlxsw_sp_nexthop4_init(mlxsw_sp, nh_grp, nh, fib_nh);
		if (err)
			goto err_nexthop4_init;
	}
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop4_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}

static void
mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop4_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}
static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop4_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4;
	nh_grp->ipv4.fi = fi;
	fib_info_hold(fi);

	err = mlxsw_sp_nexthop4_group_info_init(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_info_init;

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	nh_grp->can_destroy = true;

	return nh_grp;

err_nexthop_group_insert:
	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
err_nexthop_group_info_init:
	fib_info_put(fi);
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop4_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	mlxsw_sp_nexthop4_group_info_fini(mlxsw_sp, nh_grp);
	fib_info_put(nh_grp->ipv4.fi);
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}
static int mlxsw_sp_nexthop4_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       struct fib_info *fi)
{
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (fi->nh) {
		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
							   fi->nh->id);
		if (WARN_ON_ONCE(!nh_grp))
			return -EINVAL;
		goto out;
	}

	nh_grp = mlxsw_sp_nexthop4_group_lookup(mlxsw_sp, fi);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop4_group_create(mlxsw_sp, fi);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}
out:
	list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
	fib_entry->nh_group = nh_grp;
	return 0;
}

static void mlxsw_sp_nexthop4_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;

	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
		return;
	}

	mlxsw_sp_nexthop4_group_destroy(mlxsw_sp, nh_grp);
}
static bool
mlxsw_sp_fib4_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	return !fib4_entry->dscp;
}

static bool
mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;

	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		if (!mlxsw_sp_fib4_entry_should_offload(fib_entry))
			return false;
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		break;
	}

	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return !!nh_group->nhgi->adj_index_valid;
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return !!nh_group->nhgi->nh_rif;
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return true;
	default:
		return false;
	}
}
static struct mlxsw_sp_nexthop *
mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
		     const struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	int i;

	for (i = 0; i < nh_grp->nhgi->count; i++) {
		struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
		    ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
				    &rt->fib6_nh->fib_nh_gw6))
			return nh;
	}

	return NULL;
}
static void
mlxsw_sp_fib4_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib_entry_notifier_info *fen_info)
{
	u32 *p_dst = (u32 *) &fen_info->dst;
	struct fib_rt_info fri;

	fri.fi = fen_info->fi;
	fri.tb_id = fen_info->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = fen_info->dst_len;
	fri.dscp = fen_info->dscp;
	fri.type = fen_info->type;
	fri.offload = false;
	fri.trap = false;
	fri.offload_failed = true;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
static void
mlxsw_sp_fib4_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);
	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.dscp = fib4_entry->dscp;
	fri.type = fib4_entry->type;
	fri.offload = should_offload;
	fri.trap = !should_offload;
	fri.offload_failed = false;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}

static void
mlxsw_sp_fib4_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	u32 *p_dst = (u32 *) fib_entry->fib_node->key.addr;
	int dst_len = fib_entry->fib_node->key.prefix_len;
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct fib_rt_info fri;

	fib4_entry = container_of(fib_entry, struct mlxsw_sp_fib4_entry,
				  common);
	fri.fi = fib4_entry->fi;
	fri.tb_id = fib4_entry->tb_id;
	fri.dst = cpu_to_be32(*p_dst);
	fri.dst_len = dst_len;
	fri.dscp = fib4_entry->dscp;
	fri.type = fib4_entry->type;
	fri.offload = false;
	fri.trap = false;
	fri.offload_failed = false;
	fib_alias_hw_flags_set(mlxsw_sp_net(mlxsw_sp), &fri);
}
#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib6_info **rt_arr,
				      unsigned int nrt6)
{
	int i;

	/* In IPv6 a multipath route is represented using multiple routes, so
	 * we need to set the flags on all of them.
	 */
	for (i = 0; i < nrt6; i++)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), rt_arr[i],
				       false, false, true);
}
#else
static void
mlxsw_sp_fib6_offload_failed_flag_set(struct mlxsw_sp *mlxsw_sp,
				      struct fib6_info **rt_arr,
				      unsigned int nrt6)
{
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	bool should_offload;

	should_offload = mlxsw_sp_fib_entry_should_offload(fib_entry);

	/* In IPv6 a multipath route is represented using multiple routes, so
	 * we need to set the flags on all of them.
	 */
	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
				       should_offload, !should_offload, false);
}
#else
static void
mlxsw_sp_fib6_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry)
{
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static void
mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib6_entry *fib6_entry;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	fib6_entry = container_of(fib_entry, struct mlxsw_sp_fib6_entry,
				  common);
	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list)
		fib6_info_hw_flags_set(mlxsw_sp_net(mlxsw_sp), mlxsw_sp_rt6->rt,
				       false, false, false);
}
#else
static void
mlxsw_sp_fib6_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry)
{
}
#endif
static void
mlxsw_sp_fib_entry_hw_flags_set(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_hw_flags_clear(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->fib_node->fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		mlxsw_sp_fib4_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_sp_fib6_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	}
}

static void
mlxsw_sp_fib_entry_hw_flags_refresh(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_fib_entry *fib_entry,
				    enum mlxsw_reg_ralue_op op)
{
	switch (op) {
	case MLXSW_REG_RALUE_OP_WRITE_WRITE:
		mlxsw_sp_fib_entry_hw_flags_set(mlxsw_sp, fib_entry);
		break;
	case MLXSW_REG_RALUE_OP_WRITE_DELETE:
		mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, fib_entry);
		break;
	default:
		break;
	}
}
static void
mlxsw_sp_fib_entry_ralue_pack(char *ralue_pl,
			      const struct mlxsw_sp_fib_entry *fib_entry,
			      enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_fib *fib = fib_entry->fib_node->fib;
	enum mlxsw_reg_ralxx_protocol proto;
	u32 *p_dip;

	proto = (enum mlxsw_reg_ralxx_protocol) fib->proto;

	switch (fib->proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		p_dip = (u32 *) fib_entry->fib_node->key.addr;
		mlxsw_reg_ralue_pack4(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      *p_dip);
		break;
	case MLXSW_SP_L3_PROTO_IPV6:
		mlxsw_reg_ralue_pack6(ralue_pl, proto, op, fib->vr->id,
				      fib_entry->fib_node->key.prefix_len,
				      fib_entry->fib_node->key.addr);
		break;
	}
}
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry,
					enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_nexthop_group *nh_group = fib_entry->nh_group;
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_group->nhgi;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	enum mlxsw_reg_ralue_trap_action trap_action;
	u16 trap_id = 0;
	u32 adjacency_index = 0;
	u16 ecmp_size = 0;

	/* In case the nexthop group adjacency index is valid, use it
	 * with provided ECMP size. Otherwise, setup trap and pass
	 * traffic to kernel.
	 */
	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = nhgi->adj_index;
		ecmp_size = nhgi->ecmp_size;
	} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		adjacency_index = mlxsw_sp->router->adj_trap_index;
		ecmp_size = 1;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
					adjacency_index, ecmp_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib_entry *fib_entry,
				       enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u16 trap_id = 0;
	u16 rif_index = 0;

	if (mlxsw_sp_fib_entry_should_offload(fib_entry)) {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
		rif_index = rif->rif_index;
	} else {
		trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
		trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
	}

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id,
				       rif_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op_trap(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_entry *fib_entry,
				      enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int mlxsw_sp_fib_entry_op_blackhole(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry *fib_entry,
					   enum mlxsw_reg_ralue_op op)
{
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR;
	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, 0, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int
mlxsw_sp_fib_entry_op_unreachable(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry,
				  enum mlxsw_reg_ralue_op op)
{
	enum mlxsw_reg_ralue_trap_action trap_action;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	u16 trap_id;

	trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
	trap_id = MLXSW_TRAP_ID_RTR_INGRESS1;

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_local_pack(ralue_pl, trap_action, trap_id, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int
mlxsw_sp_fib_entry_op_ipip_decap(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	struct mlxsw_sp_ipip_entry *ipip_entry = fib_entry->decap.ipip_entry;
	const struct mlxsw_sp_ipip_ops *ipip_ops;
	char ralue_pl[MLXSW_REG_RALUE_LEN];
	int err;

	if (WARN_ON(!ipip_entry))
		return -EINVAL;

	ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
	err = ipip_ops->decap_config(mlxsw_sp, ipip_entry,
				     fib_entry->decap.tunnel_index);
	if (err)
		return err;

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
					   fib_entry->decap.tunnel_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}

static int mlxsw_sp_fib_entry_op_nve_decap(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_fib_entry *fib_entry,
					   enum mlxsw_reg_ralue_op op)
{
	char ralue_pl[MLXSW_REG_RALUE_LEN];

	mlxsw_sp_fib_entry_ralue_pack(ralue_pl, fib_entry, op);
	mlxsw_reg_ralue_act_ip2me_tun_pack(ralue_pl,
					   fib_entry->decap.tunnel_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
}
static int __mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_entry *fib_entry,
				   enum mlxsw_reg_ralue_op op)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
		return mlxsw_sp_fib_entry_op_remote(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
		return mlxsw_sp_fib_entry_op_local(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
		return mlxsw_sp_fib_entry_op_trap(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
		return mlxsw_sp_fib_entry_op_blackhole(mlxsw_sp, fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE:
		return mlxsw_sp_fib_entry_op_unreachable(mlxsw_sp, fib_entry,
							 op);
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		return mlxsw_sp_fib_entry_op_ipip_decap(mlxsw_sp,
							fib_entry, op);
	case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
		return mlxsw_sp_fib_entry_op_nve_decap(mlxsw_sp, fib_entry, op);
	}
	return -EINVAL;
}

static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_fib_entry *fib_entry,
				 enum mlxsw_reg_ralue_op op)
{
	int err = __mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);

	if (err)
		return err;

	mlxsw_sp_fib_entry_hw_flags_refresh(mlxsw_sp, fib_entry, op);

	return err;
}

static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_WRITE);
}

static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_entry *fib_entry)
{
	return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
				     MLXSW_REG_RALUE_OP_WRITE_DELETE);
}
static int
mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info,
			     struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
	union mlxsw_sp_l3addr dip = { .addr4 = htonl(fen_info->dst) };
	struct mlxsw_sp_router *router = mlxsw_sp->router;
	u32 tb_id = mlxsw_sp_fix_tb_id(fen_info->tb_id);
	int ifindex = nhgi->nexthops[0].ifindex;
	struct mlxsw_sp_ipip_entry *ipip_entry;

	switch (fen_info->type) {
	case RTN_LOCAL:
		ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
							       MLXSW_SP_L3_PROTO_IPV4, dip);
		if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
			return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
							     fib_entry,
							     ipip_entry);
		}
		if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
						 MLXSW_SP_L3_PROTO_IPV4,
						 &dip)) {
			u32 tunnel_index;

			tunnel_index = router->nve_decap_config.tunnel_index;
			fib_entry->decap.tunnel_index = tunnel_index;
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
			return 0;
		}
		fallthrough;
	case RTN_BROADCAST:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
		return 0;
	case RTN_BLACKHOLE:
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
		return 0;
	case RTN_UNREACHABLE:
	case RTN_PROHIBIT:
		/* Packets hitting these routes need to be trapped, but
		 * can do so with a lower priority than packets directed
		 * at the host, so use action type local instead of trap.
		 */
		fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
		return 0;
	case RTN_UNICAST:
		if (nhgi->gateway)
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
		else
			fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
		return 0;
	default:
		return -EINVAL;
	}
}
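
/* Route type to hardware action mapping, e.g. (illustrative):
 *
 *	# ip route add blackhole 198.51.100.0/24     -> BLACKHOLE (discard)
 *	# ip route add unreachable 198.51.100.0/24   -> UNREACHABLE
 *	                                                (low priority trap)
 *	# ip route add 198.51.100.0/24 via 192.0.2.2 -> REMOTE (adjacency)
 *	# ip route add 198.51.100.0/24 dev swp1      -> LOCAL (forward via
 *	                                                the egress RIF)
 */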
static void
mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fib_entry *fib_entry)
{
	switch (fib_entry->type) {
	case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
		mlxsw_sp_fib_entry_decap_fini(mlxsw_sp, fib_entry);
		break;
	default:
		break;
	}
}

static void
mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib4_entry *fib4_entry)
{
	mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
}
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_fib_node *fib_node,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_entry *fib_entry;
	int err;

	fib4_entry = kzalloc(sizeof(*fib4_entry), GFP_KERNEL);
	if (!fib4_entry)
		return ERR_PTR(-ENOMEM);
	fib_entry = &fib4_entry->common;

	err = mlxsw_sp_nexthop4_group_get(mlxsw_sp, fib_entry, fen_info->fi);
	if (err)
		goto err_nexthop4_group_get;

	err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	err = mlxsw_sp_fib4_entry_type_set(mlxsw_sp, fen_info, fib_entry);
	if (err)
		goto err_fib4_entry_type_set;

	fib4_entry->fi = fen_info->fi;
	fib_info_hold(fib4_entry->fi);
	fib4_entry->tb_id = fen_info->tb_id;
	fib4_entry->type = fen_info->type;
	fib4_entry->dscp = fen_info->dscp;

	fib_entry->fib_node = fib_node;

	return fib4_entry;

err_fib4_entry_type_set:
	mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
err_nexthop4_group_get:
	kfree(fib4_entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib4_entry *fib4_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;

	fib_info_put(fib4_entry->fi);
	mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
	mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
					 fib_node->fib);
	mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
	kfree(fib4_entry);
}
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_lookup(struct mlxsw_sp *mlxsw_sp,
			   const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;

	vr = mlxsw_sp_vr_find(mlxsw_sp, fen_info->tb_id);
	if (!vr)
		return NULL;
	fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV4);

	fib_node = mlxsw_sp_fib_node_lookup(fib, &fen_info->dst,
					    sizeof(fen_info->dst),
					    fen_info->dst_len);
	if (!fib_node)
		return NULL;

	fib4_entry = container_of(fib_node->fib_entry,
				  struct mlxsw_sp_fib4_entry, common);
	if (fib4_entry->tb_id == fen_info->tb_id &&
	    fib4_entry->dscp == fen_info->dscp &&
	    fib4_entry->type == fen_info->type &&
	    fib4_entry->fi == fen_info->fi)
		return fib4_entry;

	return NULL;
}
static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_fib_node, key),
	.head_offset = offsetof(struct mlxsw_sp_fib_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_fib_key),
	.automatic_shrinking = true,
};

static int mlxsw_sp_fib_node_insert(struct mlxsw_sp_fib *fib,
				    struct mlxsw_sp_fib_node *fib_node)
{
	return rhashtable_insert_fast(&fib->ht, &fib_node->ht_node,
				      mlxsw_sp_fib_ht_params);
}

static void mlxsw_sp_fib_node_remove(struct mlxsw_sp_fib *fib,
				     struct mlxsw_sp_fib_node *fib_node)
{
	rhashtable_remove_fast(&fib->ht, &fib_node->ht_node,
			       mlxsw_sp_fib_ht_params);
}
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_lookup(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_key key;

	memset(&key, 0, sizeof(key));
	memcpy(key.addr, addr, addr_len);
	key.prefix_len = prefix_len;
	return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
}

static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_create(struct mlxsw_sp_fib *fib, const void *addr,
			 size_t addr_len, unsigned char prefix_len)
{
	struct mlxsw_sp_fib_node *fib_node;

	fib_node = kzalloc(sizeof(*fib_node), GFP_KERNEL);
	if (!fib_node)
		return NULL;

	list_add(&fib_node->list, &fib->node_list);
	memcpy(fib_node->key.addr, addr, addr_len);
	fib_node->key.prefix_len = prefix_len;

	return fib_node;
}

static void mlxsw_sp_fib_node_destroy(struct mlxsw_sp_fib_node *fib_node)
{
	list_del(&fib_node->list);
	kfree(fib_node);
}
static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	struct mlxsw_sp_lpm_tree *lpm_tree;
	int err;

	lpm_tree = mlxsw_sp->router->lpm.proto_trees[fib->proto];
	if (lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		goto out;

	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_set(&req_prefix_usage, fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return PTR_ERR(lpm_tree);

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

out:
	lpm_tree->prefix_ref_count[fib_node->key.prefix_len]++;
	return 0;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
	return err;
}
static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_lpm_tree *lpm_tree = fib_node->fib->lpm_tree;
	struct mlxsw_sp_prefix_usage req_prefix_usage;
	struct mlxsw_sp_fib *fib = fib_node->fib;
	int err;

	if (--lpm_tree->prefix_ref_count[fib_node->key.prefix_len] != 0)
		return;
	/* Try to construct a new LPM tree from the current prefix usage
	 * minus the unused one. If we fail, continue using the old one.
	 */
	mlxsw_sp_prefix_usage_cpy(&req_prefix_usage, &lpm_tree->prefix_usage);
	mlxsw_sp_prefix_usage_clear(&req_prefix_usage,
				    fib_node->key.prefix_len);
	lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
					 fib->proto);
	if (IS_ERR(lpm_tree))
		return;

	err = mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree);
	if (err)
		goto err_lpm_tree_replace;

	return;

err_lpm_tree_replace:
	mlxsw_sp_lpm_tree_put(mlxsw_sp, lpm_tree);
}
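
/* The LPM trees are shared by all virtual routers of a given protocol, so
 * adding the first route with a new prefix length (the link function
 * above) may require building a new tree with the extended prefix usage
 * and rebinding the virtual routers to it; removing the last user of a
 * prefix length tries to shrink the tree the same way. The link path
 * propagates a failure to the caller, while the unlink path simply keeps
 * the old, larger tree, trading optimality for correctness.
 */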
static int mlxsw_sp_fib_node_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node,
				  struct mlxsw_sp_fib *fib)
{
	int err;

	err = mlxsw_sp_fib_node_insert(fib, fib_node);
	if (err)
		return err;
	fib_node->fib = fib;

	err = mlxsw_sp_fib_lpm_tree_link(mlxsw_sp, fib_node);
	if (err)
		goto err_fib_lpm_tree_link;

	return 0;

err_fib_lpm_tree_link:
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
	return err;
}

static void mlxsw_sp_fib_node_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_fib *fib = fib_node->fib;

	mlxsw_sp_fib_lpm_tree_unlink(mlxsw_sp, fib_node);
	fib_node->fib = NULL;
	mlxsw_sp_fib_node_remove(fib, fib_node);
}
static struct mlxsw_sp_fib_node *
mlxsw_sp_fib_node_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id, const void *addr,
		      size_t addr_len, unsigned char prefix_len,
		      enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_fib_node *fib_node;
	struct mlxsw_sp_fib *fib;
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, NULL);
	if (IS_ERR(vr))
		return ERR_CAST(vr);
	fib = mlxsw_sp_vr_fib(vr, proto);

	fib_node = mlxsw_sp_fib_node_lookup(fib, addr, addr_len, prefix_len);
	if (fib_node)
		return fib_node;

	fib_node = mlxsw_sp_fib_node_create(fib, addr, addr_len, prefix_len);
	if (!fib_node) {
		err = -ENOMEM;
		goto err_fib_node_create;
	}

	err = mlxsw_sp_fib_node_init(mlxsw_sp, fib_node, fib);
	if (err)
		goto err_fib_node_init;

	return fib_node;

err_fib_node_init:
	mlxsw_sp_fib_node_destroy(fib_node);
err_fib_node_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_fib_node_put(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_fib_node *fib_node)
{
	struct mlxsw_sp_vr *vr = fib_node->fib->vr;

	if (fib_node->fib_entry)
		return;
	mlxsw_sp_fib_node_fini(mlxsw_sp, fib_node);
	mlxsw_sp_fib_node_destroy(fib_node);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}
static int mlxsw_sp_fib_node_entry_link(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;
	int err;

	fib_node->fib_entry = fib_entry;

	err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
	if (err)
		goto err_fib_entry_update;

	return 0;

err_fib_entry_update:
	fib_node->fib_entry = NULL;
	return err;
}

static void
mlxsw_sp_fib_node_entry_unlink(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib_entry->fib_node;

	mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
	fib_node->fib_entry = NULL;
}
static bool mlxsw_sp_fib4_allow_replace(struct mlxsw_sp_fib4_entry *fib4_entry)
{
	struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
	struct mlxsw_sp_fib4_entry *fib4_replaced;

	if (!fib_node->fib_entry)
		return true;

	fib4_replaced = container_of(fib_node->fib_entry,
				     struct mlxsw_sp_fib4_entry, common);
	if (fib4_entry->tb_id == RT_TABLE_MAIN &&
	    fib4_replaced->tb_id == RT_TABLE_LOCAL)
		return false;

	return true;
}
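
/* An IPv4 route in the main table must not replace a matching route in the
 * local table, since the kernel consults the local table first. E.g.
 * (illustrative):
 *
 *	# ip address add 192.0.2.1/32 dev swp1       (local table route)
 *	# ip route add 192.0.2.1/32 via 198.51.100.2 (main table route)
 *
 * The second route is notified with tb_id RT_TABLE_MAIN, but the local
 * table entry keeps precedence, so the replacement is skipped above.
 */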
static int
mlxsw_sp_router_fib4_replace(struct mlxsw_sp *mlxsw_sp,
			     const struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry, *fib4_replaced;
	struct mlxsw_sp_fib_entry *replaced;
	struct mlxsw_sp_fib_node *fib_node;
	int err;

	if (fen_info->fi->nh &&
	    !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, fen_info->fi->nh->id))
		return 0;

	fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, fen_info->tb_id,
					 &fen_info->dst, sizeof(fen_info->dst),
					 fen_info->dst_len,
					 MLXSW_SP_L3_PROTO_IPV4);
	if (IS_ERR(fib_node)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to get FIB node\n");
		return PTR_ERR(fib_node);
	}

	fib4_entry = mlxsw_sp_fib4_entry_create(mlxsw_sp, fib_node, fen_info);
	if (IS_ERR(fib4_entry)) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to create FIB entry\n");
		err = PTR_ERR(fib4_entry);
		goto err_fib4_entry_create;
	}

	if (!mlxsw_sp_fib4_allow_replace(fib4_entry)) {
		mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
		mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
		return 0;
	}

	replaced = fib_node->fib_entry;
	err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib4_entry->common);
	if (err) {
		dev_warn(mlxsw_sp->bus_info->dev, "Failed to link FIB entry to node\n");
		goto err_fib_node_entry_link;
	}

	/* Nothing to replace */
	if (!replaced)
		return 0;

	mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
	fib4_replaced = container_of(replaced, struct mlxsw_sp_fib4_entry,
				     common);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_replaced);

	return 0;

err_fib_node_entry_link:
	fib_node->fib_entry = replaced;
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
err_fib4_entry_create:
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
	return err;
}
static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
				     struct fib_entry_notifier_info *fen_info)
{
	struct mlxsw_sp_fib4_entry *fib4_entry;
	struct mlxsw_sp_fib_node *fib_node;

	fib4_entry = mlxsw_sp_fib4_entry_lookup(mlxsw_sp, fen_info);
	if (!fib4_entry)
		return;
	fib_node = fib4_entry->common.fib_node;

	mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib4_entry->common);
	mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
}
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
	/* Multicast routes aren't supported, so ignore them. Neighbour
	 * Discovery packets are specifically trapped.
	 */
	if (ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_MULTICAST)
		return true;

	/* Cloned routes are irrelevant in the forwarding path. */
	if (rt->fib6_flags & RTF_CACHE)
		return true;

	return false;
}

static struct mlxsw_sp_rt6 *mlxsw_sp_rt6_create(struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	mlxsw_sp_rt6 = kzalloc(sizeof(*mlxsw_sp_rt6), GFP_KERNEL);
	if (!mlxsw_sp_rt6)
		return ERR_PTR(-ENOMEM);

	/* In case of route replace, replaced route is deleted with
	 * no notification. Take reference to prevent accessing freed
	 * memory.
	 */
	mlxsw_sp_rt6->rt = rt;
	fib6_info_hold(rt);

	return mlxsw_sp_rt6;
}
#if IS_ENABLED(CONFIG_IPV6)
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
	fib6_info_release(rt);
}
#else
static void mlxsw_sp_rt6_release(struct fib6_info *rt)
{
}
#endif

static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
{
	struct fib6_nh *fib6_nh = mlxsw_sp_rt6->rt->fib6_nh;

	if (!mlxsw_sp_rt6->rt->nh)
		fib6_nh->fib_nh_flags &= ~RTNH_F_OFFLOAD;
	mlxsw_sp_rt6_release(mlxsw_sp_rt6->rt);
	kfree(mlxsw_sp_rt6);
}
static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
	return list_first_entry(&fib6_entry->rt6_list, struct mlxsw_sp_rt6,
				list)->rt;
}

static struct mlxsw_sp_rt6 *
mlxsw_sp_fib6_entry_rt_find(const struct mlxsw_sp_fib6_entry *fib6_entry,
			    const struct fib6_info *rt)
{
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;

	list_for_each_entry(mlxsw_sp_rt6, &fib6_entry->rt6_list, list) {
		if (mlxsw_sp_rt6->rt == rt)
			return mlxsw_sp_rt6;
	}

	return NULL;
}

static bool mlxsw_sp_nexthop6_ipip_type(const struct mlxsw_sp *mlxsw_sp,
					const struct fib6_info *rt,
					enum mlxsw_sp_ipip_type *ret)
{
	return rt->fib6_nh->fib_nh_dev &&
	       mlxsw_sp_netdev_ipip_type(mlxsw_sp, rt->fib6_nh->fib_nh_dev, ret);
}
static int mlxsw_sp_nexthop6_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_nexthop *nh,
				  const struct fib6_info *rt)
{
	struct net_device *dev = rt->fib6_nh->fib_nh_dev;
	int err;

	nh->nhgi = nh_grp->nhgi;
	nh->nh_weight = rt->fib6_nh->fib_nh_weight;
	memcpy(&nh->gw_addr, &rt->fib6_nh->fib_nh_gw6, sizeof(nh->gw_addr));
#if IS_ENABLED(CONFIG_IPV6)
	nh->neigh_tbl = &nd_tbl;
#endif
	mlxsw_sp_nexthop_counter_alloc(mlxsw_sp, nh);

	list_add_tail(&nh->router_list_node, &mlxsw_sp->router->nexthop_list);

	if (!dev)
		return 0;
	nh->ifindex = dev->ifindex;

	err = mlxsw_sp_nexthop_type_init(mlxsw_sp, nh, dev);
	if (err)
		goto err_nexthop_type_init;

	return 0;

err_nexthop_type_init:
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
	return err;
}

static void mlxsw_sp_nexthop6_fini(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_nexthop *nh)
{
	mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
	list_del(&nh->router_list_node);
	mlxsw_sp_nexthop_counter_free(mlxsw_sp, nh);
}
static bool mlxsw_sp_rt6_is_gateway(const struct mlxsw_sp *mlxsw_sp,
				    const struct fib6_info *rt)
{
	return rt->fib6_nh->fib_nh_gw_family ||
	       mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, NULL);
}

static int
mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp,
				  struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group_info *nhgi;
	struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
	struct mlxsw_sp_nexthop *nh;
	int err, i;

	nhgi = kzalloc(struct_size(nhgi, nexthops, fib6_entry->nrt6),
		       GFP_KERNEL);
	if (!nhgi)
		return -ENOMEM;
	nh_grp->nhgi = nhgi;
	nhgi->nh_grp = nh_grp;
	mlxsw_sp_rt6 = list_first_entry(&fib6_entry->rt6_list,
					struct mlxsw_sp_rt6, list);
	nhgi->gateway = mlxsw_sp_rt6_is_gateway(mlxsw_sp, mlxsw_sp_rt6->rt);
	nhgi->count = fib6_entry->nrt6;
	for (i = 0; i < nhgi->count; i++) {
		struct fib6_info *rt = mlxsw_sp_rt6->rt;

		nh = &nhgi->nexthops[i];
		err = mlxsw_sp_nexthop6_init(mlxsw_sp, nh_grp, nh, rt);
		if (err)
			goto err_nexthop6_init;
		mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
	}
	nh_grp->nhgi = nhgi;
	err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
	if (err)
		goto err_group_inc;
	err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	if (err)
		goto err_group_refresh;

	return 0;

err_group_refresh:
	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
err_group_inc:
	i = nhgi->count;
err_nexthop6_init:
	for (i--; i >= 0; i--) {
		nh = &nhgi->nexthops[i];
		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	kfree(nhgi);
	return err;
}
static void
mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_nexthop_group *nh_grp)
{
	struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
	int i;

	mlxsw_sp_nexthop_group_dec(mlxsw_sp);
	for (i = nhgi->count - 1; i >= 0; i--) {
		struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];

		mlxsw_sp_nexthop6_fini(mlxsw_sp, nh);
	}
	mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(nhgi->adj_index_valid);
	kfree(nhgi);
}

static struct mlxsw_sp_nexthop_group *
mlxsw_sp_nexthop6_group_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp;
	int err;

	nh_grp = kzalloc(sizeof(*nh_grp), GFP_KERNEL);
	if (!nh_grp)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&nh_grp->vr_list);
	err = rhashtable_init(&nh_grp->vr_ht,
			      &mlxsw_sp_nexthop_group_vr_ht_params);
	if (err)
		goto err_nexthop_group_vr_ht_init;
	INIT_LIST_HEAD(&nh_grp->fib_list);
	nh_grp->type = MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6;

	err = mlxsw_sp_nexthop6_group_info_init(mlxsw_sp, nh_grp, fib6_entry);
	if (err)
		goto err_nexthop_group_info_init;

	err = mlxsw_sp_nexthop_group_insert(mlxsw_sp, nh_grp);
	if (err)
		goto err_nexthop_group_insert;

	nh_grp->can_destroy = true;

	return nh_grp;

err_nexthop_group_insert:
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
err_nexthop_group_info_init:
	rhashtable_destroy(&nh_grp->vr_ht);
err_nexthop_group_vr_ht_init:
	kfree(nh_grp);
	return ERR_PTR(err);
}

static void
mlxsw_sp_nexthop6_group_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_nexthop_group *nh_grp)
{
	if (!nh_grp->can_destroy)
		return;
	mlxsw_sp_nexthop_group_remove(mlxsw_sp, nh_grp);
	mlxsw_sp_nexthop6_group_info_fini(mlxsw_sp, nh_grp);
	WARN_ON_ONCE(!list_empty(&nh_grp->vr_list));
	rhashtable_destroy(&nh_grp->vr_ht);
	kfree(nh_grp);
}
static int mlxsw_sp_nexthop6_group_get(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
	struct mlxsw_sp_nexthop_group *nh_grp;

	if (rt->nh) {
		nh_grp = mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp,
							   rt->nh->id);
		if (WARN_ON_ONCE(!nh_grp))
			return -EINVAL;
		goto out;
	}

	nh_grp = mlxsw_sp_nexthop6_group_lookup(mlxsw_sp, fib6_entry);
	if (!nh_grp) {
		nh_grp = mlxsw_sp_nexthop6_group_create(mlxsw_sp, fib6_entry);
		if (IS_ERR(nh_grp))
			return PTR_ERR(nh_grp);
	}

	/* The route and the nexthop are described by the same struct, so we
	 * need to update the nexthop offload indication for the new route.
	 */
	__mlxsw_sp_nexthop6_group_offload_refresh(nh_grp, fib6_entry);

out:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &nh_grp->fib_list);
	fib6_entry->common.nh_group = nh_grp;

	return 0;
}

static void mlxsw_sp_nexthop6_group_put(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fib_entry *fib_entry)
{
	struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;

	list_del(&fib_entry->nexthop_group_node);
	if (!list_empty(&nh_grp->fib_list))
		return;

	if (nh_grp->type == MLXSW_SP_NEXTHOP_GROUP_TYPE_OBJ) {
		mlxsw_sp_nexthop_obj_group_destroy(mlxsw_sp, nh_grp);
		return;
	}

	mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, nh_grp);
}
static int
mlxsw_sp_nexthop6_group_update(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fib6_entry *fib6_entry)
{
	struct mlxsw_sp_nexthop_group *old_nh_grp = fib6_entry->common.nh_group;
	struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
	int err;

	mlxsw_sp_nexthop_group_vr_unlink(old_nh_grp, fib_node->fib);
	fib6_entry->common.nh_group = NULL;
	list_del(&fib6_entry->common.nexthop_group_node);

	err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
	if (err)
		goto err_nexthop6_group_get;

	err = mlxsw_sp_nexthop_group_vr_link(fib6_entry->common.nh_group,
					     fib_node->fib);
	if (err)
		goto err_nexthop_group_vr_link;

	/* In case this entry is offloaded, then the adjacency index
	 * currently associated with it in the device's table is that
	 * of the old group. Start using the new one instead.
	 */
	err = mlxsw_sp_fib_entry_update(mlxsw_sp, &fib6_entry->common);
	if (err)
		goto err_fib_entry_update;

	if (list_empty(&old_nh_grp->fib_list))
		mlxsw_sp_nexthop6_group_destroy(mlxsw_sp, old_nh_grp);

	return 0;

err_fib_entry_update:
	mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
					 fib_node->fib);
err_nexthop_group_vr_link:
	mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
err_nexthop6_group_get:
	list_add_tail(&fib6_entry->common.nexthop_group_node,
		      &old_nh_grp->fib_list);
	fib6_entry->common.nh_group = old_nh_grp;
	mlxsw_sp_nexthop_group_vr_link(old_nh_grp, fib_node->fib);
	return err;
}
6771 mlxsw_sp_fib6_entry_nexthop_add(struct mlxsw_sp *mlxsw_sp,
6772 struct mlxsw_sp_fib6_entry *fib6_entry,
6773 struct fib6_info **rt_arr, unsigned int nrt6)
6775 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6778 for (i = 0; i < nrt6; i++) {
6779 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6780 if (IS_ERR(mlxsw_sp_rt6)) {
6781 err = PTR_ERR(mlxsw_sp_rt6);
6782 goto err_rt6_unwind;
6785 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6789 err = mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
6791 goto err_rt6_unwind;
6796 for (; i > 0; i--) {
6798 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6799 struct mlxsw_sp_rt6, list);
6800 list_del(&mlxsw_sp_rt6->list);
6801 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6807 mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
6808 struct mlxsw_sp_fib6_entry *fib6_entry,
6809 struct fib6_info **rt_arr, unsigned int nrt6)
6811 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6814 for (i = 0; i < nrt6; i++) {
6815 mlxsw_sp_rt6 = mlxsw_sp_fib6_entry_rt_find(fib6_entry,
6816 rt_arr[i]);
6817 if (WARN_ON_ONCE(!mlxsw_sp_rt6))
6818 continue;
6820 fib6_entry->nrt6--;
6821 list_del(&mlxsw_sp_rt6->list);
6822 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6825 mlxsw_sp_nexthop6_group_update(mlxsw_sp, fib6_entry);
6829 mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
6830 struct mlxsw_sp_fib_entry *fib_entry,
6831 const struct fib6_info *rt)
6833 struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
6834 union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
6835 u32 tb_id = mlxsw_sp_fix_tb_id(rt->fib6_table->tb6_id);
6836 struct mlxsw_sp_router *router = mlxsw_sp->router;
6837 int ifindex = nhgi->nexthops[0].ifindex;
6838 struct mlxsw_sp_ipip_entry *ipip_entry;
6840 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6841 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
6842 MLXSW_SP_L3_PROTO_IPV6,
6843 &dip);
6845 if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
6846 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
6847 return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
6848 ipip_entry);
6850 if (mlxsw_sp_router_nve_is_decap(mlxsw_sp, tb_id,
6851 MLXSW_SP_L3_PROTO_IPV6, &dip)) {
6852 u32 tunnel_index;
6854 tunnel_index = router->nve_decap_config.tunnel_index;
6855 fib_entry->decap.tunnel_index = tunnel_index;
6856 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP;
6857 }
6859 return 0;
6862 static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
6863 struct mlxsw_sp_fib_entry *fib_entry,
6864 const struct fib6_info *rt)
6866 if (rt->fib6_flags & RTF_LOCAL)
6867 return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
6868 rt);
6869 if (rt->fib6_flags & RTF_ANYCAST)
6870 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
6871 else if (rt->fib6_type == RTN_BLACKHOLE)
6872 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
6873 else if (rt->fib6_flags & RTF_REJECT)
6874 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_UNREACHABLE;
6875 else if (fib_entry->nh_group->nhgi->gateway)
6876 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
6878 fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
6880 return 0;
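/* Note: the mapping above mirrors IPv6 route semantics: local and
 * anycast routes are trapped to the CPU, blackhole routes are
 * presumably discarded in hardware, reject routes are marked
 * unreachable, gateway routes are forwarded via an adjacency (REMOTE)
 * and directly connected routes use the LOCAL entry type.
 */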
6884 mlxsw_sp_fib6_entry_rt_destroy_all(struct mlxsw_sp_fib6_entry *fib6_entry)
6886 struct mlxsw_sp_rt6 *mlxsw_sp_rt6, *tmp;
6888 list_for_each_entry_safe(mlxsw_sp_rt6, tmp, &fib6_entry->rt6_list,
6891 list_del(&mlxsw_sp_rt6->list);
6892 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6896 static struct mlxsw_sp_fib6_entry *
6897 mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
6898 struct mlxsw_sp_fib_node *fib_node,
6899 struct fib6_info **rt_arr, unsigned int nrt6)
6901 struct mlxsw_sp_fib6_entry *fib6_entry;
6902 struct mlxsw_sp_fib_entry *fib_entry;
6903 struct mlxsw_sp_rt6 *mlxsw_sp_rt6;
6906 fib6_entry = kzalloc(sizeof(*fib6_entry), GFP_KERNEL);
6907 if (!fib6_entry)
6908 return ERR_PTR(-ENOMEM);
6909 fib_entry = &fib6_entry->common;
6911 INIT_LIST_HEAD(&fib6_entry->rt6_list);
6913 for (i = 0; i < nrt6; i++) {
6914 mlxsw_sp_rt6 = mlxsw_sp_rt6_create(rt_arr[i]);
6915 if (IS_ERR(mlxsw_sp_rt6)) {
6916 err = PTR_ERR(mlxsw_sp_rt6);
6917 goto err_rt6_unwind;
6919 list_add_tail(&mlxsw_sp_rt6->list, &fib6_entry->rt6_list);
6923 err = mlxsw_sp_nexthop6_group_get(mlxsw_sp, fib6_entry);
6924 if (err)
6925 goto err_rt6_unwind;
6927 err = mlxsw_sp_nexthop_group_vr_link(fib_entry->nh_group,
6928 fib_node->fib);
6929 if (err)
6930 goto err_nexthop_group_vr_link;
6932 err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
6933 if (err)
6934 goto err_fib6_entry_type_set;
6936 fib_entry->fib_node = fib_node;
6938 return fib6_entry;
6940 err_fib6_entry_type_set:
6941 mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
6942 err_nexthop_group_vr_link:
6943 mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
6944 err_rt6_unwind:
6945 for (; i > 0; i--) {
6947 mlxsw_sp_rt6 = list_last_entry(&fib6_entry->rt6_list,
6948 struct mlxsw_sp_rt6, list);
6949 list_del(&mlxsw_sp_rt6->list);
6950 mlxsw_sp_rt6_destroy(mlxsw_sp_rt6);
6953 return ERR_PTR(err);
6957 mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
6958 struct mlxsw_sp_fib6_entry *fib6_entry)
6960 mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
6963 static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
6964 struct mlxsw_sp_fib6_entry *fib6_entry)
6966 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
6968 mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
6969 mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
6971 mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
6972 mlxsw_sp_fib6_entry_rt_destroy_all(fib6_entry);
6973 WARN_ON(fib6_entry->nrt6);
6977 static struct mlxsw_sp_fib6_entry *
6978 mlxsw_sp_fib6_entry_lookup(struct mlxsw_sp *mlxsw_sp,
6979 const struct fib6_info *rt)
6981 struct mlxsw_sp_fib6_entry *fib6_entry;
6982 struct mlxsw_sp_fib_node *fib_node;
6983 struct mlxsw_sp_fib *fib;
6984 struct fib6_info *cmp_rt;
6985 struct mlxsw_sp_vr *vr;
6987 vr = mlxsw_sp_vr_find(mlxsw_sp, rt->fib6_table->tb6_id);
6990 fib = mlxsw_sp_vr_fib(vr, MLXSW_SP_L3_PROTO_IPV6);
6992 fib_node = mlxsw_sp_fib_node_lookup(fib, &rt->fib6_dst.addr,
6993 sizeof(rt->fib6_dst.addr),
6998 fib6_entry = container_of(fib_node->fib_entry,
6999 struct mlxsw_sp_fib6_entry, common);
7000 cmp_rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7001 if (rt->fib6_table->tb6_id == cmp_rt->fib6_table->tb6_id &&
7002 rt->fib6_metric == cmp_rt->fib6_metric &&
7003 mlxsw_sp_fib6_entry_rt_find(fib6_entry, rt))
7009 static bool mlxsw_sp_fib6_allow_replace(struct mlxsw_sp_fib6_entry *fib6_entry)
7011 struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
7012 struct mlxsw_sp_fib6_entry *fib6_replaced;
7013 struct fib6_info *rt, *rt_replaced;
7015 if (!fib_node->fib_entry)
7018 fib6_replaced = container_of(fib_node->fib_entry,
7019 struct mlxsw_sp_fib6_entry,
7021 rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
7022 rt_replaced = mlxsw_sp_fib6_entry_rt(fib6_replaced);
7023 if (rt->fib6_table->tb6_id == RT_TABLE_MAIN &&
7024 rt_replaced->fib6_table->tb6_id == RT_TABLE_LOCAL)
7025 return false;
7027 return true;
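/* Note: the check above presumably exists because the hardware holds a
 * single entry per prefix, while the kernel consults the IPv6 local
 * table before the main table; letting a main-table route shadow a
 * local-table one in hardware could diverge from the kernel's lookup
 * result.
 */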
7030 static int mlxsw_sp_router_fib6_replace(struct mlxsw_sp *mlxsw_sp,
7031 struct fib6_info **rt_arr,
7034 struct mlxsw_sp_fib6_entry *fib6_entry, *fib6_replaced;
7035 struct mlxsw_sp_fib_entry *replaced;
7036 struct mlxsw_sp_fib_node *fib_node;
7037 struct fib6_info *rt = rt_arr[0];
7040 if (rt->fib6_src.plen)
7043 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7046 if (rt->nh && !mlxsw_sp_nexthop_obj_group_lookup(mlxsw_sp, rt->nh->id))
7049 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7051 sizeof(rt->fib6_dst.addr),
7053 MLXSW_SP_L3_PROTO_IPV6);
7054 if (IS_ERR(fib_node))
7055 return PTR_ERR(fib_node);
7057 fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt_arr,
7059 if (IS_ERR(fib6_entry)) {
7060 err = PTR_ERR(fib6_entry);
7061 goto err_fib6_entry_create;
7064 if (!mlxsw_sp_fib6_allow_replace(fib6_entry)) {
7065 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7066 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7067 return 0;
7068 }
7070 replaced = fib_node->fib_entry;
7071 err = mlxsw_sp_fib_node_entry_link(mlxsw_sp, &fib6_entry->common);
7073 goto err_fib_node_entry_link;
7075 /* Nothing to replace */
7076 if (!replaced)
7077 return 0;
7079 mlxsw_sp_fib_entry_hw_flags_clear(mlxsw_sp, replaced);
7080 fib6_replaced = container_of(replaced, struct mlxsw_sp_fib6_entry,
7082 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_replaced);
7086 err_fib_node_entry_link:
7087 fib_node->fib_entry = replaced;
7088 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7089 err_fib6_entry_create:
7090 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7094 static int mlxsw_sp_router_fib6_append(struct mlxsw_sp *mlxsw_sp,
7095 struct fib6_info **rt_arr,
7098 struct mlxsw_sp_fib6_entry *fib6_entry;
7099 struct mlxsw_sp_fib_node *fib_node;
7100 struct fib6_info *rt = rt_arr[0];
7103 if (rt->fib6_src.plen)
7106 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7109 fib_node = mlxsw_sp_fib_node_get(mlxsw_sp, rt->fib6_table->tb6_id,
7111 sizeof(rt->fib6_dst.addr),
7113 MLXSW_SP_L3_PROTO_IPV6);
7114 if (IS_ERR(fib_node))
7115 return PTR_ERR(fib_node);
7117 if (WARN_ON_ONCE(!fib_node->fib_entry)) {
7118 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7122 fib6_entry = container_of(fib_node->fib_entry,
7123 struct mlxsw_sp_fib6_entry, common);
7124 err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt_arr,
7127 goto err_fib6_entry_nexthop_add;
7131 err_fib6_entry_nexthop_add:
7132 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7136 static void mlxsw_sp_router_fib6_del(struct mlxsw_sp *mlxsw_sp,
7137 struct fib6_info **rt_arr,
7140 struct mlxsw_sp_fib6_entry *fib6_entry;
7141 struct mlxsw_sp_fib_node *fib_node;
7142 struct fib6_info *rt = rt_arr[0];
7144 if (mlxsw_sp_fib6_rt_should_ignore(rt))
7147 /* Multipath routes are first added to the FIB trie and only then
7148 * notified. If we vetoed the addition, we will get a delete
7149 * notification for a route we do not have. Therefore, do not warn if
7150 * the route was not found.
7151 */
7152 fib6_entry = mlxsw_sp_fib6_entry_lookup(mlxsw_sp, rt);
7156 /* If not all the nexthops are deleted, then only reduce the nexthop
7157 * group.
7158 */
7159 if (nrt6 != fib6_entry->nrt6) {
7160 mlxsw_sp_fib6_entry_nexthop_del(mlxsw_sp, fib6_entry, rt_arr,
7161 nrt6);
7162 return;
7163 }
7165 fib_node = fib6_entry->common.fib_node;
7167 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, &fib6_entry->common);
7168 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7169 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7172 static struct mlxsw_sp_mr_table *
7173 mlxsw_sp_router_fibmr_family_to_table(struct mlxsw_sp_vr *vr, int family)
7175 if (family == RTNL_FAMILY_IPMR)
7176 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV4];
7178 return vr->mr_table[MLXSW_SP_L3_PROTO_IPV6];
7181 static int mlxsw_sp_router_fibmr_add(struct mlxsw_sp *mlxsw_sp,
7182 struct mfc_entry_notifier_info *men_info,
7185 struct mlxsw_sp_mr_table *mrt;
7186 struct mlxsw_sp_vr *vr;
7188 vr = mlxsw_sp_vr_get(mlxsw_sp, men_info->tb_id, NULL);
7192 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7193 return mlxsw_sp_mr_route_add(mrt, men_info->mfc, replace);
7196 static void mlxsw_sp_router_fibmr_del(struct mlxsw_sp *mlxsw_sp,
7197 struct mfc_entry_notifier_info *men_info)
7199 struct mlxsw_sp_mr_table *mrt;
7200 struct mlxsw_sp_vr *vr;
7202 vr = mlxsw_sp_vr_find(mlxsw_sp, men_info->tb_id);
7206 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, men_info->info.family);
7207 mlxsw_sp_mr_route_del(mrt, men_info->mfc);
7208 mlxsw_sp_vr_put(mlxsw_sp, vr);
7212 mlxsw_sp_router_fibmr_vif_add(struct mlxsw_sp *mlxsw_sp,
7213 struct vif_entry_notifier_info *ven_info)
7215 struct mlxsw_sp_mr_table *mrt;
7216 struct mlxsw_sp_rif *rif;
7217 struct mlxsw_sp_vr *vr;
7219 vr = mlxsw_sp_vr_get(mlxsw_sp, ven_info->tb_id, NULL);
7223 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7224 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, ven_info->dev);
7225 return mlxsw_sp_mr_vif_add(mrt, ven_info->dev,
7226 ven_info->vif_index,
7227 ven_info->vif_flags, rif);
7231 mlxsw_sp_router_fibmr_vif_del(struct mlxsw_sp *mlxsw_sp,
7232 struct vif_entry_notifier_info *ven_info)
7234 struct mlxsw_sp_mr_table *mrt;
7235 struct mlxsw_sp_vr *vr;
7237 vr = mlxsw_sp_vr_find(mlxsw_sp, ven_info->tb_id);
7241 mrt = mlxsw_sp_router_fibmr_family_to_table(vr, ven_info->info.family);
7242 mlxsw_sp_mr_vif_del(mrt, ven_info->vif_index);
7243 mlxsw_sp_vr_put(mlxsw_sp, vr);
7246 static void mlxsw_sp_fib4_node_flush(struct mlxsw_sp *mlxsw_sp,
7247 struct mlxsw_sp_fib_node *fib_node)
7249 struct mlxsw_sp_fib4_entry *fib4_entry;
7251 fib4_entry = container_of(fib_node->fib_entry,
7252 struct mlxsw_sp_fib4_entry, common);
7253 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7254 mlxsw_sp_fib4_entry_destroy(mlxsw_sp, fib4_entry);
7255 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7258 static void mlxsw_sp_fib6_node_flush(struct mlxsw_sp *mlxsw_sp,
7259 struct mlxsw_sp_fib_node *fib_node)
7261 struct mlxsw_sp_fib6_entry *fib6_entry;
7263 fib6_entry = container_of(fib_node->fib_entry,
7264 struct mlxsw_sp_fib6_entry, common);
7265 mlxsw_sp_fib_node_entry_unlink(mlxsw_sp, fib_node->fib_entry);
7266 mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
7267 mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
7270 static void mlxsw_sp_fib_node_flush(struct mlxsw_sp *mlxsw_sp,
7271 struct mlxsw_sp_fib_node *fib_node)
7273 switch (fib_node->fib->proto) {
7274 case MLXSW_SP_L3_PROTO_IPV4:
7275 mlxsw_sp_fib4_node_flush(mlxsw_sp, fib_node);
7277 case MLXSW_SP_L3_PROTO_IPV6:
7278 mlxsw_sp_fib6_node_flush(mlxsw_sp, fib_node);
7283 static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
7284 struct mlxsw_sp_vr *vr,
7285 enum mlxsw_sp_l3proto proto)
7287 struct mlxsw_sp_fib *fib = mlxsw_sp_vr_fib(vr, proto);
7288 struct mlxsw_sp_fib_node *fib_node, *tmp;
7290 list_for_each_entry_safe(fib_node, tmp, &fib->node_list, list) {
7291 bool do_break = &tmp->list == &fib->node_list;
7293 mlxsw_sp_fib_node_flush(mlxsw_sp, fib_node);
7299 static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
7303 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
7304 struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
7306 if (!mlxsw_sp_vr_is_used(vr))
7309 for (j = 0; j < MLXSW_SP_L3_PROTO_MAX; j++)
7310 mlxsw_sp_mr_table_flush(vr->mr_table[j]);
7311 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
7313 /* If virtual router was only used for IPv4, then it's no
7314 * longer used.
7315 */
7316 if (!mlxsw_sp_vr_is_used(vr))
7318 mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
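/* Note: this flush walks every in-use virtual router and removes all
 * remaining multicast and unicast entries, presumably as a last-resort
 * cleanup on router teardown or abort, after the FIB notifiers can no
 * longer deliver individual deletions.
 */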
7322 struct mlxsw_sp_fib6_event_work {
7323 struct fib6_info **rt_arr;
7327 struct mlxsw_sp_fib_event_work {
7328 struct work_struct work;
7330 struct mlxsw_sp_fib6_event_work fib6_work;
7331 struct fib_entry_notifier_info fen_info;
7332 struct fib_rule_notifier_info fr_info;
7333 struct fib_nh_notifier_info fnh_info;
7334 struct mfc_entry_notifier_info men_info;
7335 struct vif_entry_notifier_info ven_info;
7337 struct mlxsw_sp *mlxsw_sp;
7338 unsigned long event;
7342 mlxsw_sp_router_fib6_work_init(struct mlxsw_sp_fib6_event_work *fib6_work,
7343 struct fib6_entry_notifier_info *fen6_info)
7345 struct fib6_info *rt = fen6_info->rt;
7346 struct fib6_info **rt_arr;
7347 struct fib6_info *iter;
7351 nrt6 = fen6_info->nsiblings + 1;
7353 rt_arr = kcalloc(nrt6, sizeof(struct fib6_info *), GFP_ATOMIC);
7357 fib6_work->rt_arr = rt_arr;
7358 fib6_work->nrt6 = nrt6;
7363 if (!fen6_info->nsiblings)
7366 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
7367 if (i == fen6_info->nsiblings)
7370 rt_arr[i + 1] = iter;
7371 fib6_info_hold(iter);
7374 WARN_ON_ONCE(i != fen6_info->nsiblings);
7380 mlxsw_sp_router_fib6_work_fini(struct mlxsw_sp_fib6_event_work *fib6_work)
7384 for (i = 0; i < fib6_work->nrt6; i++)
7385 mlxsw_sp_rt6_release(fib6_work->rt_arr[i]);
7386 kfree(fib6_work->rt_arr);
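/* Note: each fib6_info placed in rt_arr is reference-counted when the
 * work item is initialized (see fib6_info_hold() above), so the kernel
 * cannot free the routes while the work is queued; the releases here
 * are the matching puts.
 */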
7389 static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
7391 struct mlxsw_sp_fib_event_work *fib_work =
7392 container_of(work, struct mlxsw_sp_fib_event_work, work);
7393 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7396 mutex_lock(&mlxsw_sp->router->lock);
7397 mlxsw_sp_span_respin(mlxsw_sp);
7399 switch (fib_work->event) {
7400 case FIB_EVENT_ENTRY_REPLACE:
7401 err = mlxsw_sp_router_fib4_replace(mlxsw_sp,
7402 &fib_work->fen_info);
7403 if (err) {
7404 dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7405 mlxsw_sp_fib4_offload_failed_flag_set(mlxsw_sp,
7406 &fib_work->fen_info);
7408 fib_info_put(fib_work->fen_info.fi);
7410 case FIB_EVENT_ENTRY_DEL:
7411 mlxsw_sp_router_fib4_del(mlxsw_sp, &fib_work->fen_info);
7412 fib_info_put(fib_work->fen_info.fi);
7414 case FIB_EVENT_NH_ADD:
7415 case FIB_EVENT_NH_DEL:
7416 mlxsw_sp_nexthop4_event(mlxsw_sp, fib_work->event,
7417 fib_work->fnh_info.fib_nh);
7418 fib_info_put(fib_work->fnh_info.fib_nh->nh_parent);
7421 mutex_unlock(&mlxsw_sp->router->lock);
7425 static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
7427 struct mlxsw_sp_fib_event_work *fib_work =
7428 container_of(work, struct mlxsw_sp_fib_event_work, work);
7429 struct mlxsw_sp_fib6_event_work *fib6_work = &fib_work->fib6_work;
7430 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7433 mutex_lock(&mlxsw_sp->router->lock);
7434 mlxsw_sp_span_respin(mlxsw_sp);
7436 switch (fib_work->event) {
7437 case FIB_EVENT_ENTRY_REPLACE:
7438 err = mlxsw_sp_router_fib6_replace(mlxsw_sp,
7439 fib6_work->rt_arr,
7440 fib6_work->nrt6);
7441 if (err) {
7442 dev_warn(mlxsw_sp->bus_info->dev, "FIB replace failed.\n");
7443 mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7444 fib6_work->rt_arr,
7445 fib6_work->nrt6);
7446 }
7447 mlxsw_sp_router_fib6_work_fini(fib6_work);
7449 case FIB_EVENT_ENTRY_APPEND:
7450 err = mlxsw_sp_router_fib6_append(mlxsw_sp,
7451 fib6_work->rt_arr,
7452 fib6_work->nrt6);
7453 if (err) {
7454 dev_warn(mlxsw_sp->bus_info->dev, "FIB append failed.\n");
7455 mlxsw_sp_fib6_offload_failed_flag_set(mlxsw_sp,
7456 fib6_work->rt_arr,
7457 fib6_work->nrt6);
7458 }
7459 mlxsw_sp_router_fib6_work_fini(fib6_work);
7461 case FIB_EVENT_ENTRY_DEL:
7462 mlxsw_sp_router_fib6_del(mlxsw_sp,
7463 fib6_work->rt_arr,
7464 fib6_work->nrt6);
7465 mlxsw_sp_router_fib6_work_fini(fib6_work);
7468 mutex_unlock(&mlxsw_sp->router->lock);
7472 static void mlxsw_sp_router_fibmr_event_work(struct work_struct *work)
7474 struct mlxsw_sp_fib_event_work *fib_work =
7475 container_of(work, struct mlxsw_sp_fib_event_work, work);
7476 struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
7481 mutex_lock(&mlxsw_sp->router->lock);
7482 switch (fib_work->event) {
7483 case FIB_EVENT_ENTRY_REPLACE:
7484 case FIB_EVENT_ENTRY_ADD:
7485 replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
7487 err = mlxsw_sp_router_fibmr_add(mlxsw_sp, &fib_work->men_info,
7488 replace);
7489 if (err)
7490 dev_warn(mlxsw_sp->bus_info->dev, "MR entry add failed.\n");
7491 mr_cache_put(fib_work->men_info.mfc);
7493 case FIB_EVENT_ENTRY_DEL:
7494 mlxsw_sp_router_fibmr_del(mlxsw_sp, &fib_work->men_info);
7495 mr_cache_put(fib_work->men_info.mfc);
7497 case FIB_EVENT_VIF_ADD:
7498 err = mlxsw_sp_router_fibmr_vif_add(mlxsw_sp,
7499 &fib_work->ven_info);
7500 if (err)
7501 dev_warn(mlxsw_sp->bus_info->dev, "MR VIF add failed.\n");
7502 dev_put(fib_work->ven_info.dev);
7504 case FIB_EVENT_VIF_DEL:
7505 mlxsw_sp_router_fibmr_vif_del(mlxsw_sp,
7506 &fib_work->ven_info);
7507 dev_put(fib_work->ven_info.dev);
7510 mutex_unlock(&mlxsw_sp->router->lock);
7515 static void mlxsw_sp_router_fib4_event(struct mlxsw_sp_fib_event_work *fib_work,
7516 struct fib_notifier_info *info)
7518 struct fib_entry_notifier_info *fen_info;
7519 struct fib_nh_notifier_info *fnh_info;
7521 switch (fib_work->event) {
7522 case FIB_EVENT_ENTRY_REPLACE:
7523 case FIB_EVENT_ENTRY_DEL:
7524 fen_info = container_of(info, struct fib_entry_notifier_info,
7526 fib_work->fen_info = *fen_info;
7527 /* Take reference on fib_info to prevent it from being
7528 * freed while work is queued. Release it afterwards.
7530 fib_info_hold(fib_work->fen_info.fi);
7532 case FIB_EVENT_NH_ADD:
7533 case FIB_EVENT_NH_DEL:
7534 fnh_info = container_of(info, struct fib_nh_notifier_info,
7536 fib_work->fnh_info = *fnh_info;
7537 fib_info_hold(fib_work->fnh_info.fib_nh->nh_parent);
7542 static int mlxsw_sp_router_fib6_event(struct mlxsw_sp_fib_event_work *fib_work,
7543 struct fib_notifier_info *info)
7545 struct fib6_entry_notifier_info *fen6_info;
7548 switch (fib_work->event) {
7549 case FIB_EVENT_ENTRY_REPLACE:
7550 case FIB_EVENT_ENTRY_APPEND:
7551 case FIB_EVENT_ENTRY_DEL:
7552 fen6_info = container_of(info, struct fib6_entry_notifier_info,
7554 err = mlxsw_sp_router_fib6_work_init(&fib_work->fib6_work,
7555 fen6_info);
7565 mlxsw_sp_router_fibmr_event(struct mlxsw_sp_fib_event_work *fib_work,
7566 struct fib_notifier_info *info)
7568 switch (fib_work->event) {
7569 case FIB_EVENT_ENTRY_REPLACE:
7570 case FIB_EVENT_ENTRY_ADD:
7571 case FIB_EVENT_ENTRY_DEL:
7572 memcpy(&fib_work->men_info, info, sizeof(fib_work->men_info));
7573 mr_cache_hold(fib_work->men_info.mfc);
7575 case FIB_EVENT_VIF_ADD:
7576 case FIB_EVENT_VIF_DEL:
7577 memcpy(&fib_work->ven_info, info, sizeof(fib_work->ven_info));
7578 dev_hold(fib_work->ven_info.dev);
7583 static int mlxsw_sp_router_fib_rule_event(unsigned long event,
7584 struct fib_notifier_info *info,
7585 struct mlxsw_sp *mlxsw_sp)
7587 struct netlink_ext_ack *extack = info->extack;
7588 struct fib_rule_notifier_info *fr_info;
7589 struct fib_rule *rule;
7592 /* nothing to do at the moment */
7593 if (event == FIB_EVENT_RULE_DEL)
7596 fr_info = container_of(info, struct fib_rule_notifier_info, info);
7597 rule = fr_info->rule;
7599 /* Rule only affects locally generated traffic */
7600 if (rule->iifindex == mlxsw_sp_net(mlxsw_sp)->loopback_dev->ifindex)
7603 switch (info->family) {
7605 if (!fib4_rule_default(rule) && !rule->l3mdev)
7609 if (!fib6_rule_default(rule) && !rule->l3mdev)
7612 case RTNL_FAMILY_IPMR:
7613 if (!ipmr_rule_default(rule) && !rule->l3mdev)
7616 case RTNL_FAMILY_IP6MR:
7617 if (!ip6mr_rule_default(rule) && !rule->l3mdev)
7623 NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported");
7628 /* Called with rcu_read_lock() */
7629 static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
7630 unsigned long event, void *ptr)
7632 struct mlxsw_sp_fib_event_work *fib_work;
7633 struct fib_notifier_info *info = ptr;
7634 struct mlxsw_sp_router *router;
7637 if ((info->family != AF_INET && info->family != AF_INET6 &&
7638 info->family != RTNL_FAMILY_IPMR &&
7639 info->family != RTNL_FAMILY_IP6MR))
7642 router = container_of(nb, struct mlxsw_sp_router, fib_nb);
7645 case FIB_EVENT_RULE_ADD:
7646 case FIB_EVENT_RULE_DEL:
7647 err = mlxsw_sp_router_fib_rule_event(event, info,
7648 router->mlxsw_sp);
7649 return notifier_from_errno(err);
7650 case FIB_EVENT_ENTRY_ADD:
7651 case FIB_EVENT_ENTRY_REPLACE:
7652 case FIB_EVENT_ENTRY_APPEND:
7653 if (info->family == AF_INET) {
7654 struct fib_entry_notifier_info *fen_info = ptr;
7656 if (fen_info->fi->fib_nh_is_v6) {
7657 NL_SET_ERR_MSG_MOD(info->extack, "IPv6 gateway with IPv4 route is not supported");
7658 return notifier_from_errno(-EINVAL);
7664 fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
7665 if (!fib_work)
7666 return NOTIFY_BAD;
7668 fib_work->mlxsw_sp = router->mlxsw_sp;
7669 fib_work->event = event;
7671 switch (info->family) {
7673 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib4_event_work);
7674 mlxsw_sp_router_fib4_event(fib_work, info);
7677 INIT_WORK(&fib_work->work, mlxsw_sp_router_fib6_event_work);
7678 err = mlxsw_sp_router_fib6_event(fib_work, info);
7682 case RTNL_FAMILY_IP6MR:
7683 case RTNL_FAMILY_IPMR:
7684 INIT_WORK(&fib_work->work, mlxsw_sp_router_fibmr_event_work);
7685 mlxsw_sp_router_fibmr_event(fib_work, info);
7689 mlxsw_core_schedule_work(&fib_work->work);
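/* Note: FIB notifiers run under rcu_read_lock() and thus cannot sleep,
 * hence the GFP_ATOMIC allocation above and the deferral of the actual
 * processing to a work item, where the router mutex can be taken
 * safely.
 */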
7698 static struct mlxsw_sp_rif *
7699 mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
7700 const struct net_device *dev)
7704 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
7705 if (mlxsw_sp->router->rifs[i] &&
7706 mlxsw_sp->router->rifs[i]->dev == dev)
7707 return mlxsw_sp->router->rifs[i];
7712 bool mlxsw_sp_rif_exists(struct mlxsw_sp *mlxsw_sp,
7713 const struct net_device *dev)
7715 struct mlxsw_sp_rif *rif;
7717 mutex_lock(&mlxsw_sp->router->lock);
7718 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7719 mutex_unlock(&mlxsw_sp->router->lock);
7724 u16 mlxsw_sp_rif_vid(struct mlxsw_sp *mlxsw_sp, const struct net_device *dev)
7726 struct mlxsw_sp_rif *rif;
7729 mutex_lock(&mlxsw_sp->router->lock);
7730 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
7734 /* We only return the VID for VLAN RIFs. Otherwise we return an
7735 * invalid value (0).
7737 if (rif->ops->type != MLXSW_SP_RIF_TYPE_VLAN)
7740 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
7743 mutex_unlock(&mlxsw_sp->router->lock);
7747 static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
7749 char ritr_pl[MLXSW_REG_RITR_LEN];
7752 mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
7753 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7757 mlxsw_reg_ritr_enable_set(ritr_pl, false);
7758 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
7761 static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
7762 struct mlxsw_sp_rif *rif)
7764 mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
7765 mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
7766 mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
7770 mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
7771 unsigned long event)
7773 struct inet6_dev *inet6_dev;
7774 bool addr_list_empty = true;
7775 struct in_device *idev;
7782 idev = __in_dev_get_rcu(dev);
7783 if (idev && idev->ifa_list)
7784 addr_list_empty = false;
7786 inet6_dev = __in6_dev_get(dev);
7787 if (addr_list_empty && inet6_dev &&
7788 !list_empty(&inet6_dev->addr_list))
7789 addr_list_empty = false;
7792 /* macvlans do not have a RIF, but rather piggy back on the
7793 * RIF of their lower device.
7795 if (netif_is_macvlan(dev) && addr_list_empty)
7798 if (rif && addr_list_empty &&
7799 !netif_is_l3_slave(rif->dev))
7801 /* It is possible we already removed the RIF ourselves
7802 * if it was assigned to a netdev that is now a bridge
7803 * or a LAG slave.
7804 */
7806 return false;
7811 static enum mlxsw_sp_rif_type
7812 mlxsw_sp_dev_rif_type(const struct mlxsw_sp *mlxsw_sp,
7813 const struct net_device *dev)
7815 enum mlxsw_sp_fid_type type;
7817 if (mlxsw_sp_netdev_ipip_type(mlxsw_sp, dev, NULL))
7818 return MLXSW_SP_RIF_TYPE_IPIP_LB;
7820 /* Otherwise RIF type is derived from the type of the underlying FID. */
7821 if (is_vlan_dev(dev) && netif_is_bridge_master(vlan_dev_real_dev(dev)))
7822 type = MLXSW_SP_FID_TYPE_8021Q;
7823 else if (netif_is_bridge_master(dev) && br_vlan_enabled(dev))
7824 type = MLXSW_SP_FID_TYPE_8021Q;
7825 else if (netif_is_bridge_master(dev))
7826 type = MLXSW_SP_FID_TYPE_8021D;
7828 type = MLXSW_SP_FID_TYPE_RFID;
7830 return mlxsw_sp_fid_type_rif_type(mlxsw_sp, type);
7833 static int mlxsw_sp_rif_index_alloc(struct mlxsw_sp *mlxsw_sp, u16 *p_rif_index,
7836 *p_rif_index = gen_pool_alloc(mlxsw_sp->router->rifs_table,
7837 rif_entries);
7838 if (*p_rif_index == 0)
7839 return -ENOBUFS;
7840 *p_rif_index -= MLXSW_SP_ROUTER_GENALLOC_OFFSET;
7842 /* RIF indexes must be aligned to the allocation size. */
7843 WARN_ON_ONCE(*p_rif_index % rif_entries);
7845 return 0;
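/* Note: gen_pool_alloc() returns 0 on failure, so a valid index of 0
 * would be indistinguishable from an error. The pool therefore hands
 * out addresses shifted by MLXSW_SP_ROUTER_GENALLOC_OFFSET, which is
 * subtracted again before the RIF index is used.
 */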
7848 static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
7851 gen_pool_free(mlxsw_sp->router->rifs_table,
7852 MLXSW_SP_ROUTER_GENALLOC_OFFSET + rif_index, rif_entries);
7855 static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
7857 struct net_device *l3_dev)
7859 struct mlxsw_sp_rif *rif;
7861 rif = kzalloc(rif_size, GFP_KERNEL);
7862 if (!rif)
7863 return NULL;
7865 INIT_LIST_HEAD(&rif->nexthop_list);
7866 INIT_LIST_HEAD(&rif->neigh_list);
7868 ether_addr_copy(rif->addr, l3_dev->dev_addr);
7869 rif->mtu = l3_dev->mtu;
7873 rif->rif_index = rif_index;
7878 struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
7881 return mlxsw_sp->router->rifs[rif_index];
7884 u16 mlxsw_sp_rif_index(const struct mlxsw_sp_rif *rif)
7886 return rif->rif_index;
7889 u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
7891 return lb_rif->common.rif_index;
7894 u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
7896 u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
7897 struct mlxsw_sp_vr *ul_vr;
7899 ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
7900 if (WARN_ON(IS_ERR(ul_vr)))
7906 u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
7908 return lb_rif->ul_rif_id;
7912 mlxsw_sp_router_port_l3_stats_enabled(struct mlxsw_sp_rif *rif)
7914 return mlxsw_sp_rif_counter_valid_get(rif,
7915 MLXSW_SP_RIF_COUNTER_EGRESS) &&
7916 mlxsw_sp_rif_counter_valid_get(rif,
7917 MLXSW_SP_RIF_COUNTER_INGRESS);
7921 mlxsw_sp_router_port_l3_stats_enable(struct mlxsw_sp_rif *rif)
7925 err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
7929 /* Clear stale data. */
7930 err = mlxsw_sp_rif_counter_fetch_clear(rif,
7931 MLXSW_SP_RIF_COUNTER_INGRESS,
7934 goto err_clear_ingress;
7936 err = mlxsw_sp_rif_counter_alloc(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
7938 goto err_alloc_egress;
7940 /* Clear stale data. */
7941 err = mlxsw_sp_rif_counter_fetch_clear(rif,
7942 MLXSW_SP_RIF_COUNTER_EGRESS,
7945 goto err_clear_egress;
7950 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
7953 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
7958 mlxsw_sp_router_port_l3_stats_disable(struct mlxsw_sp_rif *rif)
7960 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_EGRESS);
7961 mlxsw_sp_rif_counter_free(rif, MLXSW_SP_RIF_COUNTER_INGRESS);
7965 mlxsw_sp_router_port_l3_stats_report_used(struct mlxsw_sp_rif *rif,
7966 struct netdev_notifier_offload_xstats_info *info)
7968 if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
7970 netdev_offload_xstats_report_used(info->report_used);
7974 mlxsw_sp_router_port_l3_stats_fetch(struct mlxsw_sp_rif *rif,
7975 struct rtnl_hw_stats64 *p_stats)
7977 struct mlxsw_sp_rif_counter_set_basic ingress;
7978 struct mlxsw_sp_rif_counter_set_basic egress;
7981 err = mlxsw_sp_rif_counter_fetch_clear(rif,
7982 MLXSW_SP_RIF_COUNTER_INGRESS,
7987 err = mlxsw_sp_rif_counter_fetch_clear(rif,
7988 MLXSW_SP_RIF_COUNTER_EGRESS,
7993 #define MLXSW_SP_ROUTER_ALL_GOOD(SET, SFX) \
7994 ((SET.good_unicast_ ## SFX) + \
7995 (SET.good_multicast_ ## SFX) + \
7996 (SET.good_broadcast_ ## SFX))
7998 p_stats->rx_packets = MLXSW_SP_ROUTER_ALL_GOOD(ingress, packets);
7999 p_stats->tx_packets = MLXSW_SP_ROUTER_ALL_GOOD(egress, packets);
8000 p_stats->rx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(ingress, bytes);
8001 p_stats->tx_bytes = MLXSW_SP_ROUTER_ALL_GOOD(egress, bytes);
8002 p_stats->rx_errors = ingress.error_packets;
8003 p_stats->tx_errors = egress.error_packets;
8004 p_stats->rx_dropped = ingress.discard_packets;
8005 p_stats->tx_dropped = egress.discard_packets;
8006 p_stats->multicast = ingress.good_multicast_packets +
8007 ingress.good_broadcast_packets;
8009 #undef MLXSW_SP_ROUTER_ALL_GOOD
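/* Note: since the counters are fetched with a clearing read, each call
 * returns the delta since the previous fetch; rx/tx packets and bytes
 * sum only the "good" unicast, multicast and broadcast counters, while
 * errors and discards are reported separately.
 */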
8015 mlxsw_sp_router_port_l3_stats_report_delta(struct mlxsw_sp_rif *rif,
8016 struct netdev_notifier_offload_xstats_info *info)
8018 struct rtnl_hw_stats64 stats = {};
8021 if (!mlxsw_sp_router_port_l3_stats_enabled(rif))
8024 err = mlxsw_sp_router_port_l3_stats_fetch(rif, &stats);
8028 netdev_offload_xstats_report_delta(info->report_delta, &stats);
8032 struct mlxsw_sp_router_hwstats_notify_work {
8033 struct work_struct work;
8034 struct net_device *dev;
8037 static void mlxsw_sp_router_hwstats_notify_work(struct work_struct *work)
8039 struct mlxsw_sp_router_hwstats_notify_work *hws_work =
8040 container_of(work, struct mlxsw_sp_router_hwstats_notify_work,
8044 rtnl_offload_xstats_notify(hws_work->dev);
8046 dev_put(hws_work->dev);
8051 mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
8053 struct mlxsw_sp_router_hwstats_notify_work *hws_work;
8055 /* To collect notification payload, the core ends up sending another
8056 * notifier block message, which would deadlock on the attempt to
8057 * acquire the router lock again. Just postpone the notification until
8058 * later.
8059 */
8061 hws_work = kzalloc(sizeof(*hws_work), GFP_KERNEL);
8062 if (!hws_work)
8063 return;
8065 INIT_WORK(&hws_work->work, mlxsw_sp_router_hwstats_notify_work);
8067 hws_work->dev = dev;
8068 mlxsw_core_schedule_work(&hws_work->work);
8071 int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
8073 return rif->dev->ifindex;
8076 const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
8081 static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
8083 struct rtnl_hw_stats64 stats = {};
8085 if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
8086 netdev_offload_xstats_push_delta(rif->dev,
8087 NETDEV_OFFLOAD_XSTATS_TYPE_L3,
8091 static struct mlxsw_sp_rif *
8092 mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
8093 const struct mlxsw_sp_rif_params *params,
8094 struct netlink_ext_ack *extack)
8096 u8 rif_entries = params->double_entry ? 2 : 1;
8097 u32 tb_id = l3mdev_fib_table(params->dev);
8098 const struct mlxsw_sp_rif_ops *ops;
8099 struct mlxsw_sp_fid *fid = NULL;
8100 enum mlxsw_sp_rif_type type;
8101 struct mlxsw_sp_rif *rif;
8102 struct mlxsw_sp_vr *vr;
8106 type = mlxsw_sp_dev_rif_type(mlxsw_sp, params->dev);
8107 ops = mlxsw_sp->router->rif_ops_arr[type];
8109 vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN, extack);
8110 if (IS_ERR(vr))
8111 return ERR_CAST(vr);
8114 err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
8116 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
8117 goto err_rif_index_alloc;
8120 rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
8126 mlxsw_sp->router->rifs[rif_index] = rif;
8127 rif->mlxsw_sp = mlxsw_sp;
8129 rif->rif_entries = rif_entries;
8132 fid = ops->fid_get(rif, extack);
8141 ops->setup(rif, params);
8143 err = ops->configure(rif, extack);
8147 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++) {
8148 err = mlxsw_sp_mr_rif_add(vr->mr_table[i], rif);
8150 goto err_mr_rif_add;
8153 if (netdev_offload_xstats_enabled(rif->dev,
8154 NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8155 err = mlxsw_sp_router_port_l3_stats_enable(rif);
8157 goto err_stats_enable;
8158 mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
8160 mlxsw_sp_rif_counters_alloc(rif);
8163 atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
8168 for (i--; i >= 0; i--)
8169 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8170 ops->deconfigure(rif);
8173 mlxsw_sp_fid_put(fid);
8175 mlxsw_sp->router->rifs[rif_index] = NULL;
8179 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8180 err_rif_index_alloc:
8182 mlxsw_sp_vr_put(mlxsw_sp, vr);
8183 return ERR_PTR(err);
8186 static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
8188 const struct mlxsw_sp_rif_ops *ops = rif->ops;
8189 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8190 struct mlxsw_sp_fid *fid = rif->fid;
8191 u8 rif_entries = rif->rif_entries;
8192 u16 rif_index = rif->rif_index;
8193 struct mlxsw_sp_vr *vr;
8196 atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
8197 mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
8198 vr = &mlxsw_sp->router->vrs[rif->vr_id];
8200 if (netdev_offload_xstats_enabled(rif->dev,
8201 NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
8202 mlxsw_sp_rif_push_l3_stats(rif);
8203 mlxsw_sp_router_port_l3_stats_disable(rif);
8204 mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
8206 mlxsw_sp_rif_counters_free(rif);
8209 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
8210 mlxsw_sp_mr_rif_del(vr->mr_table[i], rif);
8211 ops->deconfigure(rif);
8212 if (fid)
8213 /* Loopback RIFs are not associated with a FID. */
8214 mlxsw_sp_fid_put(fid);
8215 mlxsw_sp->router->rifs[rif->rif_index] = NULL;
8218 mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
8220 mlxsw_sp_vr_put(mlxsw_sp, vr);
8223 void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
8224 struct net_device *dev)
8226 struct mlxsw_sp_rif *rif;
8228 mutex_lock(&mlxsw_sp->router->lock);
8229 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8232 mlxsw_sp_rif_destroy(rif);
8234 mutex_unlock(&mlxsw_sp->router->lock);
8238 mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
8239 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8241 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8243 params->vid = mlxsw_sp_port_vlan->vid;
8244 params->lag = mlxsw_sp_port->lagged;
8246 params->lag_id = mlxsw_sp_port->lag_id;
8248 params->system_port = mlxsw_sp_port->local_port;
8251 static struct mlxsw_sp_rif_subport *
8252 mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif)
8254 return container_of(rif, struct mlxsw_sp_rif_subport, common);
8257 static struct mlxsw_sp_rif *
8258 mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp,
8259 const struct mlxsw_sp_rif_params *params,
8260 struct netlink_ext_ack *extack)
8262 struct mlxsw_sp_rif_subport *rif_subport;
8263 struct mlxsw_sp_rif *rif;
8265 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev);
8267 return mlxsw_sp_rif_create(mlxsw_sp, params, extack);
8269 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8270 refcount_inc(&rif_subport->ref_count);
8274 static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
8276 struct mlxsw_sp_rif_subport *rif_subport;
8278 rif_subport = mlxsw_sp_rif_subport_rif(rif);
8279 if (!refcount_dec_and_test(&rif_subport->ref_count))
8282 mlxsw_sp_rif_destroy(rif);
8285 static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
8286 struct mlxsw_sp_rif_mac_profile *profile,
8287 struct netlink_ext_ack *extack)
8289 u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
8290 struct mlxsw_sp_router *router = mlxsw_sp->router;
8293 id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
8294 max_rif_mac_profiles, GFP_KERNEL);
8302 NL_SET_ERR_MSG_MOD(extack,
8303 "Exceeded number of supported router interface MAC profiles");
8308 static struct mlxsw_sp_rif_mac_profile *
8309 mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
8311 struct mlxsw_sp_rif_mac_profile *profile;
8313 profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
8319 static struct mlxsw_sp_rif_mac_profile *
8320 mlxsw_sp_rif_mac_profile_alloc(const char *mac)
8322 struct mlxsw_sp_rif_mac_profile *profile;
8324 profile = kzalloc(sizeof(*profile), GFP_KERNEL);
8325 if (!profile)
8326 return NULL;
8328 ether_addr_copy(profile->mac_prefix, mac);
8329 refcount_set(&profile->ref_count, 1);
8333 static struct mlxsw_sp_rif_mac_profile *
8334 mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
8336 struct mlxsw_sp_router *router = mlxsw_sp->router;
8337 struct mlxsw_sp_rif_mac_profile *profile;
8340 idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
8341 if (ether_addr_equal_masked(profile->mac_prefix, mac,
8342 mlxsw_sp->mac_mask))
8349 static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
8351 const struct mlxsw_sp *mlxsw_sp = priv;
8353 return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
8356 static u64 mlxsw_sp_rifs_occ_get(void *priv)
8358 const struct mlxsw_sp *mlxsw_sp = priv;
8360 return atomic_read(&mlxsw_sp->router->rifs_count);
8363 static struct mlxsw_sp_rif_mac_profile *
8364 mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
8365 struct netlink_ext_ack *extack)
8367 struct mlxsw_sp_rif_mac_profile *profile;
8370 profile = mlxsw_sp_rif_mac_profile_alloc(mac);
8371 if (!profile)
8372 return ERR_PTR(-ENOMEM);
8374 err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
8376 goto profile_index_alloc_err;
8378 atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
8381 profile_index_alloc_err:
8383 return ERR_PTR(err);
8386 static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
8389 struct mlxsw_sp_rif_mac_profile *profile;
8391 atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
8392 profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
8396 static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
8397 const char *mac, u8 *p_mac_profile,
8398 struct netlink_ext_ack *extack)
8400 struct mlxsw_sp_rif_mac_profile *profile;
8402 profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
8404 refcount_inc(&profile->ref_count);
8408 profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
8409 if (IS_ERR(profile))
8410 return PTR_ERR(profile);
8413 *p_mac_profile = profile->id;
8417 static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
8420 struct mlxsw_sp_rif_mac_profile *profile;
8422 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8424 if (WARN_ON(!profile))
8427 if (!refcount_dec_and_test(&profile->ref_count))
8430 mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
8433 static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
8435 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8436 struct mlxsw_sp_rif_mac_profile *profile;
8438 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8439 rif->mac_profile_id);
8440 if (WARN_ON(!profile))
8443 return refcount_read(&profile->ref_count) > 1;
8446 static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
8447 const char *new_mac)
8449 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
8450 struct mlxsw_sp_rif_mac_profile *profile;
8452 profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
8453 rif->mac_profile_id);
8454 if (WARN_ON(!profile))
8457 ether_addr_copy(profile->mac_prefix, new_mac);
8462 mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
8463 struct mlxsw_sp_rif *rif,
8464 const char *new_mac,
8465 struct netlink_ext_ack *extack)
8470 if (!mlxsw_sp_rif_mac_profile_is_shared(rif) &&
8471 !mlxsw_sp_rif_mac_profile_find(mlxsw_sp, new_mac))
8472 return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
8474 err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
8475 &mac_profile, extack);
8479 mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
8480 rif->mac_profile_id = mac_profile;
8482 return 0;
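/* Note: when the current profile is not shared with other RIFs and no
 * existing profile already matches the new MAC, editing the profile in
 * place avoids consuming another of the limited profile entries;
 * otherwise a matching or newly created profile is taken and the old
 * reference is dropped.
 */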
8485 __mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8486 struct net_device *l3_dev,
8487 struct netlink_ext_ack *extack)
8489 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8490 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
8491 struct mlxsw_sp_rif_params params = {
8494 u16 vid = mlxsw_sp_port_vlan->vid;
8495 struct mlxsw_sp_rif *rif;
8496 struct mlxsw_sp_fid *fid;
8499 mlxsw_sp_rif_subport_params_init(&params, mlxsw_sp_port_vlan);
8500 rif = mlxsw_sp_rif_subport_get(mlxsw_sp, &params, extack);
8501 if (IS_ERR(rif))
8502 return PTR_ERR(rif);
8504 /* FID was already created, just take a reference */
8505 fid = rif->ops->fid_get(rif, extack);
8506 err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
8508 goto err_fid_port_vid_map;
8510 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
8512 goto err_port_vid_learning_set;
8514 err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
8515 BR_STATE_FORWARDING);
8517 goto err_port_vid_stp_set;
8519 mlxsw_sp_port_vlan->fid = fid;
8523 err_port_vid_stp_set:
8524 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8525 err_port_vid_learning_set:
8526 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8527 err_fid_port_vid_map:
8528 mlxsw_sp_fid_put(fid);
8529 mlxsw_sp_rif_subport_put(rif);
8534 __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8536 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
8537 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
8538 struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid);
8539 u16 vid = mlxsw_sp_port_vlan->vid;
8541 if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID))
8544 mlxsw_sp_port_vlan->fid = NULL;
8545 mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING);
8546 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
8547 mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
8548 mlxsw_sp_fid_put(fid);
8549 mlxsw_sp_rif_subport_put(rif);
8553 mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
8554 struct net_device *l3_dev,
8555 struct netlink_ext_ack *extack)
8557 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8558 struct mlxsw_sp_rif *rif;
8561 mutex_lock(&mlxsw_sp->router->lock);
8562 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8566 err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
8569 mutex_unlock(&mlxsw_sp->router->lock);
8574 mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
8576 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
8578 mutex_lock(&mlxsw_sp->router->lock);
8579 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8580 mutex_unlock(&mlxsw_sp->router->lock);
8583 static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev,
8584 struct net_device *port_dev,
8585 unsigned long event, u16 vid,
8586 struct netlink_ext_ack *extack)
8588 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
8589 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
8591 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
8592 if (WARN_ON(!mlxsw_sp_port_vlan))
8597 return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan,
8600 __mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
8607 static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
8608 unsigned long event,
8609 struct netlink_ext_ack *extack)
8611 if (netif_is_any_bridge_port(port_dev) || netif_is_lag_port(port_dev))
8614 return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event,
8615 MLXSW_SP_DEFAULT_VID, extack);
8618 static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
8619 struct net_device *lag_dev,
8620 unsigned long event, u16 vid,
8621 struct netlink_ext_ack *extack)
8623 struct net_device *port_dev;
8624 struct list_head *iter;
8627 netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
8628 if (mlxsw_sp_port_dev_check(port_dev)) {
8629 err = mlxsw_sp_inetaddr_port_vlan_event(l3_dev,
8641 static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
8642 unsigned long event,
8643 struct netlink_ext_ack *extack)
8645 if (netif_is_bridge_port(lag_dev))
8648 return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event,
8649 MLXSW_SP_DEFAULT_VID, extack);
8652 static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp,
8653 struct net_device *l3_dev,
8654 unsigned long event,
8655 struct netlink_ext_ack *extack)
8657 struct mlxsw_sp_rif_params params = {
8660 struct mlxsw_sp_rif *rif;
8664 if (netif_is_bridge_master(l3_dev) && br_vlan_enabled(l3_dev)) {
8667 br_vlan_get_proto(l3_dev, &proto);
8668 if (proto == ETH_P_8021AD) {
8669 NL_SET_ERR_MSG_MOD(extack, "Adding an IP address to 802.1ad bridge is not supported");
8673 rif = mlxsw_sp_rif_create(mlxsw_sp, &params, extack);
8674 if (IS_ERR(rif))
8675 return PTR_ERR(rif);
8678 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
8679 mlxsw_sp_rif_destroy(rif);
8686 static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp,
8687 struct net_device *vlan_dev,
8688 unsigned long event,
8689 struct netlink_ext_ack *extack)
8691 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
8692 u16 vid = vlan_dev_vlan_id(vlan_dev);
8694 if (netif_is_bridge_port(vlan_dev))
8697 if (mlxsw_sp_port_dev_check(real_dev))
8698 return mlxsw_sp_inetaddr_port_vlan_event(vlan_dev, real_dev,
8699 event, vid, extack);
8700 else if (netif_is_lag_master(real_dev))
8701 return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
8703 else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev))
8704 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event,
8710 static bool mlxsw_sp_rif_macvlan_is_vrrp4(const u8 *mac)
8712 u8 vrrp4[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x01, 0x00 };
8713 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8715 return ether_addr_equal_masked(mac, vrrp4, mask);
8718 static bool mlxsw_sp_rif_macvlan_is_vrrp6(const u8 *mac)
8720 u8 vrrp6[ETH_ALEN] = { 0x00, 0x00, 0x5e, 0x00, 0x02, 0x00 };
8721 u8 mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
8723 return ether_addr_equal_masked(mac, vrrp6, mask);
8726 static int mlxsw_sp_rif_vrrp_op(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8727 const u8 *mac, bool adding)
8729 char ritr_pl[MLXSW_REG_RITR_LEN];
8730 u8 vrrp_id = adding ? mac[5] : 0;
8733 if (!mlxsw_sp_rif_macvlan_is_vrrp4(mac) &&
8734 !mlxsw_sp_rif_macvlan_is_vrrp6(mac))
8737 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8738 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8742 if (mlxsw_sp_rif_macvlan_is_vrrp4(mac))
8743 mlxsw_reg_ritr_if_vrrp_id_ipv4_set(ritr_pl, vrrp_id);
8745 mlxsw_reg_ritr_if_vrrp_id_ipv6_set(ritr_pl, vrrp_id);
8747 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
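/* Note: per RFC 5798, VRRP virtual MACs use the fixed prefix
 * 00:00:5e:00:01 (IPv4) or 00:00:5e:00:02 (IPv6) with the VRID in the
 * last octet, which is why mac[5] is programmed as the VRRP ID above.
 */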
8750 static int mlxsw_sp_rif_macvlan_add(struct mlxsw_sp *mlxsw_sp,
8751 const struct net_device *macvlan_dev,
8752 struct netlink_ext_ack *extack)
8754 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8755 struct mlxsw_sp_rif *rif;
8758 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8760 NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces");
8764 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8765 mlxsw_sp_fid_index(rif->fid), true);
8769 err = mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index,
8770 macvlan_dev->dev_addr, true);
8772 goto err_rif_vrrp_add;
8774 /* Make sure the bridge driver does not have this MAC pointing at
8777 if (rif->ops->fdb_del)
8778 rif->ops->fdb_del(rif, macvlan_dev->dev_addr);
8783 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8784 mlxsw_sp_fid_index(rif->fid), false);
8788 static void __mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8789 const struct net_device *macvlan_dev)
8791 struct macvlan_dev *vlan = netdev_priv(macvlan_dev);
8792 struct mlxsw_sp_rif *rif;
8794 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan->lowerdev);
8795 /* If we do not have a RIF, then we already took care of
8796 * removing the macvlan's MAC during RIF deletion.
8800 mlxsw_sp_rif_vrrp_op(mlxsw_sp, rif->rif_index, macvlan_dev->dev_addr,
8802 mlxsw_sp_rif_fdb_op(mlxsw_sp, macvlan_dev->dev_addr,
8803 mlxsw_sp_fid_index(rif->fid), false);
8806 void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
8807 const struct net_device *macvlan_dev)
8809 mutex_lock(&mlxsw_sp->router->lock);
8810 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8811 mutex_unlock(&mlxsw_sp->router->lock);
8814 static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
8815 struct net_device *macvlan_dev,
8816 unsigned long event,
8817 struct netlink_ext_ack *extack)
8821 return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack);
8823 __mlxsw_sp_rif_macvlan_del(mlxsw_sp, macvlan_dev);
8830 static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
8831 struct net_device *dev,
8832 unsigned long event,
8833 struct netlink_ext_ack *extack)
8835 if (mlxsw_sp_port_dev_check(dev))
8836 return mlxsw_sp_inetaddr_port_event(dev, event, extack);
8837 else if (netif_is_lag_master(dev))
8838 return mlxsw_sp_inetaddr_lag_event(dev, event, extack);
8839 else if (netif_is_bridge_master(dev))
8840 return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event,
8842 else if (is_vlan_dev(dev))
8843 return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event,
8845 else if (netif_is_macvlan(dev))
8846 return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event,
8852 static int mlxsw_sp_inetaddr_event(struct notifier_block *nb,
8853 unsigned long event, void *ptr)
8855 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
8856 struct net_device *dev = ifa->ifa_dev->dev;
8857 struct mlxsw_sp_router *router;
8858 struct mlxsw_sp_rif *rif;
8861 /* NETDEV_UP event is handled by mlxsw_sp_inetaddr_valid_event */
8862 if (event == NETDEV_UP)
8865 router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb);
8866 mutex_lock(&router->lock);
8867 rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev);
8868 if (!mlxsw_sp_rif_should_config(rif, dev, event))
8871 err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL);
8873 mutex_unlock(&router->lock);
8874 return notifier_from_errno(err);
8877 int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
8878 unsigned long event, void *ptr)
8880 struct in_validator_info *ivi = (struct in_validator_info *) ptr;
8881 struct net_device *dev = ivi->ivi_dev->dev;
8882 struct mlxsw_sp *mlxsw_sp;
8883 struct mlxsw_sp_rif *rif;
8886 mlxsw_sp = mlxsw_sp_lower_get(dev);
8890 mutex_lock(&mlxsw_sp->router->lock);
8891 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8892 if (!mlxsw_sp_rif_should_config(rif, dev, event))
8895 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
8897 mutex_unlock(&mlxsw_sp->router->lock);
8898 return notifier_from_errno(err);
8901 struct mlxsw_sp_inet6addr_event_work {
8902 struct work_struct work;
8903 struct mlxsw_sp *mlxsw_sp;
8904 struct net_device *dev;
8905 unsigned long event;
8908 static void mlxsw_sp_inet6addr_event_work(struct work_struct *work)
8910 struct mlxsw_sp_inet6addr_event_work *inet6addr_work =
8911 container_of(work, struct mlxsw_sp_inet6addr_event_work, work);
8912 struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp;
8913 struct net_device *dev = inet6addr_work->dev;
8914 unsigned long event = inet6addr_work->event;
8915 struct mlxsw_sp_rif *rif;
8918 mutex_lock(&mlxsw_sp->router->lock);
8920 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8921 if (!mlxsw_sp_rif_should_config(rif, dev, event))
8924 __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL);
8926 mutex_unlock(&mlxsw_sp->router->lock);
8929 kfree(inet6addr_work);
8932 /* Called with rcu_read_lock() */
8933 static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
8934 unsigned long event, void *ptr)
8936 struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr;
8937 struct mlxsw_sp_inet6addr_event_work *inet6addr_work;
8938 struct net_device *dev = if6->idev->dev;
8939 struct mlxsw_sp_router *router;
8941 /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */
8942 if (event == NETDEV_UP)
8945 inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC);
8946 if (!inet6addr_work)
8947 return NOTIFY_BAD;
8949 router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb);
8950 INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work);
8951 inet6addr_work->mlxsw_sp = router->mlxsw_sp;
8952 inet6addr_work->dev = dev;
8953 inet6addr_work->event = event;
8955 mlxsw_core_schedule_work(&inet6addr_work->work);
8957 return NOTIFY_DONE;
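/* Note: unlike its IPv4 counterpart above, which takes the router
 * mutex directly, this handler defers to a work item, presumably
 * because the inet6addr notifier may be invoked in an atomic context
 * where sleeping on the mutex is not allowed.
 */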
8960 int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
8961 unsigned long event, void *ptr)
8963 struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
8964 struct net_device *dev = i6vi->i6vi_dev->dev;
8965 struct mlxsw_sp *mlxsw_sp;
8966 struct mlxsw_sp_rif *rif;
8969 mlxsw_sp = mlxsw_sp_lower_get(dev);
8973 mutex_lock(&mlxsw_sp->router->lock);
8974 rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
8975 if (!mlxsw_sp_rif_should_config(rif, dev, event))
8978 err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
8980 mutex_unlock(&mlxsw_sp->router->lock);
8981 return notifier_from_errno(err);
8984 static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
8985 const char *mac, int mtu, u8 mac_profile)
8987 char ritr_pl[MLXSW_REG_RITR_LEN];
8990 mlxsw_reg_ritr_rif_pack(ritr_pl, rif_index);
8991 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
8995 mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
8996 mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
8997 mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
8998 mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
8999 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
9003 mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
9004 struct mlxsw_sp_rif *rif,
9005 struct netlink_ext_ack *extack)
9007 struct net_device *dev = rif->dev;
9012 fid_index = mlxsw_sp_fid_index(rif->fid);
9014 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false);
9018 old_mac_profile = rif->mac_profile_id;
9019 err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
9022 goto err_rif_mac_profile_replace;
9024 err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
9025 dev->mtu, rif->mac_profile_id);
9029 err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, fid_index, true);
9031 goto err_rif_fdb_op;
9033 if (rif->mtu != dev->mtu) {
9034 struct mlxsw_sp_vr *vr;
9037 /* The RIF is relevant only to its mr_table instance, as unlike
9038 * unicast routing, in multicast routing a RIF cannot be shared
9039 * between several multicast routing tables.
9041 vr = &mlxsw_sp->router->vrs[rif->vr_id];
9042 for (i = 0; i < MLXSW_SP_L3_PROTO_MAX; i++)
9043 mlxsw_sp_mr_rif_mtu_update(vr->mr_table[i],
9047 ether_addr_copy(rif->addr, dev->dev_addr);
9048 rif->mtu = dev->mtu;
9050 netdev_dbg(dev, "Updated RIF=%d\n", rif->rif_index);
9055 mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
9058 mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
9059 err_rif_mac_profile_replace:
9060 mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
9064 static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
9065 struct netdev_notifier_pre_changeaddr_info *info)
9067 struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
9068 struct mlxsw_sp_rif_mac_profile *profile;
9069 struct netlink_ext_ack *extack;
9070 u8 max_rif_mac_profiles;
9073 extack = netdev_notifier_info_to_extack(&info->info);
9075 profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
9079 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
9080 occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
9081 if (occ < max_rif_mac_profiles)
9084 if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
9087 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
9088 return -ENOBUFS;
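/* Note: NETDEV_PRE_CHANGEADDR fires before the address actually
 * changes, which lets the driver veto a MAC change that would require
 * allocating a new MAC profile while all profiles are in use and the
 * current one is shared with other RIFs.
 */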
9091 static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
9094 case NETDEV_OFFLOAD_XSTATS_ENABLE:
9095 case NETDEV_OFFLOAD_XSTATS_DISABLE:
9096 case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
9097 case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
static int
mlxsw_sp_router_port_offload_xstats_cmd(struct mlxsw_sp_rif *rif,
					unsigned long event,
					struct netdev_notifier_offload_xstats_info *info)
{
	switch (info->type) {
	case NETDEV_OFFLOAD_XSTATS_TYPE_L3:
		break;
	default:
		return 0;
	}

	switch (event) {
	case NETDEV_OFFLOAD_XSTATS_ENABLE:
		return mlxsw_sp_router_port_l3_stats_enable(rif);
	case NETDEV_OFFLOAD_XSTATS_DISABLE:
		mlxsw_sp_router_port_l3_stats_disable(rif);
		return 0;
	case NETDEV_OFFLOAD_XSTATS_REPORT_USED:
		mlxsw_sp_router_port_l3_stats_report_used(rif, info);
		return 0;
	case NETDEV_OFFLOAD_XSTATS_REPORT_DELTA:
		return mlxsw_sp_router_port_l3_stats_report_delta(rif, info);
	default:
		WARN_ON_ONCE(1);
		return 0;
	}
}

static int
mlxsw_sp_netdevice_offload_xstats_cmd(struct mlxsw_sp *mlxsw_sp,
				      struct net_device *dev,
				      unsigned long event,
				      struct netdev_notifier_offload_xstats_info *info)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	return mlxsw_sp_router_port_offload_xstats_cmd(rif, event, info);
}

static bool mlxsw_sp_is_router_event(unsigned long event)
{
	switch (event) {
	case NETDEV_PRE_CHANGEADDR:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGEMTU:
		return true;
	default:
		return false;
	}
}

static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
						unsigned long event, void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_rif *rif;

	mlxsw_sp = mlxsw_sp_lower_get(dev);
	if (!mlxsw_sp)
		return 0;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
	if (!rif)
		return 0;

	switch (event) {
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
		return mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
	case NETDEV_PRE_CHANGEADDR:
		return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}

static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp,
				  struct net_device *l3_dev,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *rif;

	/* If netdev is already associated with a RIF, then we need to
	 * destroy it and create a new one with the new virtual router ID.
	 */
	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (rif)
		__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN,
					  extack);

	return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack);
}

static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp,
				    struct net_device *l3_dev)
{
	struct mlxsw_sp_rif *rif;

	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
	if (!rif)
		return;
	__mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL);
}

static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *info = ptr;

	if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
		return false;
	return netif_is_l3_master(info->upper_dev);
}

static int
mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
			     struct netdev_notifier_changeupper_info *info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
	int err = 0;

	/* We do not create a RIF for a macvlan, but only use it to
	 * direct more MAC addresses to the router.
	 */
	if (!mlxsw_sp || netif_is_macvlan(l3_dev))
		return 0;

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		break;
	case NETDEV_CHANGEUPPER:
		if (info->linking) {
			struct netlink_ext_ack *extack;

			extack = netdev_notifier_info_to_extack(&info->info);
			err = mlxsw_sp_port_vrf_join(mlxsw_sp, l3_dev, extack);
		} else {
			mlxsw_sp_port_vrf_leave(mlxsw_sp, l3_dev);
		}
		break;
	}

	return err;
}

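/* Top-level netdevice notifier of the router. Events are classified under
 * the router lock and handed to exactly one handler: offloaded-xstats
 * requests first, then IP-in-IP overlay and underlay devices, then RIF
 * (MTU/MAC) events and finally VRF enslavement changes.
 */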
static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_router *router;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;

	router = container_of(nb, struct mlxsw_sp_router, netdevice_nb);
	mlxsw_sp = router->mlxsw_sp;

	mutex_lock(&mlxsw_sp->router->lock);

	if (mlxsw_sp_is_offload_xstats_event(event))
		err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
							    event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev))
		err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev,
						       event, ptr);
	else if (mlxsw_sp_is_router_event(event))
		err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr);
	else if (mlxsw_sp_is_vrf_event(event, ptr))
		err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);

	mutex_unlock(&mlxsw_sp->router->lock);

	return notifier_from_errno(err);
}

static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
					struct netdev_nested_priv *priv)
{
	struct mlxsw_sp_rif *rif = (struct mlxsw_sp_rif *)priv->data;

	if (!netif_is_macvlan(dev))
		return 0;

	return mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
				   mlxsw_sp_fid_index(rif->fid), false);
}

static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
{
	struct netdev_nested_priv priv = {
		.data = (void *)rif,
	};

	if (!netif_is_macvlan_port(rif->dev))
		return 0;

	netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
	return netdev_walk_all_upper_dev_rcu(rif->dev,
					     __mlxsw_sp_rif_macvlan_flush, &priv);
}

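/* What follows are the per-type implementations of mlxsw_sp_rif_ops for
 * the supported RIF types: subport, FID, VLAN (in Spectrum-1 and
 * Spectrum-2 flavours) and IP-in-IP loopback. Each type provides a
 * configure/deconfigure pair and, where applicable, FID lookup and FDB
 * deletion callbacks.
 */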
static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
				       const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_subport *rif_subport;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	refcount_set(&rif_subport->ref_count, 1);
	rif_subport->vid = params->vid;
	rif_subport->lag = params->lag;
	if (params->lag)
		rif_subport->lag_id = params->lag_id;
	else
		rif_subport->system_port = params->system_port;
}

static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif_subport *rif_subport;
	char ritr_pl[MLXSW_REG_RITR_LEN];
	u16 efid;

	rif_subport = mlxsw_sp_rif_subport_rif(rif);
	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
			    rif->rif_index, rif->vr_id, rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
	efid = mlxsw_sp_fid_index(rif->fid);
	mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
				  rif_subport->lag ? rif_subport->lag_id :
						     rif_subport->system_port,
				  efid, rif_subport->vid);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
					  struct netlink_ext_ack *extack)
{
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_subport_op(rif, true);
	if (err)
		goto err_rif_subport_op;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
	if (err)
		goto err_fid_rif_set;

	return 0;

err_fid_rif_set:
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
	mlxsw_sp_rif_subport_op(rif, false);
err_rif_subport_op:
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
	return err;
}

static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_unset(fid);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_rif_subport_op(rif, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_subport_fid_get(struct mlxsw_sp_rif *rif,
			     struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_rfid_get(rif->mlxsw_sp, rif->rif_index);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
	.type			= MLXSW_SP_RIF_TYPE_SUBPORT,
	.rif_size		= sizeof(struct mlxsw_sp_rif_subport),
	.setup			= mlxsw_sp_rif_subport_setup,
	.configure		= mlxsw_sp_rif_subport_configure,
	.deconfigure		= mlxsw_sp_rif_subport_deconfigure,
	.fid_get		= mlxsw_sp_rif_subport_fid_get,
};

static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
{
	enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
			    rif->dev->mtu);
	mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
	mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
	mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

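/* The "router port" is a virtual local port one past the last physical
 * port of the device. It represents the router in FID flood tables, so
 * flooded traffic can be directed at the routing function like at any
 * other port member.
 */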
u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}

static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_fid_op(rif, fid_index, true);
	if (err)
		goto err_rif_fid_op;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
	if (err)
		goto err_fid_rif_set;

	return 0;

err_fid_rif_set:
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_fid_op(rif, fid_index, false);
err_rif_fid_op:
	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
	return err;
}

static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 fid_index = mlxsw_sp_fid_index(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_fid *fid = rif->fid;

	mlxsw_sp_fid_rif_unset(fid);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_fid_op(rif, fid_index, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}

static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
			 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
}

static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct switchdev_notifier_fdb_info info = {};
	struct net_device *dev;

	dev = br_fdb_find_port(rif->dev, mac, 0);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = 0;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = {
	.type			= MLXSW_SP_RIF_TYPE_FID,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp_rif_fid_configure,
	.deconfigure		= mlxsw_sp_rif_fid_deconfigure,
	.fid_get		= mlxsw_sp_rif_fid_fid_get,
	.fdb_del		= mlxsw_sp_rif_fid_fdb_del,
};

static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
			  struct netlink_ext_ack *extack)
{
	struct net_device *br_dev;
	u16 vid;
	int err;

	if (is_vlan_dev(rif->dev)) {
		vid = vlan_dev_vlan_id(rif->dev);
		br_dev = vlan_dev_real_dev(rif->dev);
		if (WARN_ON(!netif_is_bridge_master(br_dev)))
			return ERR_PTR(-EINVAL);
	} else {
		err = br_vlan_get_pvid(rif->dev, &vid);
		if (err < 0 || !vid) {
			NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
			return ERR_PTR(-EINVAL);
		}
	}

	return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid);
}

static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
	struct switchdev_notifier_fdb_info info = {};
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct net_device *br_dev;
	struct net_device *dev;

	br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
	dev = br_fdb_find_port(br_dev, mac, vid);
	if (!dev)
		return;

	info.addr = mac;
	info.vid = vid;
	call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, dev, &info.info,
				 NULL);
}

static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
				bool enable)
{
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
				    rif->dev->mtu, rif->dev->dev_addr,
				    rif->mac_profile_id, vid, efid);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
				       struct netlink_ext_ack *extack)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	u8 mac_profile;
	int err;

	err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
					   &mac_profile, extack);
	if (err)
		return err;
	rif->mac_profile_id = mac_profile;

	err = mlxsw_sp_rif_vlan_op(rif, vid, efid, true);
	if (err)
		goto err_rif_vlan_fid_op;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_mc_flood_set;

	err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
				     mlxsw_sp_router_port(mlxsw_sp), true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
				  mlxsw_sp_fid_index(rif->fid), true);
	if (err)
		goto err_rif_fdb_op;

	err = mlxsw_sp_fid_rif_set(rif->fid, rif);
	if (err)
		goto err_fid_rif_set;

	return 0;

err_fid_rif_set:
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
err_rif_vlan_fid_op:
	mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
	return err;
}

static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
	u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;

	mlxsw_sp_fid_rif_unset(rif->fid);
	mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
			    mlxsw_sp_fid_index(rif->fid), false);
	mlxsw_sp_rif_macvlan_flush(rif);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
			       mlxsw_sp_router_port(mlxsw_sp), false);
	mlxsw_sp_rif_vlan_op(rif, vid, 0, false);
	mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}

static int mlxsw_sp1_rif_vlan_configure(struct mlxsw_sp_rif *rif,
					struct netlink_ext_ack *extack)
{
	return mlxsw_sp_rif_vlan_configure(rif, 0, extack);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_vlan_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp1_rif_vlan_configure,
	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
};

static int mlxsw_sp2_rif_vlan_configure(struct mlxsw_sp_rif *rif,
					struct netlink_ext_ack *extack)
{
	u16 efid = mlxsw_sp_fid_index(rif->fid);

	return mlxsw_sp_rif_vlan_configure(rif, efid, extack);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_vlan_ops = {
	.type			= MLXSW_SP_RIF_TYPE_VLAN,
	.rif_size		= sizeof(struct mlxsw_sp_rif),
	.configure		= mlxsw_sp2_rif_vlan_configure,
	.deconfigure		= mlxsw_sp_rif_vlan_deconfigure,
	.fid_get		= mlxsw_sp_rif_vlan_fid_get,
	.fdb_del		= mlxsw_sp_rif_vlan_fdb_del,
};

static struct mlxsw_sp_rif_ipip_lb *
mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif)
{
	return container_of(rif, struct mlxsw_sp_rif_ipip_lb, common);
}

static void
mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
			   const struct mlxsw_sp_rif_params *params)
{
	struct mlxsw_sp_rif_params_ipip_lb *params_lb;
	struct mlxsw_sp_rif_ipip_lb *rif_lb;

	params_lb = container_of(params, struct mlxsw_sp_rif_params_ipip_lb,
				 common);
	rif_lb = mlxsw_sp_rif_ipip_lb_rif(rif);
	rif_lb->lb_config = params_lb->lb_config;
}

static int
mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;
	int err;

	ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_vr))
		return PTR_ERR(ul_vr);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = ul_vr->id;
	lb_rif->ul_rif_id = 0;
	++ul_vr->rif_count;
	return 0;

err_loopback_op:
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
	return err;
}

static void mlxsw_sp1_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_vr *ul_vr;

	ul_vr = &mlxsw_sp->router->vrs[lb_rif->ul_vr_id];
	mlxsw_sp_rif_ipip_lb_op(lb_rif, ul_vr->id, 0, false);

	--ul_vr->rif_count;
	mlxsw_sp_vr_put(mlxsw_sp, ul_vr);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp1_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup			= mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp1_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp1_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp1_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp1_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp1_rif_ipip_lb_ops,
};

static int
mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	char ritr_pl[MLXSW_REG_RITR_LEN];

	mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
			    ul_rif->rif_index, ul_rif->vr_id, IP_MAX_MTU);
	mlxsw_reg_ritr_loopback_protocol_set(ritr_pl,
					     MLXSW_REG_RITR_LOOPBACK_GENERIC);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}

static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
		       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif *ul_rif;
	u8 rif_entries = 1;
	u16 rif_index;
	int err;

	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index, rif_entries);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
		return ERR_PTR(err);
	}

	ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
	if (!ul_rif) {
		err = -ENOMEM;
		goto err_rif_alloc;
	}

	mlxsw_sp->router->rifs[rif_index] = ul_rif;
	ul_rif->mlxsw_sp = mlxsw_sp;
	ul_rif->rif_entries = rif_entries;
	err = mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, true);
	if (err)
		goto ul_rif_op_err;

	atomic_add(rif_entries, &mlxsw_sp->router->rifs_count);
	return ul_rif;

ul_rif_op_err:
	mlxsw_sp->router->rifs[rif_index] = NULL;
	kfree(ul_rif);
err_rif_alloc:
	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	u8 rif_entries = ul_rif->rif_entries;
	u16 rif_index = ul_rif->rif_index;

	atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
	mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
	mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
	kfree(ul_rif);
	mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
}

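/* Get a reference on the underlay RIF of the given table. The first user
 * creates the RIF and sets the reference count to one; later users only
 * bump the count. The paired put helper destroys the RIF when the last
 * reference is dropped.
 */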
static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
		    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_vr *vr;
	int err;

	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id, extack);
	if (IS_ERR(vr))
		return ERR_CAST(vr);

	if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
		return vr->ul_rif;

	vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
	if (IS_ERR(vr->ul_rif)) {
		err = PTR_ERR(vr->ul_rif);
		goto err_ul_rif_create;
	}

	vr->rif_count++;
	refcount_set(&vr->ul_rif_refcnt, 1);

	return vr->ul_rif;

err_ul_rif_create:
	mlxsw_sp_vr_put(mlxsw_sp, vr);
	return ERR_PTR(err);
}

static void mlxsw_sp_ul_rif_put(struct mlxsw_sp_rif *ul_rif)
{
	struct mlxsw_sp *mlxsw_sp = ul_rif->mlxsw_sp;
	struct mlxsw_sp_vr *vr;

	vr = &mlxsw_sp->router->vrs[ul_rif->vr_id];

	if (!refcount_dec_and_test(&vr->ul_rif_refcnt))
		return;

	vr->rif_count--;
	mlxsw_sp_ul_rif_destroy(ul_rif);
	mlxsw_sp_vr_put(mlxsw_sp, vr);
}

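/* Locked wrappers around the underlay RIF get/put helpers, exported to
 * other parts of the driver. A minimal usage sketch (mirroring
 * mlxsw_sp_lb_rif_init() below):
 *
 *	u16 ul_rif_index;
 *	int err;
 *
 *	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
 *					 &ul_rif_index);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_sp_router_ul_rif_put(mlxsw_sp, ul_rif_index);
 */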
int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
			       u16 *ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;
	int err = 0;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif)) {
		err = PTR_ERR(ul_rif);
		goto out;
	}
	*ul_rif_index = ul_rif->rif_index;
out:
	mutex_unlock(&mlxsw_sp->router->lock);
	return err;
}

void mlxsw_sp_router_ul_rif_put(struct mlxsw_sp *mlxsw_sp, u16 ul_rif_index)
{
	struct mlxsw_sp_rif *ul_rif;

	mutex_lock(&mlxsw_sp->router->lock);
	ul_rif = mlxsw_sp->router->rifs[ul_rif_index];
	if (WARN_ON(!ul_rif))
		goto out;

	mlxsw_sp_ul_rif_put(ul_rif);
out:
	mutex_unlock(&mlxsw_sp->router->lock);
}

static int
mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;
	int err;

	ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
	if (IS_ERR(ul_rif))
		return PTR_ERR(ul_rif);

	err = mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, ul_rif->rif_index, true);
	if (err)
		goto err_loopback_op;

	lb_rif->ul_vr_id = 0;
	lb_rif->ul_rif_id = ul_rif->rif_index;

	return 0;

err_loopback_op:
	mlxsw_sp_ul_rif_put(ul_rif);
	return err;
}

static void mlxsw_sp2_rif_ipip_lb_deconfigure(struct mlxsw_sp_rif *rif)
{
	struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
	struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
	struct mlxsw_sp_rif *ul_rif;

	ul_rif = mlxsw_sp_rif_by_index(mlxsw_sp, lb_rif->ul_rif_id);
	mlxsw_sp_rif_ipip_lb_op(lb_rif, 0, lb_rif->ul_rif_id, false);
	mlxsw_sp_ul_rif_put(ul_rif);
}

static const struct mlxsw_sp_rif_ops mlxsw_sp2_rif_ipip_lb_ops = {
	.type			= MLXSW_SP_RIF_TYPE_IPIP_LB,
	.rif_size		= sizeof(struct mlxsw_sp_rif_ipip_lb),
	.setup			= mlxsw_sp_rif_ipip_lb_setup,
	.configure		= mlxsw_sp2_rif_ipip_lb_configure,
	.deconfigure		= mlxsw_sp2_rif_ipip_lb_deconfigure,
};

static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
	[MLXSW_SP_RIF_TYPE_SUBPORT]	= &mlxsw_sp_rif_subport_ops,
	[MLXSW_SP_RIF_TYPE_VLAN]	= &mlxsw_sp2_rif_vlan_ops,
	[MLXSW_SP_RIF_TYPE_FID]		= &mlxsw_sp_rif_fid_ops,
	[MLXSW_SP_RIF_TYPE_IPIP_LB]	= &mlxsw_sp2_rif_ipip_lb_ops,
};

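/* The RIF index space is managed through a genalloc pool so that
 * multi-entry allocations can be order-aligned, which matters when a RIF
 * consumes more than one index entry. The pool is based at a nonzero
 * offset (MLXSW_SP_ROUTER_GENALLOC_OFFSET), presumably because
 * gen_pool_alloc() uses an address of zero to signal failure.
 */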
static int mlxsw_sp_rifs_table_init(struct mlxsw_sp *mlxsw_sp)
{
	struct gen_pool *rifs_table;
	int err;

	rifs_table = gen_pool_create(0, -1);
	if (!rifs_table)
		return -ENOMEM;

	gen_pool_set_algo(rifs_table, gen_pool_first_fit_order_align,
			  NULL);
	err = gen_pool_add(rifs_table, MLXSW_SP_ROUTER_GENALLOC_OFFSET,
			   MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS), -1);
	if (err)
		goto err_gen_pool_add;

	mlxsw_sp->router->rifs_table = rifs_table;

	return 0;

err_gen_pool_add:
	gen_pool_destroy(rifs_table);
	return err;
}

static void mlxsw_sp_rifs_table_fini(struct mlxsw_sp *mlxsw_sp)
{
	gen_pool_destroy(mlxsw_sp->router->rifs_table);
}

static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
	u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	struct mlxsw_core *core = mlxsw_sp->core;
	int err;

	if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
		return -EIO;
	mlxsw_sp->router->max_rif_mac_profile =
		MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);

	mlxsw_sp->router->rifs = kcalloc(max_rifs,
					 sizeof(struct mlxsw_sp_rif *),
					 GFP_KERNEL);
	if (!mlxsw_sp->router->rifs)
		return -ENOMEM;

	err = mlxsw_sp_rifs_table_init(mlxsw_sp);
	if (err)
		goto err_rifs_table_init;

	idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
	atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
	atomic_set(&mlxsw_sp->router->rifs_count, 0);
	devl_resource_occ_get_register(devlink,
				       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
				       mlxsw_sp_rif_mac_profiles_occ_get,
				       mlxsw_sp);
	devl_resource_occ_get_register(devlink,
				       MLXSW_SP_RESOURCE_RIFS,
				       mlxsw_sp_rifs_occ_get,
				       mlxsw_sp);

	return 0;

err_rifs_table_init:
	kfree(mlxsw_sp->router->rifs);
	return err;
}

static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
	int i;

	WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
		WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);

	devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
	devl_resource_occ_get_unregister(devlink,
					 MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
	WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
	idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
	mlxsw_sp_rifs_table_fini(mlxsw_sp);
	kfree(mlxsw_sp->router->rifs);
}

static int
mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp)
{
	char tigcr_pl[MLXSW_REG_TIGCR_LEN];

	mlxsw_reg_tigcr_pack(tigcr_pl, true, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl);
}

static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	int err;

	INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);

	err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
	if (err)
		return err;
	err = mlxsw_sp_ipip_ecn_decap_init(mlxsw_sp);
	if (err)
		return err;

	return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}

static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
	return mlxsw_sp_ipips_init(mlxsw_sp);
}

static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
	return mlxsw_sp_ipips_init(mlxsw_sp);
}

static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
	WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
}

static void mlxsw_sp_router_fib_dump_flush(struct notifier_block *nb)
{
	struct mlxsw_sp_router *router;

	/* Flush pending FIB notifications and then flush the device's
	 * table before requesting another dump. The FIB notification
	 * block is unregistered, so no need to take RTNL.
	 */
	mlxsw_core_flush_owq();
	router = container_of(nb, struct mlxsw_sp_router, fib_nb);
	mlxsw_sp_router_fib_flush(router->mlxsw_sp);
}

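/* Multipath hash configuration. The bitmaps below collect the RECR2
 * header and field enables that correspond to the kernel's
 * fib_multipath_hash_policy settings; the IPv4 and IPv6 init routines
 * translate policies 0-3 (outer L3, L4, inner L3, custom field set) into
 * register bits, and inc_parsing_depth is set whenever inner headers must
 * be parsed.
 */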
#ifdef CONFIG_IP_ROUTE_MULTIPATH
struct mlxsw_sp_mp_hash_config {
	DECLARE_BITMAP(headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT);
	DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT);
	DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT);
	bool inc_parsing_depth;
};

#define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \
	bitmap_set(_headers, MLXSW_REG_RECR2_##_header, 1)

#define MLXSW_SP_MP_HASH_FIELD_SET(_fields, _field) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, 1)

#define MLXSW_SP_MP_HASH_FIELD_RANGE_SET(_fields, _field, _nr) \
	bitmap_set(_fields, MLXSW_REG_RECR2_##_field, _nr)

static void mlxsw_sp_mp_hash_inner_l3(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *inner_headers = config->inner_headers;
	unsigned long *inner_fields = config->inner_fields;

	/* IPv4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
	/* IPv6 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
	MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
}

static void mlxsw_sp_mp4_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
}

static void
mlxsw_sp_mp_hash_inner_custom(struct mlxsw_sp_mp_hash_config *config,
			      u32 hash_fields)
{
	unsigned long *inner_headers = config->inner_headers;
	unsigned long *inner_fields = config->inner_fields;

	/* IPv4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV4_EN_TCP_UDP);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_SIP0, 4);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV4_DIP0, 4);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV4_PROTOCOL);
	/* IPv6 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, IPV6_EN_TCP_UDP);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP) {
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_SIP0_7);
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_SIP8, 8);
	}
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP) {
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_DIP0_7);
		MLXSW_SP_MP_HASH_FIELD_RANGE_SET(inner_fields, INNER_IPV6_DIP8, 8);
	}
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_NEXT_HEADER);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_IPV6_FLOW_LABEL);
	/* L4 inner */
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV4);
	MLXSW_SP_MP_HASH_HEADER_SET(inner_headers, TCP_UDP_EN_IPV6);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_SPORT);
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
		MLXSW_SP_MP_HASH_FIELD_SET(inner_fields, INNER_TCP_UDP_DPORT);
}

static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_mp_hash_config *config)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;
	u32 hash_fields;

	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
	case 0:
		mlxsw_sp_mp4_hash_outer_addr(config);
		break;
	case 1:
		mlxsw_sp_mp4_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		break;
	case 2:
		/* Outer */
		mlxsw_sp_mp4_hash_outer_addr(config);
		/* Inner */
		mlxsw_sp_mp_hash_inner_l3(config);
		break;
	case 3:
		hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
		/* Outer */
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_SIP0, 4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV4_DIP0, 4);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV4_PROTOCOL);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		/* Inner */
		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
		break;
	}
}

static void mlxsw_sp_mp6_hash_outer_addr(struct mlxsw_sp_mp_hash_config *config)
{
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
	MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
	MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
	MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
}

static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_mp_hash_config *config)
{
	u32 hash_fields = ip6_multipath_hash_fields(mlxsw_sp_net(mlxsw_sp));
	unsigned long *headers = config->headers;
	unsigned long *fields = config->fields;

	switch (ip6_multipath_hash_policy(mlxsw_sp_net(mlxsw_sp))) {
	case 0:
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		break;
	case 1:
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		break;
	case 2:
		/* Outer */
		mlxsw_sp_mp6_hash_outer_addr(config);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		/* Inner */
		mlxsw_sp_mp_hash_inner_l3(config);
		config->inc_parsing_depth = true;
		break;
	case 3:
		/* Outer */
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_NOT_TCP_NOT_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV6_EN_TCP_UDP);
		MLXSW_SP_MP_HASH_HEADER_SET(headers, TCP_UDP_EN_IPV6);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_SIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_SIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP) {
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_DIP0_7);
			MLXSW_SP_MP_HASH_FIELD_RANGE_SET(fields, IPV6_DIP8, 8);
		}
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_NEXT_HEADER);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_SPORT);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
			MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT);
		/* Inner */
		mlxsw_sp_mp_hash_inner_custom(config, hash_fields);
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK)
			config->inc_parsing_depth = true;
		break;
	}
}

static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp,
						 bool old_inc_parsing_depth,
						 bool new_inc_parsing_depth)
{
	int err;

	if (!old_inc_parsing_depth && new_inc_parsing_depth) {
		err = mlxsw_sp_parsing_depth_inc(mlxsw_sp);
		if (err)
			return err;
		mlxsw_sp->router->inc_parsing_depth = true;
	} else if (old_inc_parsing_depth && !new_inc_parsing_depth) {
		mlxsw_sp_parsing_depth_dec(mlxsw_sp);
		mlxsw_sp->router->inc_parsing_depth = false;
	}

	return 0;
}

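/* Program the RECR2 register from the collected configuration. The hash
 * seed is derived from the switch base MAC, so it is stable across
 * reloads of a single switch but differs between switches, which
 * presumably helps avoid hash polarization when several of them forward
 * the same flows.
 */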
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	bool old_inc_parsing_depth, new_inc_parsing_depth;
	struct mlxsw_sp_mp_hash_config config = {};
	char recr2_pl[MLXSW_REG_RECR2_LEN];
	unsigned long bit;
	u32 seed;
	int err;

	seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
	mlxsw_reg_recr2_pack(recr2_pl, seed);
	mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
	mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);

	old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;
	new_inc_parsing_depth = config.inc_parsing_depth;
	err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp,
						    old_inc_parsing_depth,
						    new_inc_parsing_depth);
	if (err)
		return err;

	for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT)
		mlxsw_reg_recr2_outer_header_fields_enable_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_headers, __MLXSW_REG_RECR2_HEADER_CNT)
		mlxsw_reg_recr2_inner_header_enables_set(recr2_pl, bit, 1);
	for_each_set_bit(bit, config.inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT)
		mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl);
	if (err)
		goto err_reg_write;

	return 0;

err_reg_write:
	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth,
					      old_inc_parsing_depth);
	return err;
}

static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
{
	bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth;

	mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth,
					      false);
}
#else
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
	return 0;
}

static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp)
{
}
#endif

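/* Populate the DSCP-to-priority map. Each rdpm entry index is a DSCP
 * codepoint, while rt_tos2priority() expects a full ToS octet, hence the
 * shift past the two ECN bits: for example, DSCP 46 (EF) is looked up as
 * ToS 46 << 2 == 0xb8.
 */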
static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp)
{
	char rdpm_pl[MLXSW_REG_RDPM_LEN];
	unsigned int i;

	MLXSW_REG_ZERO(rdpm, rdpm_pl);

	/* HW is determining switch priority based on DSCP-bits, but the
	 * kernel is still doing that based on the ToS. Since there's a
	 * mismatch in bits we need to make sure to translate the right
	 * value ToS would observe, skipping the 2 least-significant ECN bits.
	 */
	for (i = 0; i < MLXSW_REG_RDPM_DSCP_ENTRY_REC_MAX_COUNT; i++)
		mlxsw_reg_rdpm_pack(rdpm_pl, i, rt_tos2priority(i << 2));

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rdpm), rdpm_pl);
}

static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
{
	struct net *net = mlxsw_sp_net(mlxsw_sp);
	char rgcr_pl[MLXSW_REG_RGCR_LEN];
	u64 max_rifs;
	bool usp;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
		return -EIO;
	max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
	usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);

	mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
	mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
	mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	char rgcr_pl[MLXSW_REG_RGCR_LEN];

	mlxsw_reg_rgcr_pack(rgcr_pl, false, false);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}

static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
{
	u16 lb_rif_index;
	int err;

	/* Create a generic loopback RIF associated with the main table
	 * (default VRF). Any table can be used, but the main table exists
	 * anyway, so we do not waste resources.
	 */
	err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
					 &lb_rif_index);
	if (err)
		return err;

	mlxsw_sp->router->lb_rif_index = lb_rif_index;

	return 0;
}

static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
}

static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp1_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp1_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
	.init = mlxsw_sp1_router_init,
	.ipips_init = mlxsw_sp1_ipips_init,
};

static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
{
	size_t size_ranges_count = ARRAY_SIZE(mlxsw_sp2_adj_grp_size_ranges);

	mlxsw_sp->router->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
	mlxsw_sp->router->adj_grp_size_ranges = mlxsw_sp2_adj_grp_size_ranges;
	mlxsw_sp->router->adj_grp_size_ranges_count = size_ranges_count;

	return 0;
}

const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
	.init = mlxsw_sp2_router_init,
	.ipips_init = mlxsw_sp2_ipips_init,
};

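/* Bring up the router. Internal structures (RIFs, LPM trees, virtual
 * routers, neighbour and multipath state) are initialized first and the
 * various notifiers are registered only at the very end, so no events can
 * be delivered before the router is able to handle them. The error path
 * and mlxsw_sp_router_fini() below tear everything down in reverse order.
 */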
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_router *router;
	int err;

	router = kzalloc(sizeof(*mlxsw_sp->router), GFP_KERNEL);
	if (!router)
		return -ENOMEM;
	mutex_init(&router->lock);
	mlxsw_sp->router = router;
	router->mlxsw_sp = mlxsw_sp;

	err = mlxsw_sp->router_ops->init(mlxsw_sp);
	if (err)
		goto err_router_ops_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nh_res_grp_list);
	INIT_DELAYED_WORK(&mlxsw_sp->router->nh_grp_activity_dw,
			  mlxsw_sp_nh_grp_activity_work);
	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
	err = __mlxsw_sp_router_init(mlxsw_sp);
	if (err)
		goto err_router_init;

	err = mlxsw_sp_rifs_init(mlxsw_sp);
	if (err)
		goto err_rifs_init;

	err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
	if (err)
		goto err_ipips_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
			      &mlxsw_sp_nexthop_ht_params);
	if (err)
		goto err_nexthop_ht_init;

	err = rhashtable_init(&mlxsw_sp->router->nexthop_group_ht,
			      &mlxsw_sp_nexthop_group_ht_params);
	if (err)
		goto err_nexthop_group_ht_init;

	INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_list);
	err = mlxsw_sp_lpm_init(mlxsw_sp);
	if (err)
		goto err_lpm_init;

	err = mlxsw_sp_mr_init(mlxsw_sp, &mlxsw_sp_mr_tcam_ops);
	if (err)
		goto err_mr_init;

	err = mlxsw_sp_vrs_init(mlxsw_sp);
	if (err)
		goto err_vrs_init;

	err = mlxsw_sp_lb_rif_init(mlxsw_sp);
	if (err)
		goto err_lb_rif_init;

	err = mlxsw_sp_neigh_init(mlxsw_sp);
	if (err)
		goto err_neigh_init;

	err = mlxsw_sp_mp_hash_init(mlxsw_sp);
	if (err)
		goto err_mp_hash_init;

	err = mlxsw_sp_dscp_init(mlxsw_sp);
	if (err)
		goto err_dscp_init;

	router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
	err = register_inetaddr_notifier(&router->inetaddr_nb);
	if (err)
		goto err_register_inetaddr_notifier;

	router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
	err = register_inet6addr_notifier(&router->inet6addr_nb);
	if (err)
		goto err_register_inet6addr_notifier;

	mlxsw_sp->router->netevent_nb.notifier_call =
		mlxsw_sp_router_netevent_event;
	err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	if (err)
		goto err_register_netevent_notifier;

	mlxsw_sp->router->nexthop_nb.notifier_call =
		mlxsw_sp_nexthop_obj_event;
	err = register_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
					&mlxsw_sp->router->nexthop_nb,
					extack);
	if (err)
		goto err_register_nexthop_notifier;

	mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
	err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->fib_nb,
				    mlxsw_sp_router_fib_dump_flush, extack);
	if (err)
		goto err_register_fib_notifier;

	mlxsw_sp->router->netdevice_nb.notifier_call =
		mlxsw_sp_router_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->router->netdevice_nb);
	if (err)
		goto err_register_netdev_notifier;

	return 0;

err_register_netdev_notifier:
	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				&mlxsw_sp->router->fib_nb);
err_register_fib_notifier:
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
err_register_nexthop_notifier:
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
	unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
	unregister_inetaddr_notifier(&router->inetaddr_nb);
err_register_inetaddr_notifier:
	mlxsw_core_flush_owq();
err_dscp_init:
	mlxsw_sp_mp_hash_fini(mlxsw_sp);
err_mp_hash_init:
	mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
err_lb_rif_init:
	mlxsw_sp_vrs_fini(mlxsw_sp);
err_vrs_init:
	mlxsw_sp_mr_fini(mlxsw_sp);
err_mr_init:
	mlxsw_sp_lpm_fini(mlxsw_sp);
err_lpm_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
err_nexthop_group_ht_init:
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
	mlxsw_sp_ipips_fini(mlxsw_sp);
err_ipips_init:
	mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
	__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
err_router_ops_init:
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
	return err;
}

void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->router->netdevice_nb);
	unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
				&mlxsw_sp->router->fib_nb);
	unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
				    &mlxsw_sp->router->nexthop_nb);
	unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
	unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
	unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
	mlxsw_core_flush_owq();
	mlxsw_sp_mp_hash_fini(mlxsw_sp);
	mlxsw_sp_neigh_fini(mlxsw_sp);
	mlxsw_sp_lb_rif_fini(mlxsw_sp);
	mlxsw_sp_vrs_fini(mlxsw_sp);
	mlxsw_sp_mr_fini(mlxsw_sp);
	mlxsw_sp_lpm_fini(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
	rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
	mlxsw_sp_ipips_fini(mlxsw_sp);
	mlxsw_sp_rifs_fini(mlxsw_sp);
	__mlxsw_sp_router_fini(mlxsw_sp);
	cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
	mutex_destroy(&mlxsw_sp->router->lock);
	kfree(mlxsw_sp->router);
}