// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>
#define NH_RES_DEFAULT_IDLE_TIMER	(120 * HZ)
#define NH_RES_DEFAULT_UNBALANCED_TIMER	0	/* No forced rebalancing. */

static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

#define NHA_OP_FLAGS_DUMP_ALL (NHA_OP_FLAG_DUMP_STATS |		\
			       NHA_OP_FLAG_DUMP_HW_STATS)
static const struct nla_policy rtm_nh_policy_new[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_GROUP]		= { .type = NLA_BINARY },
	[NHA_GROUP_TYPE]	= { .type = NLA_U16 },
	[NHA_BLACKHOLE]		= { .type = NLA_FLAG },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GATEWAY]		= { .type = NLA_BINARY },
	[NHA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[NHA_ENCAP]		= { .type = NLA_NESTED },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_RES_GROUP]		= { .type = NLA_NESTED },
	[NHA_HW_STATS_ENABLE]	= NLA_POLICY_MAX(NLA_U32, true),
};
static const struct nla_policy rtm_nh_policy_get[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};
static const struct nla_policy rtm_nh_policy_del[] = {
	[NHA_ID]		= { .type = NLA_U32 },
};
static const struct nla_policy rtm_nh_policy_dump[] = {
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_GROUPS]		= { .type = NLA_FLAG },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_FDB]		= { .type = NLA_FLAG },
	[NHA_OP_FLAGS]		= NLA_POLICY_MASK(NLA_U32,
						  NHA_OP_FLAGS_DUMP_ALL),
};
static const struct nla_policy rtm_nh_res_policy_new[] = {
	[NHA_RES_GROUP_BUCKETS]			= { .type = NLA_U16 },
	[NHA_RES_GROUP_IDLE_TIMER]		= { .type = NLA_U32 },
	[NHA_RES_GROUP_UNBALANCED_TIMER]	= { .type = NLA_U32 },
};
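
/* For reference, a resilient group using these attributes would be
 * configured from user space roughly like so (illustrative iproute2
 * syntax; the IDs and values are made up):
 *
 *   ip nexthop add id 103 group 101/102 type resilient \
 *           buckets 512 idle_timer 60 unbalanced_timer 300
 */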
static const struct nla_policy rtm_nh_policy_dump_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_OIF]		= { .type = NLA_U32 },
	[NHA_MASTER]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};
static const struct nla_policy rtm_nh_res_bucket_policy_dump[] = {
	[NHA_RES_BUCKET_NH_ID]	= { .type = NLA_U32 },
};
static const struct nla_policy rtm_nh_policy_get_bucket[] = {
	[NHA_ID]		= { .type = NLA_U32 },
	[NHA_RES_BUCKET]	= { .type = NLA_NESTED },
};
static const struct nla_policy rtm_nh_res_bucket_policy_get[] = {
	[NHA_RES_BUCKET_INDEX]	= { .type = NLA_U16 },
};
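
/* The bucket policies above back the RTM_GETNEXTHOPBUCKET dump and get
 * paths, exercised from user space e.g. as (illustrative iproute2
 * syntax):
 *
 *   ip nexthop bucket list id 103
 *   ip nexthop bucket get id 103 index 0
 */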
static bool nexthop_notifiers_is_empty(struct net *net)
{
	return !net->nexthop.notifier_chain.head;
}
static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
			       const struct nh_info *nhi)
{
	nh_info->dev = nhi->fib_nhc.nhc_dev;
	nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
	if (nh_info->gw_family == AF_INET)
		nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
	else if (nh_info->gw_family == AF_INET6)
		nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

	nh_info->id = nhi->nh_parent->id;
	nh_info->is_reject = nhi->reject_nh;
	nh_info->is_fdb = nhi->fdb_nh;
	nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}
static int nh_notifier_single_info_init(struct nh_notifier_info *info,
					const struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);

	info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
	info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
	if (!info->nh)
		return -ENOMEM;

	__nh_notifier_single_info_init(info->nh, nhi);

	return 0;
}
static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh);
}
static int nh_notifier_mpath_info_init(struct nh_notifier_info *info,
				       struct nh_group *nhg)
{
	u16 num_nh = nhg->num_nh;
	int i;

	info->type = NH_NOTIFIER_INFO_TYPE_GRP;
	info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
			       GFP_KERNEL);
	if (!info->nh_grp)
		return -ENOMEM;

	info->nh_grp->num_nh = num_nh;
	info->nh_grp->is_fdb = nhg->fdb_nh;
	info->nh_grp->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhge->nh->nh_info);
		info->nh_grp->nh_entries[i].weight = nhge->weight;
		__nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
					       nhi);
	}

	return 0;
}
static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
					   struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	u16 num_nh_buckets = res_table->num_nh_buckets;
	unsigned long size;
	u16 i;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
	size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
	info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
				       __GFP_NOWARN);
	if (!info->nh_res_table)
		return -ENOMEM;

	info->nh_res_table->num_nh_buckets = num_nh_buckets;
	info->nh_res_table->hw_stats = nhg->hw_stats;

	for (i = 0; i < num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;
		struct nh_info *nhi;

		nhge = rtnl_dereference(bucket->nh_entry);
		nhi = rtnl_dereference(nhge->nh->nh_info);
		__nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
					       nhi);
	}

	return 0;
}
static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
				     const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		return nh_notifier_mpath_info_init(info, nhg);
	else if (nhg->resilient)
		return nh_notifier_res_table_info_init(info, nhg);
	return -EINVAL;
}
static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
				      const struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

	if (nhg->hash_threshold)
		kfree(info->nh_grp);
	else if (nhg->resilient)
		vfree(info->nh_res_table);
}
static int nh_notifier_info_init(struct nh_notifier_info *info,
				 const struct nexthop *nh)
{
	info->id = nh->id;

	if (nh->is_group)
		return nh_notifier_grp_info_init(info, nh);
	else
		return nh_notifier_single_info_init(info, nh);
}
static void nh_notifier_info_fini(struct nh_notifier_info *info,
				  const struct nexthop *nh)
{
	if (nh->is_group)
		nh_notifier_grp_info_fini(info, nh);
	else
		nh_notifier_single_info_fini(info);
}
static int call_nexthop_notifiers(struct net *net,
				  enum nexthop_event_type event_type,
				  struct nexthop *nh,
				  struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_info_init(&info, nh);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}
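
/* All the notifier calls in this file follow the pattern above: build a
 * heap-allocated snapshot of the nexthop (nh_notifier_info_init() and
 * friends), run the blocking notifier chain, free the snapshot, and
 * convert the NOTIFY_* return value into an errno.
 */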
static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
				      bool force, unsigned int *p_idle_timer_ms)
{
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	struct nexthop *nh;
	int err = 0;

	/* When 'force' is false, nexthop bucket replacement is performed
	 * because the bucket was deemed to be idle. In this case, capable
	 * listeners can choose to perform an atomic replacement: The bucket is
	 * only replaced if it is inactive. However, if the idle timer interval
	 * is smaller than the interval in which a listener is querying
	 * buckets' activity from the device, then atomic replacement should
	 * not be tried. Pass the idle timer value to listeners, so that they
	 * could determine which type of replacement to perform.
	 */
	if (force) {
		*p_idle_timer_ms = 0;
		return 0;
	}

	rcu_read_lock();

	nh = nexthop_find_by_id(info->net, info->id);
	if (!nh) {
		err = -EINVAL;
		goto out;
	}

	nhg = rcu_dereference(nh->nh_grp);
	res_table = rcu_dereference(nhg->res_table);
	*p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
	rcu_read_unlock();

	return err;
}
static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
					    u16 bucket_index, bool force,
					    struct nh_info *oldi,
					    struct nh_info *newi)
{
	unsigned int idle_timer_ms;
	int err;

	err = nh_notifier_res_bucket_idle_timer_get(info, force,
						    &idle_timer_ms);
	if (err)
		return err;

	info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
	info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
				      GFP_KERNEL);
	if (!info->nh_res_bucket)
		return -ENOMEM;

	info->nh_res_bucket->bucket_index = bucket_index;
	info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
	info->nh_res_bucket->force = force;
	__nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
	__nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
	return 0;
}
static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_res_bucket);
}
static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					       u16 bucket_index, bool force,
					       struct nh_info *oldi,
					       struct nh_info *newi,
					       struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
		.id = nhg_id,
	};
	int err;

	if (nexthop_notifiers_is_empty(net))
		return 0;

	err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
					       oldi, newi);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_BUCKET_REPLACE, &info);
	nh_notifier_res_bucket_info_fini(&info);

	return notifier_to_errno(err);
}
/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))
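
/* nh_res_dereference() is accordingly used by the code shared between
 * the RTNL and DW paths, e.g. the bucket migration helpers driven by
 * nh_res_table_upkeep() below.
 */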
static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
					     u16 bucket_index, bool force,
					     struct nexthop *old_nh,
					     struct nexthop *new_nh,
					     struct netlink_ext_ack *extack)
{
	struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
	struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

	return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
						   force, oldi, newi, extack);
}
static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
					    struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	struct nh_group *nhg;
	int err;

	ASSERT_RTNL();

	if (nexthop_notifiers_is_empty(net))
		return 0;

	/* At this point, the nexthop buckets are still not populated. Only
	 * emit a notification with the logical nexthops, so that a listener
	 * could potentially veto it in case of unsupported configuration.
	 */
	nhg = rtnl_dereference(nh->nh_grp);
	err = nh_notifier_mpath_info_init(&info, nhg);
	if (err) {
		NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
		return err;
	}

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
					   &info);
	kfree(info.nh_grp);

	return notifier_to_errno(err);
}
static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
				 enum nexthop_event_type event_type,
				 struct nexthop *nh,
				 struct netlink_ext_ack *extack)
{
	struct nh_notifier_info info = {
		.net = net,
		.extack = extack,
	};
	int err;

	err = nh_notifier_info_init(&info, nh);
	if (err)
		return err;

	err = nb->notifier_call(nb, event_type, &info);
	nh_notifier_info_fini(&info, nh);

	return notifier_to_errno(err);
}
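
/* nh_dev_hashfn() folds an ifindex into NH_DEV_HASHBITS bits by XOR-ing
 * its 8-bit chunks, e.g. val 0x00012034 hashes to
 * (0x34 ^ 0x20 ^ 0x01) & 0xff == 0x15.
 */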
static unsigned int nh_dev_hashfn(unsigned int val)
{
	unsigned int mask = NH_DEV_HASHSIZE - 1;

	return (val ^
		(val >> NH_DEV_HASHBITS) ^
		(val >> (NH_DEV_HASHBITS * 2))) & mask;
}
static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
	struct net_device *dev = nhi->fib_nhc.nhc_dev;
	struct hlist_head *head;
	unsigned int hash;

	WARN_ON(!dev);

	hash = nh_dev_hashfn(dev->ifindex);
	head = &net->nexthop.devhash[hash];
	hlist_add_head(&nhi->dev_hash, head);
}
static void nexthop_free_group(struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	nhg = rcu_dereference_raw(nh->nh_grp);
	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		WARN_ON(!list_empty(&nhge->nh_list));
		free_percpu(nhge->stats);
		nexthop_put(nhge->nh);
	}

	WARN_ON(nhg->spare == nhg);

	if (nhg->resilient)
		vfree(rcu_dereference_raw(nhg->res_table));

	kfree(nhg->spare);
	kfree(nhg);
}
static void nexthop_free_single(struct nexthop *nh)
{
	struct nh_info *nhi;

	nhi = rcu_dereference_raw(nh->nh_info);
	switch (nhi->family) {
	case AF_INET:
		fib_nh_release(nh->net, &nhi->fib_nh);
		break;
	case AF_INET6:
		ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
		break;
	}
	kfree(nhi);
}
void nexthop_free_rcu(struct rcu_head *head)
{
	struct nexthop *nh = container_of(head, struct nexthop, rcu);

	if (nh->is_group)
		nexthop_free_group(nh);
	else
		nexthop_free_single(nh);

	kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);
static struct nexthop *nexthop_alloc(void)
{
	struct nexthop *nh;

	nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
	if (nh) {
		INIT_LIST_HEAD(&nh->fi_list);
		INIT_LIST_HEAD(&nh->f6i_list);
		INIT_LIST_HEAD(&nh->grp_list);
		INIT_LIST_HEAD(&nh->fdb_list);
	}
	return nh;
}
static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
	struct nh_group *nhg;

	nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
	if (nhg)
		nhg->num_nh = num_nh;

	return nhg;
}
static void nh_res_table_upkeep_dw(struct work_struct *work);
static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
	const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
	struct nh_res_table *res_table;
	unsigned long size;

	size = struct_size(res_table, nh_buckets, num_nh_buckets);
	res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
	if (!res_table)
		return NULL;

	res_table->net = net;
	res_table->nhg_id = nhg_id;
	INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
	INIT_LIST_HEAD(&res_table->uw_nh_entries);
	res_table->idle_timer = cfg->nh_grp_res_idle_timer;
	res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
	res_table->num_nh_buckets = num_nh_buckets;
	return res_table;
}
static void nh_base_seq_inc(struct net *net)
{
	/* 0 means "not set"; skip it when the counter wraps. */
	while (++net->nexthop.seq == 0)
		;
}
/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
	struct rb_node **pp, *parent = NULL, *next;

	pp = &net->nexthop.rb_root.rb_node;
	while (1) {
		struct nexthop *nh;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (id < nh->id)
			pp = &next->rb_left;
		else if (id > nh->id)
			pp = &next->rb_right;
		else
			return nh;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);
/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
	u32 id_start = net->nexthop.last_id_allocated;

	while (1) {
		net->nexthop.last_id_allocated++;
		if (net->nexthop.last_id_allocated == id_start)
			break;

		if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
			return net->nexthop.last_id_allocated;
	}
	return 0;
}
static void nh_res_time_set_deadline(unsigned long next_time,
				     unsigned long *deadline)
{
	if (time_before(next_time, *deadline))
		*deadline = next_time;
}
static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
{
	if (list_empty(&res_table->uw_nh_entries))
		return 0;
	return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
}
static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
{
	struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
	struct nlattr *nest;

	nest = nla_nest_start(skb, NHA_RES_GROUP);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
			res_table->num_nh_buckets) ||
	    nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
			jiffies_to_clock_t(res_table->idle_timer)) ||
	    nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
			jiffies_to_clock_t(res_table->unbalanced_timer)) ||
	    nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
			      nh_res_table_unbalanced_time(res_table),
			      NHA_RES_GROUP_PAD))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static void nh_grp_entry_stats_inc(struct nh_grp_entry *nhge)
{
	struct nh_grp_entry_stats *cpu_stats;

	cpu_stats = get_cpu_ptr(nhge->stats);
	u64_stats_update_begin(&cpu_stats->syncp);
	u64_stats_inc(&cpu_stats->packets);
	u64_stats_update_end(&cpu_stats->syncp);
	put_cpu_ptr(cpu_stats);
}
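
/* The per-CPU counters use the usual u64_stats pattern: the writer above
 * brackets its update with u64_stats_update_begin()/end(), and the reader
 * below retries with u64_stats_fetch_begin()/retry() until it observes a
 * consistent value on 32-bit systems.
 */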
static void nh_grp_entry_stats_read(struct nh_grp_entry *nhge,
				    u64 *ret_packets)
{
	int i;

	*ret_packets = 0;

	for_each_possible_cpu(i) {
		struct nh_grp_entry_stats *cpu_stats;
		unsigned int start;
		u64 packets;

		cpu_stats = per_cpu_ptr(nhge->stats, i);
		do {
			start = u64_stats_fetch_begin(&cpu_stats->syncp);
			packets = u64_stats_read(&cpu_stats->packets);
		} while (u64_stats_fetch_retry(&cpu_stats->syncp, start));

		*ret_packets += packets;
	}
}
static int nh_notifier_grp_hw_stats_init(struct nh_notifier_info *info,
					 const struct nexthop *nh)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	info->id = nh->id;
	info->type = NH_NOTIFIER_INFO_TYPE_GRP_HW_STATS;
	info->nh_grp_hw_stats = kzalloc(struct_size(info->nh_grp_hw_stats,
						    stats, nhg->num_nh),
					GFP_KERNEL);
	if (!info->nh_grp_hw_stats)
		return -ENOMEM;

	info->nh_grp_hw_stats->num_nh = nhg->num_nh;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		info->nh_grp_hw_stats->stats[i].id = nhge->nh->id;
	}

	return 0;
}
static void nh_notifier_grp_hw_stats_fini(struct nh_notifier_info *info)
{
	kfree(info->nh_grp_hw_stats);
}
void nh_grp_hw_stats_report_delta(struct nh_notifier_grp_hw_stats_info *info,
				  unsigned int nh_idx,
				  u64 delta_packets)
{
	info->hw_stats_used = true;
	info->stats[nh_idx].packets += delta_packets;
}
EXPORT_SYMBOL(nh_grp_hw_stats_report_delta);
static void nh_grp_hw_stats_apply_update(struct nexthop *nh,
					 struct nh_notifier_info *info)
{
	struct nh_group *nhg;
	int i;

	ASSERT_RTNL();
	nhg = rtnl_dereference(nh->nh_grp);

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		nhge->packets_hw += info->nh_grp_hw_stats->stats[i].packets;
	}
}
static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used)
{
	struct nh_notifier_info info = {
		.net = nh->net,
	};
	struct net *net = nh->net;
	int err;

	if (nexthop_notifiers_is_empty(net)) {
		*hw_stats_used = false;
		return 0;
	}

	err = nh_notifier_grp_hw_stats_init(&info, nh);
	if (err)
		return err;

	err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
					   NEXTHOP_EVENT_HW_STATS_REPORT_DELTA,
					   &info);

	/* Cache whatever we got, even if there was an error, otherwise the
	 * successful stats retrievals would get lost.
	 */
	nh_grp_hw_stats_apply_update(nh, &info);
	*hw_stats_used = info.nh_grp_hw_stats->hw_stats_used;

	nh_notifier_grp_hw_stats_fini(&info);
	return notifier_to_errno(err);
}
static int nla_put_nh_group_stats_entry(struct sk_buff *skb,
					struct nh_grp_entry *nhge,
					u32 op_flags)
{
	struct nlattr *nest;
	u64 packets;

	nh_grp_entry_stats_read(nhge, &packets);

	nest = nla_nest_start(skb, NHA_GROUP_STATS_ENTRY);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u32(skb, NHA_GROUP_STATS_ENTRY_ID, nhge->nh->id) ||
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS,
			 packets + nhge->packets_hw))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nla_put_uint(skb, NHA_GROUP_STATS_ENTRY_PACKETS_HW,
			 nhge->packets_hw))
		goto nla_put_failure;

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
static int nla_put_nh_group_stats(struct sk_buff *skb, struct nexthop *nh,
				  u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nlattr *nest;
	bool hw_stats_used;
	int err;
	int i;

	if (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats))
		goto err_out;

	if (op_flags & NHA_OP_FLAG_DUMP_HW_STATS &&
	    nhg->hw_stats) {
		err = nh_grp_hw_stats_update(nh, &hw_stats_used);
		if (err)
			goto out;

		if (nla_put_u32(skb, NHA_HW_STATS_USED, hw_stats_used))
			goto err_out;
	}

	nest = nla_nest_start(skb, NHA_GROUP_STATS);
	if (!nest)
		goto err_out;

	for (i = 0; i < nhg->num_nh; i++)
		if (nla_put_nh_group_stats_entry(skb, &nhg->nh_entries[i],
						 op_flags))
			goto cancel_out;

	nla_nest_end(skb, nest);
	return 0;

cancel_out:
	nla_nest_cancel(skb, nest);
err_out:
	err = -EMSGSIZE;
out:
	return err;
}
static int nla_put_nh_group(struct sk_buff *skb, struct nexthop *nh,
			    u32 op_flags)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	struct nexthop_grp *p;
	size_t len = nhg->num_nh * sizeof(*p);
	struct nlattr *nla;
	u16 group_type = 0;
	int i;

	if (nhg->hash_threshold)
		group_type = NEXTHOP_GRP_TYPE_MPATH;
	else if (nhg->resilient)
		group_type = NEXTHOP_GRP_TYPE_RES;

	if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
		goto nla_put_failure;

	nla = nla_reserve(skb, NHA_GROUP, len);
	if (!nla)
		goto nla_put_failure;

	p = nla_data(nla);
	for (i = 0; i < nhg->num_nh; ++i) {
		p->id = nhg->nh_entries[i].nh->id;
		/* the netlink attribute carries weight - 1, i.e. 0 means 1 */
		p->weight = nhg->nh_entries[i].weight - 1;
		p += 1;
	}

	if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
		goto nla_put_failure;

	if (op_flags & NHA_OP_FLAG_DUMP_STATS &&
	    (nla_put_u32(skb, NHA_HW_STATS_ENABLE, nhg->hw_stats) ||
	     nla_put_nh_group_stats(skb, nh, op_flags)))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
			int event, u32 portid, u32 seq, unsigned int nlflags,
			u32 op_flags)
{
	struct fib6_nh *fib6_nh;
	struct fib_nh *fib_nh;
	struct nlmsghdr *nlh;
	struct nh_info *nhi;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = nh->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
		if (nla_put_nh_group(skb, nh, op_flags))
			goto nla_put_failure;
		goto out;
	}

	nhi = rtnl_dereference(nh->nh_info);
	nhm->nh_family = nhi->family;
	if (nhi->reject_nh) {
		if (nla_put_flag(skb, NHA_BLACKHOLE))
			goto nla_put_failure;
		goto out;
	} else if (nhi->fdb_nh) {
		if (nla_put_flag(skb, NHA_FDB))
			goto nla_put_failure;
	} else {
		const struct net_device *dev;

		dev = nhi->fib_nhc.nhc_dev;
		if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
			goto nla_put_failure;
	}

	nhm->nh_scope = nhi->fib_nhc.nhc_scope;
	switch (nhi->family) {
	case AF_INET:
		fib_nh = &nhi->fib_nh;
		if (fib_nh->fib_nh_gw_family &&
		    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
			goto nla_put_failure;
		break;

	case AF_INET6:
		fib6_nh = &nhi->fib6_nh;
		if (fib6_nh->fib_nh_gw_family &&
		    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
			goto nla_put_failure;
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate &&
	    lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
				NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
		goto nla_put_failure;

out:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
{
	return nla_total_size(0) +	/* NHA_RES_GROUP */
		nla_total_size(2) +	/* NHA_RES_GROUP_BUCKETS */
		nla_total_size(4) +	/* NHA_RES_GROUP_IDLE_TIMER */
		nla_total_size(4) +	/* NHA_RES_GROUP_UNBALANCED_TIMER */
		nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
}
static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
	size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
	size_t tot = nla_total_size(sz) +
		nla_total_size(2); /* NHA_GROUP_TYPE */

	if (nhg->resilient)
		tot += nh_nlmsg_size_grp_res(nhg);

	return tot;
}
static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
	struct nh_info *nhi = rtnl_dereference(nh->nh_info);
	size_t sz;

	/* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
	 * are mutually exclusive
	 */
	sz = nla_total_size(4);  /* NHA_OIF */

	switch (nhi->family) {
	case AF_INET:
		if (nhi->fib_nh.fib_nh_gw_family)
			sz += nla_total_size(4);  /* NHA_GATEWAY */
		break;

	case AF_INET6:
		/* NHA_GATEWAY */
		if (nhi->fib6_nh.fib_nh_gw_family)
			sz += nla_total_size(sizeof(const struct in6_addr));
		break;
	}

	if (nhi->fib_nhc.nhc_lwtstate) {
		sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
		sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
	}

	return sz;
}
static size_t nh_nlmsg_size(struct nexthop *nh)
{
	size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

	sz += nla_total_size(4); /* NHA_ID */

	if (nh->is_group)
		sz += nh_nlmsg_size_grp(nh);
	else
		sz += nh_nlmsg_size_single(nh);

	return sz;
}
static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
	unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
	if (!skb)
		goto errout;

	err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in nh_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}
static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
	return (unsigned long)atomic_long_read(&bucket->used_time);
}
static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
			 const struct nh_res_bucket *bucket,
			 unsigned long now)
{
	unsigned long time = nh_res_bucket_used_time(bucket);

	/* Bucket was not used since it was migrated. The idle time is now. */
	if (time == bucket->migrated_time)
		return now;

	return time + res_table->idle_timer;
}
static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
	return res_table->unbalanced_since + res_table->unbalanced_timer;
}
static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
				   struct nh_res_bucket *bucket)
{
	unsigned long now = jiffies;

	atomic_long_set(&bucket->used_time, (long)now);
	bucket->migrated_time = now;
}
static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
	atomic_long_set(&bucket->used_time, (long)jiffies);
}
static clock_t nh_res_bucket_idle_time(const struct nh_res_bucket *bucket)
{
	unsigned long used_time = nh_res_bucket_used_time(bucket);

	return jiffies_delta_to_clock_t(jiffies - used_time);
}
static int nh_fill_res_bucket(struct sk_buff *skb, struct nexthop *nh,
			      struct nh_res_bucket *bucket, u16 bucket_index,
			      int event, u32 portid, u32 seq,
			      unsigned int nlflags,
			      struct netlink_ext_ack *extack)
{
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nlmsghdr *nlh;
	struct nlattr *nest;
	struct nhmsg *nhm;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
	if (!nlh)
		return -EMSGSIZE;

	nhm = nlmsg_data(nlh);
	nhm->nh_family = AF_UNSPEC;
	nhm->nh_flags = bucket->nh_flags;
	nhm->nh_protocol = nh->protocol;
	nhm->nh_scope = 0;
	nhm->resvd = 0;

	if (nla_put_u32(skb, NHA_ID, nh->id))
		goto nla_put_failure;

	nest = nla_nest_start(skb, NHA_RES_BUCKET);
	if (!nest)
		goto nla_put_failure;

	if (nla_put_u16(skb, NHA_RES_BUCKET_INDEX, bucket_index) ||
	    nla_put_u32(skb, NHA_RES_BUCKET_NH_ID, nhge->nh->id) ||
	    nla_put_u64_64bit(skb, NHA_RES_BUCKET_IDLE_TIME,
			      nh_res_bucket_idle_time(bucket),
			      NHA_RES_BUCKET_PAD))
		goto nla_put_failure_nest;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure_nest:
	nla_nest_cancel(skb, nest);
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
static void nexthop_bucket_notify(struct nh_res_table *res_table,
				  u16 bucket_index)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *nhge = nh_res_dereference(bucket->nh_entry);
	struct nexthop *nh = nhge->nh_parent;
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout;

	err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
				 RTM_NEWNEXTHOPBUCKET, 0, 0, NLM_F_REPLACE,
				 NULL);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, nh->net, 0, RTNLGRP_NEXTHOP, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(nh->net, RTNLGRP_NEXTHOP, err);
}
static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
			   bool *is_fdb, struct netlink_ext_ack *extack)
{
	if (nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

		/* Nesting groups within groups is not supported. */
		if (nhg->hash_threshold) {
			NL_SET_ERR_MSG(extack,
				       "Hash-threshold group can not be a nexthop within a group");
			return false;
		}
		if (nhg->resilient) {
			NL_SET_ERR_MSG(extack,
				       "Resilient group can not be a nexthop within a group");
			return false;
		}
		*is_fdb = nhg->fdb_nh;
	} else {
		struct nh_info *nhi = rtnl_dereference(nh->nh_info);

		if (nhi->reject_nh && npaths > 1) {
			NL_SET_ERR_MSG(extack,
				       "Blackhole nexthop can not be used in a group with more than 1 path");
			return false;
		}
		*is_fdb = nhi->fdb_nh;
	}

	return true;
}
static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
				   struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;

	nhi = rtnl_dereference(nh->nh_info);

	if (!nhi->fdb_nh) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
		return -EINVAL;
	}

	if (*nh_family == AF_UNSPEC) {
		*nh_family = nhi->family;
	} else if (*nh_family != nhi->family) {
		NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
		return -EINVAL;
	}

	return 0;
}
static int nh_check_attr_group(struct net *net,
			       struct nlattr *tb[], size_t tb_size,
			       u16 nh_grp_type, struct netlink_ext_ack *extack)
{
	unsigned int len = nla_len(tb[NHA_GROUP]);
	u8 nh_family = AF_UNSPEC;
	struct nexthop_grp *nhg;
	unsigned int i, j;
	u8 nhg_fdb = 0;

	if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid length for nexthop group attribute");
		return -EINVAL;
	}

	/* convert len to number of nexthop ids */
	len /= sizeof(*nhg);

	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		if (nhg[i].resvd1 || nhg[i].resvd2) {
			NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
			return -EINVAL;
		}
		if (nhg[i].weight > 254) {
			NL_SET_ERR_MSG(extack, "Invalid value for weight");
			return -EINVAL;
		}
		for (j = i + 1; j < len; ++j) {
			if (nhg[i].id == nhg[j].id) {
				NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
				return -EINVAL;
			}
		}
	}

	if (tb[NHA_FDB])
		nhg_fdb = 1;
	nhg = nla_data(tb[NHA_GROUP]);
	for (i = 0; i < len; ++i) {
		struct nexthop *nh;
		bool is_fdb_nh;

		nh = nexthop_find_by_id(net, nhg[i].id);
		if (!nh) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
		if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
			return -EINVAL;

		if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
			return -EINVAL;

		if (!nhg_fdb && is_fdb_nh) {
			NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
			return -EINVAL;
		}
	}
	for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
		if (!tb[i])
			continue;
		switch (i) {
		case NHA_HW_STATS_ENABLE:
		case NHA_FDB:
			continue;
		case NHA_RES_GROUP:
			if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
				continue;
			break;
		}
		NL_SET_ERR_MSG(extack,
			       "No other attributes can be set in nexthop groups");
		return -EINVAL;
	}

	return 0;
}
static bool ipv6_good_nh(const struct fib6_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}
static bool ipv4_good_nh(const struct fib_nh *nh)
{
	int state = NUD_REACHABLE;
	struct neighbour *n;

	rcu_read_lock();

	n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
				      (__force u32)nh->fib_nh_gw4);
	if (n)
		state = READ_ONCE(n->nud_state);

	rcu_read_unlock();

	return !!(state & NUD_VALID);
}
static bool nexthop_is_good_nh(const struct nexthop *nh)
{
	struct nh_info *nhi = rcu_dereference(nh->nh_info);

	switch (nhi->family) {
	case AF_INET:
		return ipv4_good_nh(&nhi->fib_nh);
	case AF_INET6:
		return ipv6_good_nh(&nhi->fib6_nh);
	}

	return false;
}
static struct nexthop *nexthop_select_path_fdb(struct nh_group *nhg, int hash)
{
	int i;

	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	WARN_ON_ONCE(1);
	return NULL;
}
static struct nexthop *nexthop_select_path_hthr(struct nh_group *nhg, int hash)
{
	struct nh_grp_entry *nhge0 = NULL;
	int i;

	if (nhg->fdb_nh)
		return nexthop_select_path_fdb(nhg, hash);

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		/* nexthop objects always check that a nexthop is good and
		 * do not rely on a sysctl for this behavior
		 */
		if (!nexthop_is_good_nh(nhge->nh))
			continue;

		if (!nhge0)
			nhge0 = nhge;

		if (hash > atomic_read(&nhge->hthr.upper_bound))
			continue;

		nh_grp_entry_stats_inc(nhge);
		return nhge->nh;
	}

	if (!nhge0)
		nhge0 = &nhg->nh_entries[0];
	nh_grp_entry_stats_inc(nhge0);

	return nhge0->nh;
}
static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
	struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
	u16 bucket_index = hash % res_table->num_nh_buckets;
	struct nh_res_bucket *bucket;
	struct nh_grp_entry *nhge;

	/* nexthop_select_path() is expected to return a non-NULL value, so
	 * skip protocol validation and just hand out whatever there is.
	 */
	bucket = &res_table->nh_buckets[bucket_index];
	nh_res_bucket_set_busy(bucket);
	nhge = rcu_dereference(bucket->nh_entry);
	nh_grp_entry_stats_inc(nhge);
	return nhge->nh;
}
struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
	struct nh_group *nhg;

	if (!nh->is_group)
		return nh;

	nhg = rcu_dereference(nh->nh_grp);
	if (nhg->hash_threshold)
		return nexthop_select_path_hthr(nhg, hash);
	else if (nhg->resilient)
		return nexthop_select_path_res(nhg, hash);

	/* Unreachable. */
	return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);
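
/* nexthop_select_path() runs in the packet forwarding path under
 * rcu_read_lock(); callers pass a flow hash (e.g. fib_multipath_hash()
 * computes one for IPv4) that is matched against the per-entry upper
 * bounds or used as a bucket index, depending on the group type.
 */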
int nexthop_for_each_fib6_nh(struct nexthop *nh,
			     int (*cb)(struct fib6_nh *nh, void *arg),
			     void *arg)
{
	struct nh_info *nhi;
	int err;

	if (nh->is_group) {
		struct nh_group *nhg;
		int i;

		nhg = rcu_dereference_rtnl(nh->nh_grp);
		for (i = 0; i < nhg->num_nh; i++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[i];

			nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
			err = cb(&nhi->fib6_nh, arg);
			if (err)
				return err;
		}
	} else {
		nhi = rcu_dereference_rtnl(nh->nh_info);
		err = cb(&nhi->fib6_nh, arg);
		if (err)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);
static int check_src_addr(const struct in6_addr *saddr,
			  struct netlink_ext_ack *extack)
{
	if (!ipv6_addr_any(saddr)) {
		NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
		return -EINVAL;
	}
	return 0;
}
int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
		       struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	bool is_fdb_nh;

	/* fib6_src is unique to a fib6_info and limits the ability to cache
	 * routes in fib6_nh within a nexthop that is potentially shared
	 * across multiple fib entries. If the config wants to use source
	 * routing it can not use nexthop objects. mlxsw also does not allow
	 * fib6_src on routes.
	 */
	if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
		return -EINVAL;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->has_v4)
			goto no_v4_nh;
		is_fdb_nh = nhg->fdb_nh;
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->family == AF_INET)
			goto no_v4_nh;
		is_fdb_nh = nhi->fdb_nh;
	}

	if (is_fdb_nh) {
		NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
		return -EINVAL;
	}

	return 0;
no_v4_nh:
	NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);
/* if existing nexthop has ipv6 routes linked to it, need
 * to verify this new spec works with ipv6
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
			      struct netlink_ext_ack *extack)
{
	struct fib6_info *f6i;

	if (list_empty(&old->f6i_list))
		return 0;

	list_for_each_entry(f6i, &old->f6i_list, nh_list) {
		if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
			return -EINVAL;
	}

	return fib6_check_nexthop(new, NULL, extack);
}
static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
			       struct netlink_ext_ack *extack)
{
	if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
		NL_SET_ERR_MSG(extack,
			       "Route with host scope can not have a gateway");
		return -EINVAL;
	}

	if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
		NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
		return -EINVAL;
	}

	return 0;
}
/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
		      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	int err = 0;

	if (nh->is_group) {
		struct nh_group *nhg;

		nhg = rtnl_dereference(nh->nh_grp);
		if (nhg->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}

		if (scope == RT_SCOPE_HOST) {
			NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
			err = -EINVAL;
			goto out;
		}

		/* all nexthops in a group have the same scope */
		nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
		err = nexthop_check_scope(nhi, scope, extack);
	} else {
		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fdb_nh) {
			NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
			err = -EINVAL;
			goto out;
		}
		err = nexthop_check_scope(nhi, scope, extack);
	}

out:
	return err;
}
static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
			     struct netlink_ext_ack *extack)
{
	struct fib_info *fi;

	list_for_each_entry(fi, &old->fi_list, nh_list) {
		int err;

		err = fib_check_nexthop(new, fi->fib_scope, extack);
		if (err)
			return err;
	}
	return 0;
}
static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
	return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
	return list_empty(&res_table->uw_nh_entries);
}
static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
	struct nh_grp_entry *nhge;

	if (bucket->occupied) {
		nhge = nh_res_dereference(bucket->nh_entry);
		nhge->res.count_buckets--;
		bucket->occupied = false;
	}
}
static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
				 struct nh_grp_entry *nhge)
{
	nh_res_bucket_unset_nh(bucket);

	bucket->occupied = true;
	rcu_assign_pointer(bucket->nh_entry, nhge);
	nhge->res.count_buckets++;
}
static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
					 struct nh_res_bucket *bucket,
					 unsigned long *deadline, bool *force)
{
	unsigned long now = jiffies;
	struct nh_grp_entry *nhge;
	unsigned long idle_point;

	if (!bucket->occupied) {
		/* The bucket is not occupied, its NHGE pointer is either
		 * NULL or obsolete. We _have to_ migrate: set force.
		 */
		*force = true;
		return true;
	}

	nhge = nh_res_dereference(bucket->nh_entry);

	/* If the bucket is populated by an underweight or balanced
	 * nexthop, do not migrate.
	 */
	if (!nh_res_nhge_is_ow(nhge))
		return false;

	/* At this point we know that the bucket is populated with an
	 * overweight nexthop. It needs to be migrated to a new nexthop if
	 * the idle timer or the unbalanced timer expired.
	 */

	idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
	if (time_after_eq(now, idle_point)) {
		/* The bucket is idle. We _can_ migrate: unset force. */
		*force = false;
		return true;
	}

	/* Unbalanced timer of 0 means "never force". */
	if (res_table->unbalanced_timer) {
		unsigned long unb_point;

		unb_point = nh_res_table_unb_point(res_table);
		if (time_after(now, unb_point)) {
			/* The bucket is not idle, but the unbalanced timer
			 * expired. We _can_ migrate, but set force anyway,
			 * so that drivers know to ignore activity reports
			 * from the HW.
			 */
			*force = true;
			return true;
		}

		nh_res_time_set_deadline(unb_point, deadline);
	}

	nh_res_time_set_deadline(idle_point, deadline);
	return false;
}
static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
				  u16 bucket_index, bool notify,
				  bool notify_nl, bool force)
{
	struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
	struct nh_grp_entry *new_nhge;
	struct netlink_ext_ack extack;
	int err;

	new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
					    struct nh_grp_entry,
					    res.uw_nh_entry);
	if (WARN_ON_ONCE(!new_nhge))
		/* If this function is called, "bucket" is either not
		 * occupied, or it belongs to a next hop that is
		 * overweight. In either case, there ought to be a
		 * corresponding underweight next hop.
		 */
		return false;

	if (notify) {
		struct nh_grp_entry *old_nhge;

		old_nhge = nh_res_dereference(bucket->nh_entry);
		err = call_nexthop_res_bucket_notifiers(res_table->net,
							res_table->nhg_id,
							bucket_index, force,
							old_nhge->nh,
							new_nhge->nh, &extack);
		if (err) {
			pr_err_ratelimited("%s\n", extack._msg);
			if (!force)
				return false;
			/* It is not possible to veto a forced replacement, so
			 * just clear the hardware flags from the nexthop
			 * bucket to indicate to user space that this bucket is
			 * not correctly populated in hardware.
			 */
			bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
		}
	}

	nh_res_bucket_set_nh(bucket, new_nhge);
	nh_res_bucket_set_idle(res_table, bucket);

	if (notify_nl)
		nexthop_bucket_notify(res_table, bucket_index);

	if (nh_res_nhge_is_balanced(new_nhge))
		list_del(&new_nhge->res.uw_nh_entry);
	return true;
}
#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL	(HZ / 2)

static void nh_res_table_upkeep(struct nh_res_table *res_table,
				bool notify, bool notify_nl)
{
	unsigned long now = jiffies;
	unsigned long deadline;
	u16 i;

	/* Deadline is the next time that upkeep should be run. It is the
	 * earliest time at which one of the buckets might be migrated.
	 * Start at the most pessimistic estimate: either unbalanced_timer
	 * from now, or if there is none, idle_timer from now. For each
	 * encountered time point, call nh_res_time_set_deadline() to
	 * refine the estimate.
	 */
	if (res_table->unbalanced_timer)
		deadline = now + res_table->unbalanced_timer;
	else
		deadline = now + res_table->idle_timer;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		bool force;

		if (nh_res_bucket_should_migrate(res_table, bucket,
						 &deadline, &force)) {
			if (!nh_res_bucket_migrate(res_table, i, notify,
						   notify_nl, force)) {
				unsigned long idle_point;

				/* A driver can override the migration
				 * decision if the HW reports that the
				 * bucket is actually not idle. Therefore
				 * remark the bucket as busy again and
				 * update the deadline.
				 */
				nh_res_bucket_set_busy(bucket);
				idle_point = nh_res_bucket_idle_point(res_table,
								      bucket,
								      now);
				nh_res_time_set_deadline(idle_point, &deadline);
			}
		}
	}

	/* If the group is still unbalanced, schedule the next upkeep to
	 * either the deadline computed above, or the minimum deadline,
	 * whichever comes later.
	 */
	if (!nh_res_table_is_balanced(res_table)) {
		unsigned long now = jiffies;
		unsigned long min_deadline;

		min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
		if (time_before(deadline, min_deadline))
			deadline = min_deadline;

		queue_delayed_work(system_power_efficient_wq,
				   &res_table->upkeep_dw, deadline - now);
	}
}
static void nh_res_table_upkeep_dw(struct work_struct *work)
{
	struct delayed_work *dw = to_delayed_work(work);
	struct nh_res_table *res_table;

	res_table = container_of(dw, struct nh_res_table, upkeep_dw);
	nh_res_table_upkeep(res_table, true, true);
}
static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
{
	cancel_delayed_work_sync(&res_table->upkeep_dw);
}
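
/* Worked example for the rebalance below: with num_nh_buckets == 6 and
 * two entries weighted 1 and 2 (total 3), the cumulative upper bounds
 * are DIV_ROUND_CLOSEST(6 * 1, 3) == 2 and DIV_ROUND_CLOSEST(6 * 3, 3)
 * == 6, so the entries want 2 and 4 buckets respectively.
 */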
static void nh_res_group_rebalance(struct nh_group *nhg,
				   struct nh_res_table *res_table)
{
	int prev_upper_bound = 0;
	int total = 0;
	int w = 0;
	int i;

	INIT_LIST_HEAD(&res_table->uw_nh_entries);

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
						total);
		nhge->res.wants_buckets = upper_bound - prev_upper_bound;
		prev_upper_bound = upper_bound;

		if (nh_res_nhge_is_uw(nhge)) {
			if (list_empty(&res_table->uw_nh_entries))
				res_table->unbalanced_since = jiffies;
			list_add(&nhge->res.uw_nh_entry,
				 &res_table->uw_nh_entries);
		}
	}
}
/* Migrate buckets in res_table so that they reference NHGE's from NHG with
 * the right NH ID. Set those buckets that do not have a corresponding NHGE
 * entry in NHG as not occupied.
 */
static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
					 struct nh_group *nhg)
{
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
		bool found = false;
		int j;

		for (j = 0; j < nhg->num_nh; j++) {
			struct nh_grp_entry *nhge = &nhg->nh_entries[j];

			if (nhge->nh->id == id) {
				nh_res_bucket_set_nh(bucket, nhge);
				found = true;
				break;
			}
		}

		if (!found)
			nh_res_bucket_unset_nh(bucket);
	}
}
static void replace_nexthop_grp_res(struct nh_group *oldg,
				    struct nh_group *newg)
{
	/* For NH group replacement, the new NHG might only have a stub
	 * hash table with 0 buckets, because the number of buckets was not
	 * specified. For NH removal, oldg and newg both reference the same
	 * res_table. So in any case, in the following, we want to work
	 * with oldg->res_table.
	 */
	struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
	unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
	bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);

	nh_res_table_cancel_upkeep(old_res_table);
	nh_res_table_migrate_buckets(old_res_table, newg);
	nh_res_group_rebalance(newg, old_res_table);
	if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
		old_res_table->unbalanced_since = prev_unbalanced_since;
	nh_res_table_upkeep(old_res_table, true, false);
}
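
/* Worked example for the hash-threshold rebalance below: with two
 * entries weighted 2 and 1 (total 3), the cumulative upper bounds are
 * DIV_ROUND_CLOSEST_ULL(2ULL << 31, 3) - 1 == 0x55555554 and
 * DIV_ROUND_CLOSEST_ULL(3ULL << 31, 3) - 1 == 0x7fffffff, i.e. hashes
 * in the bottom two thirds of the 31-bit space select the first nexthop.
 */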
static void nh_hthr_group_rebalance(struct nh_group *nhg)
{
	int total = 0;
	int w = 0;
	int i;

	for (i = 0; i < nhg->num_nh; ++i)
		total += nhg->nh_entries[i].weight;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		int upper_bound;

		w += nhge->weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
		atomic_set(&nhge->hthr.upper_bound, upper_bound);
	}
}
static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
				struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhges, *new_nhges;
	struct nexthop *nhp = nhge->nh_parent;
	struct netlink_ext_ack extack;
	struct nexthop *nh = nhge->nh;
	struct nh_group *nhg, *newg;
	int i, j, err;

	WARN_ON(!nh);

	nhg = rtnl_dereference(nhp->nh_grp);
	newg = nhg->spare;

	/* last entry, keep it visible and remove the parent */
	if (nhg->num_nh == 1) {
		remove_nexthop(net, nhp, nlinfo);
		return;
	}

	newg->has_v4 = false;
	newg->is_multipath = nhg->is_multipath;
	newg->hash_threshold = nhg->hash_threshold;
	newg->resilient = nhg->resilient;
	newg->fdb_nh = nhg->fdb_nh;
	newg->num_nh = nhg->num_nh;

	/* copy old entries to new except the one getting removed */
	nhges = nhg->nh_entries;
	new_nhges = newg->nh_entries;
	for (i = 0, j = 0; i < nhg->num_nh; ++i) {
		struct nh_info *nhi;

		/* current nexthop getting removed */
		if (nhg->nh_entries[i].nh == nh) {
			newg->num_nh--;
			continue;
		}

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			newg->has_v4 = true;

		list_del(&nhges[i].nh_list);
		new_nhges[j].stats = nhges[i].stats;
		new_nhges[j].nh_parent = nhges[i].nh_parent;
		new_nhges[j].nh = nhges[i].nh;
		new_nhges[j].weight = nhges[i].weight;
		list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
		j++;
	}

	if (newg->hash_threshold)
		nh_hthr_group_rebalance(newg);
	else if (newg->resilient)
		replace_nexthop_grp_res(nhg, newg);

	rcu_assign_pointer(nhp->nh_grp, newg);

	list_del(&nhge->nh_list);
	free_percpu(nhge->stats);
	nexthop_put(nhge->nh);

	/* Removal of a NH from a resilient group is notified through
	 * bucket notifications.
	 */
	if (newg->hash_threshold) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
					     &extack);
		if (err)
			pr_err("%s\n", extack._msg);
	}

	if (nlinfo)
		nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
}
static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
				       struct nl_info *nlinfo)
{
	struct nh_grp_entry *nhge, *tmp;

	list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
		remove_nh_grp_entry(net, nhge, nlinfo);

	/* make sure all see the newly published array before releasing rtnl */
	synchronize_net();
}
static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
{
	struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
	struct nh_res_table *res_table;
	int i, num_nh = nhg->num_nh;

	for (i = 0; i < num_nh; ++i) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];

		if (WARN_ON(!nhge->nh))
			continue;

		list_del_init(&nhge->nh_list);
	}

	if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		nh_res_table_cancel_upkeep(res_table);
	}
}
/* not called for nexthop replace */
static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
{
	struct fib6_info *f6i, *tmp;
	bool do_flush = false;
	struct fib_info *fi;

	list_for_each_entry(fi, &nh->fi_list, nh_list) {
		fi->fib_flags |= RTNH_F_DEAD;
		do_flush = true;
	}
	if (do_flush)
		fib_flush(net);

	/* ip6_del_rt removes the entry from this list hence the _safe */
	list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
		/* __ip6_del_rt does a release, so do a hold here */
		fib6_info_hold(f6i);
		ipv6_stub->ip6_del_rt(net, f6i,
				      !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode));
	}
}
static void __remove_nexthop(struct net *net, struct nexthop *nh,
			     struct nl_info *nlinfo)
{
	__remove_nexthop_fib(net, nh);

	if (nh->is_group) {
		remove_nexthop_group(nh, nlinfo);
	} else {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nh->nh_info);
		if (nhi->fib_nhc.nhc_dev)
			hlist_del(&nhi->dev_hash);

		remove_nexthop_from_groups(net, nh, nlinfo);
	}
}
static void remove_nexthop(struct net *net, struct nexthop *nh,
			   struct nl_info *nlinfo)
{
	call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);

	/* remove from the tree */
	rb_erase(&nh->rb_node, &net->nexthop.rb_root);

	if (nlinfo)
		nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);

	__remove_nexthop(net, nh, nlinfo);
	nh_base_seq_inc(net);

	nexthop_put(nh);
}
/* if any FIB entries reference this nexthop, any dst entries
 * need to be regenerated
 */
static void nh_rt_cache_flush(struct net *net, struct nexthop *nh,
			      struct nexthop *replaced_nh)
{
	struct fib6_info *f6i;
	struct nh_group *nhg;
	int i;

	if (!list_empty(&nh->fi_list))
		rt_cache_flush(net);

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_update_sernum(net, f6i);

	/* if an IPv6 group was replaced, we have to release all old
	 * dsts to make sure all refcounts are released
	 */
	if (!replaced_nh->is_group)
		return;

	nhg = rtnl_dereference(replaced_nh->nh_grp);
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_grp_entry *nhge = &nhg->nh_entries[i];
		struct nh_info *nhi = rtnl_dereference(nhge->nh->nh_info);

		if (nhi->family == AF_INET6)
			ipv6_stub->fib6_nh_release_dsts(&nhi->fib6_nh);
	}
}
static int replace_nexthop_grp(struct net *net, struct nexthop *old,
			       struct nexthop *new, const struct nh_config *cfg,
			       struct netlink_ext_ack *extack)
{
	struct nh_res_table *tmp_table = NULL;
	struct nh_res_table *new_res_table;
	struct nh_res_table *old_res_table;
	struct nh_group *oldg, *newg;
	int i, err;

	if (!new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
		return -EINVAL;
	}

	oldg = rtnl_dereference(old->nh_grp);
	newg = rtnl_dereference(new->nh_grp);

	if (newg->hash_threshold != oldg->hash_threshold) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
		return -EINVAL;
	}

	if (newg->hash_threshold) {
		err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
					     extack);
		if (err)
			return err;
	} else if (newg->resilient) {
		new_res_table = rtnl_dereference(newg->res_table);
		old_res_table = rtnl_dereference(oldg->res_table);

		/* Accept if num_nh_buckets was not given, but if it was
		 * given, demand that the value be correct.
		 */
		if (cfg->nh_grp_res_has_num_buckets &&
		    cfg->nh_grp_res_num_buckets !=
		    old_res_table->num_nh_buckets) {
			NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
			return -EINVAL;
		}

		/* Emit a pre-replace notification so that listeners could veto
		 * a potentially unsupported configuration. Otherwise,
		 * individual bucket replacement notifications would need to be
		 * vetoed, which is something that should only happen if the
		 * bucket is currently active.
		 */
		err = call_nexthop_res_table_notifiers(net, new, extack);
		if (err)
			return err;

		if (cfg->nh_grp_res_has_idle_timer)
			old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
		if (cfg->nh_grp_res_has_unbalanced_timer)
			old_res_table->unbalanced_timer =
				cfg->nh_grp_res_unbalanced_timer;

		replace_nexthop_grp_res(oldg, newg);

		tmp_table = new_res_table;
		rcu_assign_pointer(newg->res_table, old_res_table);
		rcu_assign_pointer(newg->spare->res_table, old_res_table);
	}

	/* update parents - used by nexthop code for cleanup */
	for (i = 0; i < newg->num_nh; i++)
		newg->nh_entries[i].nh_parent = old;

	rcu_assign_pointer(old->nh_grp, newg);

	/* Make sure concurrent readers are not using 'oldg' anymore. */
	synchronize_net();

	if (newg->resilient) {
		rcu_assign_pointer(oldg->res_table, tmp_table);
		rcu_assign_pointer(oldg->spare->res_table, tmp_table);
	}

	for (i = 0; i < oldg->num_nh; i++)
		oldg->nh_entries[i].nh_parent = new;

	rcu_assign_pointer(new->nh_grp, oldg);

	return 0;
}
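
/* Note that replace_nexthop_grp() swaps the nh_grp pointers of 'old' and
 * 'new': 'old', which stays in the rb-tree, ends up owning the new
 * group, while 'new' carries the old group away to be released by the
 * caller.
 */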
static void nh_group_v4_update(struct nh_group *nhg)
{
	struct nh_grp_entry *nhges;
	bool has_v4 = false;
	int i;

	nhges = nhg->nh_entries;
	for (i = 0; i < nhg->num_nh; i++) {
		struct nh_info *nhi;

		nhi = rtnl_dereference(nhges[i].nh->nh_info);
		if (nhi->family == AF_INET)
			has_v4 = true;
	}
	nhg->has_v4 = has_v4;
}
static int replace_nexthop_single_notify_res(struct net *net,
					     struct nh_res_table *res_table,
					     struct nexthop *old,
					     struct nh_info *oldi,
					     struct nh_info *newi,
					     struct netlink_ext_ack *extack)
{
	u32 nhg_id = res_table->nhg_id;
	int err;
	u16 i;

	for (i = 0; i < res_table->num_nh_buckets; i++) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old) {
			err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
								  i, true,
								  oldi, newi,
								  extack);
			if (err)
				goto err_notify;
		}
	}

	return 0;

err_notify:
	while (i-- > 0) {
		struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
		struct nh_grp_entry *nhge;

		nhge = rtnl_dereference(bucket->nh_entry);
		if (nhge->nh == old)
			__call_nexthop_res_bucket_notifiers(net, nhg_id, i,
							    true, newi, oldi,
							    extack);
	}
	return err;
}
static int replace_nexthop_single_notify(struct net *net,
					 struct nexthop *group_nh,
					 struct nexthop *old,
					 struct nh_info *oldi,
					 struct nh_info *newi,
					 struct netlink_ext_ack *extack)
{
	struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
	struct nh_res_table *res_table;

	if (nhg->hash_threshold) {
		return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
					      group_nh, extack);
	} else if (nhg->resilient) {
		res_table = rtnl_dereference(nhg->res_table);
		return replace_nexthop_single_notify_res(net, res_table,
							 old, oldi, newi,
							 extack);
	}

	return -EINVAL;
}
static int replace_nexthop_single(struct net *net, struct nexthop *old,
				  struct nexthop *new,
				  struct netlink_ext_ack *extack)
{
	u8 old_protocol, old_nh_flags;
	struct nh_info *oldi, *newi;
	struct nh_grp_entry *nhge;
	int err;

	if (new->is_group) {
		NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
		return -EINVAL;
	}

	err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
	if (err)
		return err;

	/* Hardware flags were set on 'old' as 'new' is not in the red-black
	 * tree. Therefore, inherit the flags from 'old' to 'new'.
	 */
	new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);

	oldi = rtnl_dereference(old->nh_info);
	newi = rtnl_dereference(new->nh_info);

	newi->nh_parent = old;
	oldi->nh_parent = new;

	old_protocol = old->protocol;
	old_nh_flags = old->nh_flags;

	old->protocol = new->protocol;
	old->nh_flags = new->nh_flags;

	rcu_assign_pointer(old->nh_info, newi);
	rcu_assign_pointer(new->nh_info, oldi);

	/* Send a replace notification for all the groups using the nexthop. */
	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
						    extack);
		if (err)
			goto err_notify;
	}

	/* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
	 * update IPv4 indication in all the groups using the nexthop.
	 */
	if (oldi->family == AF_INET && newi->family == AF_INET6) {
		list_for_each_entry(nhge, &old->grp_list, nh_list) {
			struct nexthop *nhp = nhge->nh_parent;
			struct nh_group *nhg;

			nhg = rtnl_dereference(nhp->nh_grp);
			nh_group_v4_update(nhg);
		}
	}

	return 0;

err_notify:
	rcu_assign_pointer(new->nh_info, newi);
	rcu_assign_pointer(old->nh_info, oldi);
	old->nh_flags = old_nh_flags;
	old->protocol = old_protocol;
	oldi->nh_parent = old;
	newi->nh_parent = new;
	list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
		struct nexthop *nhp = nhge->nh_parent;

		replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
	}
	call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
	return err;
}
static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
				     struct nl_info *info)
{
	struct fib6_info *f6i;

	if (!list_empty(&nh->fi_list)) {
		struct fib_info *fi;

		/* expectation is a few fib_info per nexthop and then
		 * a lot of routes per fib_info. So mark the fib_info
		 * and then walk the fib tables once
		 */
		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = true;

		fib_info_notify_update(net, info);

		list_for_each_entry(fi, &nh->fi_list, nh_list)
			fi->nh_updated = false;
	}

	list_for_each_entry(f6i, &nh->f6i_list, nh_list)
		ipv6_stub->fib6_rt_update(net, f6i, info);
}
/* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
 * linked to this nexthop and for all groups that the nexthop
 * is a member of
 */
static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
				   struct nl_info *info)
{
	struct nh_grp_entry *nhge;

	__nexthop_replace_notify(net, nh, info);

	list_for_each_entry(nhge, &nh->grp_list, nh_list)
		__nexthop_replace_notify(net, nhge->nh_parent, info);
}

static int replace_nexthop(struct net *net, struct nexthop *old,
			   struct nexthop *new, const struct nh_config *cfg,
			   struct netlink_ext_ack *extack)
{
	bool new_is_reject = false;
	struct nh_grp_entry *nhge;
	int err;

	/* check that existing FIB entries are ok with the
	 * new nexthop definition
	 */
	err = fib_check_nh_list(old, new, extack);
	if (err)
		return err;

	err = fib6_check_nh_list(old, new, extack);
	if (err)
		return err;

	if (!new->is_group) {
		struct nh_info *nhi = rtnl_dereference(new->nh_info);

		new_is_reject = nhi->reject_nh;
	}

	list_for_each_entry(nhge, &old->grp_list, nh_list) {
		/* if new nexthop is a blackhole, any groups using this
		 * nexthop cannot have more than 1 path
		 */
		if (new_is_reject &&
		    nexthop_num_path(nhge->nh_parent) > 1) {
			NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
			return -EINVAL;
		}

		err = fib_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;

		err = fib6_check_nh_list(nhge->nh_parent, new, extack);
		if (err)
			return err;
	}

	if (old->is_group)
		err = replace_nexthop_grp(net, old, new, cfg, extack);
	else
		err = replace_nexthop_single(net, old, new, extack);

	if (!err) {
		nh_rt_cache_flush(net, old, new);

		__remove_nexthop(net, new, NULL);
		nexthop_put(new);
	}

	return err;
}

/* called with rtnl_lock held */
static int insert_nexthop(struct net *net, struct nexthop *new_nh,
			  struct nh_config *cfg, struct netlink_ext_ack *extack)
{
	struct rb_node **pp, *parent = NULL, *next;
	struct rb_root *root = &net->nexthop.rb_root;
	bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
	bool create = !!(cfg->nlflags & NLM_F_CREATE);
	u32 new_id = new_nh->id;
	int replace_notify = 0;
	int rc = -EEXIST;

	pp = &root->rb_node;
	while (1) {
		struct nexthop *nh;

		next = *pp;
		if (!next)
			break;

		parent = next;

		nh = rb_entry(parent, struct nexthop, rb_node);
		if (new_id < nh->id) {
			pp = &next->rb_left;
		} else if (new_id > nh->id) {
			pp = &next->rb_right;
		} else if (replace) {
			rc = replace_nexthop(net, nh, new_nh, cfg, extack);
			if (!rc) {
				new_nh = nh; /* send notification with old nh */
				replace_notify = 1;
			}
			goto out;
		} else {
			/* id already exists and not a replace */
			goto out;
		}
	}

	if (replace && !create) {
		NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
		rc = -ENOENT;
		goto out;
	}

	if (new_nh->is_group) {
		struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
		struct nh_res_table *res_table;

		if (nhg->resilient) {
			res_table = rtnl_dereference(nhg->res_table);

			/* Not passing the number of buckets is OK when
			 * replacing, but not when creating a new group.
			 */
			if (!cfg->nh_grp_res_has_num_buckets) {
				NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
				rc = -EINVAL;
				goto out;
			}

			nh_res_group_rebalance(nhg, res_table);

			/* Do not send bucket notifications, we do full
			 * notification below.
			 */
			nh_res_table_upkeep(res_table, false, false);
		}
	}

	rb_link_node_rcu(&new_nh->rb_node, parent, pp);
	rb_insert_color(&new_nh->rb_node, root);

	/* The initial insertion is a full notification for hash-threshold as
	 * well as resilient groups.
	 */
	rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
	if (rc)
		rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);

out:
	if (!rc) {
		nh_base_seq_inc(net);
		nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
		if (replace_notify &&
		    READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode))
			nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
	}

	return rc;
}
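
/* Reference sketch of the NLM_F_* semantics enforced by insert_nexthop();
 * the ids are made up and the commands are iproute2 equivalents:
 *
 *	NLM_F_CREATE, id unused:	link new nexthop into the rb-tree
 *					("ip nexthop add id 1 ...").
 *	NLM_F_CREATE, id in use:	-EEXIST unless NLM_F_REPLACE is
 *					also set.
 *	NLM_F_REPLACE alone, no entry:	-ENOENT.
 *	NLM_F_CREATE | NLM_F_REPLACE:	insert or replace as needed
 *					("ip nexthop replace id 1 ...").
 */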

/* remove all nexthops tied to a device being deleted */
static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev != dev)
			continue;

		if (nhi->reject_nh &&
		    (event == NETDEV_DOWN || event == NETDEV_CHANGE))
			continue;

		remove_nexthop(net, nhi->nh_parent, NULL);
	}
}

/* rtnl; called when net namespace is deleted */
static void flush_all_nexthops(struct net *net)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	struct nexthop *nh;

	while ((node = rb_first(root))) {
		nh = rb_entry(node, struct nexthop, rb_node);
		remove_nexthop(net, nh, NULL);
		cond_resched();
	}
}

static struct nexthop *nexthop_create_group(struct net *net,
					    struct nh_config *cfg)
{
	struct nlattr *grps_attr = cfg->nh_grp;
	struct nexthop_grp *entry = nla_data(grps_attr);
	u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
	struct nh_group *nhg;
	struct nexthop *nh;
	int err;
	int i;

	if (WARN_ON(!num_nh))
		return ERR_PTR(-EINVAL);

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nh->is_group = 1;

	nhg = nexthop_grp_alloc(num_nh);
	if (!nhg) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	/* spare group used for removals */
	nhg->spare = nexthop_grp_alloc(num_nh);
	if (!nhg->spare) {
		kfree(nhg);
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}
	nhg->spare->spare = nhg;

	for (i = 0; i < nhg->num_nh; ++i) {
		struct nexthop *nhe;
		struct nh_info *nhi;

		nhe = nexthop_find_by_id(net, entry[i].id);
		if (!nexthop_get(nhe)) {
			err = -ENOENT;
			goto out_no_nh;
		}

		nhi = rtnl_dereference(nhe->nh_info);
		if (nhi->family == AF_INET)
			nhg->has_v4 = true;

		nhg->nh_entries[i].stats =
			netdev_alloc_pcpu_stats(struct nh_grp_entry_stats);
		if (!nhg->nh_entries[i].stats) {
			err = -ENOMEM;
			nexthop_put(nhe);
			goto out_no_nh;
		}
		nhg->nh_entries[i].nh = nhe;
		nhg->nh_entries[i].weight = entry[i].weight + 1;
		list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
		nhg->nh_entries[i].nh_parent = nh;
	}

	if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
		nhg->hash_threshold = 1;
		nhg->is_multipath = true;
	} else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
		struct nh_res_table *res_table;

		res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
		if (!res_table) {
			err = -ENOMEM;
			goto out_no_nh;
		}

		rcu_assign_pointer(nhg->spare->res_table, res_table);
		rcu_assign_pointer(nhg->res_table, res_table);
		nhg->resilient = true;
		nhg->is_multipath = true;
	}

	WARN_ON_ONCE(nhg->hash_threshold + nhg->resilient != 1);

	if (nhg->hash_threshold)
		nh_hthr_group_rebalance(nhg);

	if (cfg->nh_fdb)
		nhg->fdb_nh = 1;

	if (cfg->nh_hw_stats)
		nhg->hw_stats = true;

	rcu_assign_pointer(nh->nh_grp, nhg);

	return nh;

out_no_nh:
	for (i--; i >= 0; --i) {
		list_del(&nhg->nh_entries[i].nh_list);
		free_percpu(nhg->nh_entries[i].stats);
		nexthop_put(nhg->nh_entries[i].nh);
	}

	kfree(nhg->spare);
	kfree(nhg);
	kfree(nh);

	return ERR_PTR(err);
}
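
/* Illustration only: group creation as seen from user space; member
 * nexthops must already exist and the ids/weights are hypothetical:
 *
 *	ip nexthop add id 1 via 192.0.2.1 dev eth0
 *	ip nexthop add id 2 via 192.0.2.2 dev eth0
 *	ip nexthop add id 10 group 1,3/2 type resilient buckets 32
 *
 * "1,3" requests member id 1 with weight 3. The NHA_GROUP payload
 * carries weight minus one (nexthop_grp.weight == 2 here), which is why
 * nexthop_create_group() stores "entry[i].weight + 1".
 */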

static int nh_create_ipv4(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib_nh *fib_nh = &nhi->fib_nh;
	struct fib_config fib_cfg = {
		.fc_oif = cfg->nh_ifindex,
		.fc_gw4 = cfg->gw.ipv4,
		.fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
		.fc_flags = cfg->nh_flags,
		.fc_nlinfo = cfg->nlinfo,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
	};
	u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
	int err;

	err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
	if (err) {
		fib_nh_release(net, fib_nh);
		goto out;
	}

	if (nhi->fdb_nh)
		goto out;

	/* sets nh_dev if successful */
	err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
	if (!err) {
		nh->nh_flags = fib_nh->fib_nh_flags;
		fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
					  !fib_nh->fib_nh_scope ? 0 : fib_nh->fib_nh_scope - 1);
	} else {
		fib_nh_release(net, fib_nh);
	}
out:
	return err;
}

static int nh_create_ipv6(struct net *net, struct nexthop *nh,
			  struct nh_info *nhi, struct nh_config *cfg,
			  struct netlink_ext_ack *extack)
{
	struct fib6_nh *fib6_nh = &nhi->fib6_nh;
	struct fib6_config fib6_cfg = {
		.fc_table = l3mdev_fib_table(cfg->dev),
		.fc_ifindex = cfg->nh_ifindex,
		.fc_gateway = cfg->gw.ipv6,
		.fc_flags = cfg->nh_flags,
		.fc_nlinfo = cfg->nlinfo,
		.fc_encap = cfg->nh_encap,
		.fc_encap_type = cfg->nh_encap_type,
		.fc_is_fdb = cfg->nh_fdb,
	};
	int err;

	if (!ipv6_addr_any(&cfg->gw.ipv6))
		fib6_cfg.fc_flags |= RTF_GATEWAY;

	/* sets nh_dev if successful */
	err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
				      extack);
	if (err) {
		/* IPv6 is not enabled, don't call fib6_nh_release */
		if (err == -EAFNOSUPPORT)
			goto out;
		ipv6_stub->fib6_nh_release(fib6_nh);
	} else {
		nh->nh_flags = fib6_nh->fib_nh_flags;
	}
out:
	return err;
}

static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
				      struct netlink_ext_ack *extack)
{
	struct nh_info *nhi;
	struct nexthop *nh;
	int err = 0;

	nh = nexthop_alloc();
	if (!nh)
		return ERR_PTR(-ENOMEM);

	nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
	if (!nhi) {
		kfree(nh);
		return ERR_PTR(-ENOMEM);
	}

	nh->nh_flags = cfg->nh_flags;
	nh->net = net;

	nhi->nh_parent = nh;
	nhi->family = cfg->nh_family;
	nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;

	if (cfg->nh_fdb)
		nhi->fdb_nh = 1;

	if (cfg->nh_blackhole) {
		nhi->reject_nh = 1;
		cfg->nh_ifindex = net->loopback_dev->ifindex;
	}

	switch (cfg->nh_family) {
	case AF_INET:
		err = nh_create_ipv4(net, nh, nhi, cfg, extack);
		break;
	case AF_INET6:
		err = nh_create_ipv6(net, nh, nhi, cfg, extack);
		break;
	}

	if (err) {
		kfree(nhi);
		kfree(nh);
		return ERR_PTR(err);
	}

	/* add the entry to the device based hash */
	if (!nhi->fdb_nh)
		nexthop_devhash_add(net, nhi);

	rcu_assign_pointer(nh->nh_info, nhi);

	return nh;
}

/* called with rtnl lock held */
static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct nexthop *nh;
	int err;

	if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
		NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
		return ERR_PTR(-EINVAL);
	}

	if (!cfg->nh_id) {
		cfg->nh_id = nh_find_unused_id(net);
		if (!cfg->nh_id) {
			NL_SET_ERR_MSG(extack, "No unused id");
			return ERR_PTR(-EINVAL);
		}
	}

	if (cfg->nh_grp)
		nh = nexthop_create_group(net, cfg);
	else
		nh = nexthop_create(net, cfg, extack);

	if (IS_ERR(nh))
		return nh;

	refcount_set(&nh->refcnt, 1);
	nh->id = cfg->nh_id;
	nh->protocol = cfg->nh_protocol;
	nh->net = net;

	err = insert_nexthop(net, nh, cfg, extack);
	if (err) {
		__remove_nexthop(net, nh, NULL);
		nexthop_put(nh);
		nh = ERR_PTR(err);
	}

	return nh;
}

static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
			    unsigned long *timer_p, bool *has_p,
			    struct netlink_ext_ack *extack)
{
	unsigned long timer;
	u32 value;

	if (!attr) {
		*timer_p = fallback;
		*has_p = false;
		return 0;
	}

	value = nla_get_u32(attr);
	timer = clock_t_to_jiffies(value);
	if (timer == ~0UL) {
		NL_SET_ERR_MSG(extack, "Timer value too large");
		return -EINVAL;
	}

	*timer_p = timer;
	*has_p = true;
	return 0;
}

static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
				    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
	int err;

	if (res) {
		err = nla_parse_nested(tb,
				       ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
				       res, rtm_nh_res_policy_new, extack);
		if (err < 0)
			return err;
	}

	if (tb[NHA_RES_GROUP_BUCKETS]) {
		cfg->nh_grp_res_num_buckets =
			nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
		cfg->nh_grp_res_has_num_buckets = true;
		if (!cfg->nh_grp_res_num_buckets) {
			NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
			return -EINVAL;
		}
	}

	err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
			       NH_RES_DEFAULT_IDLE_TIMER,
			       &cfg->nh_grp_res_idle_timer,
			       &cfg->nh_grp_res_has_idle_timer,
			       extack);
	if (err)
		return err;

	return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
				NH_RES_DEFAULT_UNBALANCED_TIMER,
				&cfg->nh_grp_res_unbalanced_timer,
				&cfg->nh_grp_res_has_unbalanced_timer,
				extack);
}
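
/* Note: both timers arrive as clock_t (USER_HZ) values and go through
 * clock_t_to_jiffies() in rtm_nh_get_timer(). Hypothetical example:
 *
 *	ip nexthop add id 10 group 1/2 type resilient buckets 8 \
 *		idle_timer 60 unbalanced_timer 300
 *
 * requests a 60 second idle timer and a 300 second unbalanced timer;
 * when omitted, NH_RES_DEFAULT_IDLE_TIMER and
 * NH_RES_DEFAULT_UNBALANCED_TIMER apply.
 */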

static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
			    struct nlmsghdr *nlh, struct nh_config *cfg,
			    struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
	int err;

	err = nlmsg_parse(nlh, sizeof(*nhm), tb,
			  ARRAY_SIZE(rtm_nh_policy_new) - 1,
			  rtm_nh_policy_new, extack);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (nhm->resvd || nhm->nh_scope) {
		NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
		goto out;
	}
	if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
		goto out;
	}

	switch (nhm->nh_family) {
	case AF_INET:
	case AF_INET6:
		break;
	case AF_UNSPEC:
		if (tb[NHA_GROUP])
			break;
		fallthrough;
	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	memset(cfg, 0, sizeof(*cfg));
	cfg->nlflags = nlh->nlmsg_flags;
	cfg->nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->nlinfo.nlh = nlh;
	cfg->nlinfo.nl_net = net;

	cfg->nh_family = nhm->nh_family;
	cfg->nh_protocol = nhm->nh_protocol;
	cfg->nh_flags = nhm->nh_flags;

	if (tb[NHA_ID])
		cfg->nh_id = nla_get_u32(tb[NHA_ID]);

	if (tb[NHA_FDB]) {
		if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
		    tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
			goto out;
		}
		if (nhm->nh_flags) {
			NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
			goto out;
		}
		cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
	}

	if (tb[NHA_GROUP]) {
		if (nhm->nh_family != AF_UNSPEC) {
			NL_SET_ERR_MSG(extack, "Invalid family for group");
			goto out;
		}
		cfg->nh_grp = tb[NHA_GROUP];

		cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
		if (tb[NHA_GROUP_TYPE])
			cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);

		if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid group type");
			goto out;
		}
		err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb),
					  cfg->nh_grp_type, extack);
		if (err)
			goto out;

		if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
			err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
						       cfg, extack);

		if (tb[NHA_HW_STATS_ENABLE])
			cfg->nh_hw_stats = nla_get_u32(tb[NHA_HW_STATS_ENABLE]);

		/* no other attributes should be set */
		goto out;
	}

	if (tb[NHA_BLACKHOLE]) {
		if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
		    tb[NHA_ENCAP] || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
			NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
			goto out;
		}

		cfg->nh_blackhole = 1;
		err = 0;
		goto out;
	}

	if (!cfg->nh_fdb && !tb[NHA_OIF]) {
		NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
		goto out;
	}

	if (!cfg->nh_fdb && tb[NHA_OIF]) {
		cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
		if (cfg->nh_ifindex)
			cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);

		if (!cfg->dev) {
			NL_SET_ERR_MSG(extack, "Invalid device index");
			goto out;
		} else if (!(cfg->dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		} else if (!netif_carrier_ok(cfg->dev)) {
			NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
			err = -ENETDOWN;
			goto out;
		}
	}

	err = -EINVAL;
	if (tb[NHA_GATEWAY]) {
		struct nlattr *gwa = tb[NHA_GATEWAY];

		switch (cfg->nh_family) {
		case AF_INET:
			if (nla_len(gwa) != sizeof(u32)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv4 = nla_get_be32(gwa);
			break;
		case AF_INET6:
			if (nla_len(gwa) != sizeof(struct in6_addr)) {
				NL_SET_ERR_MSG(extack, "Invalid gateway");
				goto out;
			}
			cfg->gw.ipv6 = nla_get_in6_addr(gwa);
			break;
		default:
			NL_SET_ERR_MSG(extack,
				       "Unknown address family for gateway");
			goto out;
		}
	} else {
		/* device only nexthop (no gateway) */
		if (cfg->nh_flags & RTNH_F_ONLINK) {
			NL_SET_ERR_MSG(extack,
				       "ONLINK flag can not be set for nexthop without a gateway");
			goto out;
		}
	}

	if (tb[NHA_ENCAP]) {
		cfg->nh_encap = tb[NHA_ENCAP];

		if (!tb[NHA_ENCAP_TYPE]) {
			NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
			goto out;
		}

		cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
		err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
		if (err < 0)
			goto out;

	} else if (tb[NHA_ENCAP_TYPE]) {
		NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
		goto out;
	}

	if (tb[NHA_HW_STATS_ENABLE]) {
		NL_SET_ERR_MSG(extack, "Cannot enable nexthop hardware statistics for non-group nexthops");
		goto out;
	}

	err = 0;
out:
	return err;
}
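
/* Illustration only: attribute combinations accepted by the parser above,
 * in iproute2 form (ids, addresses and devices are hypothetical):
 *
 *	ip nexthop add id 1 blackhole
 *	ip nexthop add id 2 via 192.0.2.1 dev eth0
 *	ip nexthop add id 3 encap mpls 200 via 192.0.2.2 dev eth0
 *	ip nexthop add id 4 via 192.0.2.3 fdb
 *
 * Mutually exclusive combinations (group vs. per-nexthop attributes,
 * blackhole vs. gateway/oif, fdb vs. oif/encap/blackhole) are rejected
 * with the extack messages coded above.
 */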

/* rtnl */
static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nh_config cfg;
	struct nexthop *nh;
	int err;

	err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
	if (!err) {
		nh = nexthop_add(net, &cfg, extack);
		if (IS_ERR(nh))
			err = PTR_ERR(nh);
	}

	return err;
}

static int nh_valid_get_del_req(const struct nlmsghdr *nlh,
				struct nlattr **tb, u32 *id, u32 *op_flags,
				struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm = nlmsg_data(nlh);

	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header");
		return -EINVAL;
	}

	if (!tb[NHA_ID]) {
		NL_SET_ERR_MSG(extack, "Nexthop id is missing");
		return -EINVAL;
	}

	*id = nla_get_u32(tb[NHA_ID]);
	if (!(*id)) {
		NL_SET_ERR_MSG(extack, "Invalid nexthop id");
		return -EINVAL;
	}

	if (op_flags) {
		if (tb[NHA_OP_FLAGS])
			*op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
		else
			*op_flags = 0;
	}

	return 0;
}

/* rtnl */
static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_del)];
	struct net *net = sock_net(skb->sk);
	struct nl_info nlinfo = {
		.nlh = nlh,
		.nl_net = net,
		.portid = NETLINK_CB(skb).portid,
	};
	struct nexthop *nh;
	int err;
	u32 id;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_del) - 1, rtm_nh_policy_del,
			  extack);
	if (err < 0)
		return err;

	err = nh_valid_get_del_req(nlh, tb, &id, NULL, extack);
	if (err)
		return err;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return -ENOENT;

	remove_nexthop(net, nh, &nlinfo);

	return 0;
}

/* rtnl */
static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
	struct net *net = sock_net(in_skb->sk);
	struct sk_buff *skb = NULL;
	struct nexthop *nh;
	u32 op_flags;
	int err;
	u32 id;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_get) - 1, rtm_nh_policy_get,
			  extack);
	if (err < 0)
		return err;

	err = nh_valid_get_del_req(nlh, tb, &id, &op_flags, extack);
	if (err)
		return err;

	err = -ENOBUFS;
	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		goto errout_free;

	err = -ENOENT;
	nh = nexthop_find_by_id(net, id);
	if (!nh)
		goto errout_free;

	err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
			   nlh->nlmsg_seq, 0, op_flags);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
out:
	return err;
errout_free:
	kfree_skb(skb);
	goto out;
}

struct nh_dump_filter {
	u32 nh_id;
	int dev_idx;
	int master_idx;
	bool group_filter;
	bool fdb_filter;
	u32 res_bucket_nh_id;
	u32 op_flags;
};

static bool nh_dump_filtered(struct nexthop *nh,
			     struct nh_dump_filter *filter, u8 family)
{
	const struct net_device *dev;
	const struct nh_info *nhi;

	if (filter->group_filter && !nh->is_group)
		return true;

	if (!filter->dev_idx && !filter->master_idx && !family)
		return false;

	if (nh->is_group)
		return true;

	nhi = rtnl_dereference(nh->nh_info);
	if (family && nhi->family != family)
		return true;

	dev = nhi->fib_nhc.nhc_dev;
	if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
		return true;

	if (filter->master_idx) {
		struct net_device *master;

		if (!dev)
			return true;

		master = netdev_master_upper_dev_get((struct net_device *)dev);
		if (!master || master->ifindex != filter->master_idx)
			return true;
	}

	return false;
}

static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
			       struct nh_dump_filter *filter,
			       struct netlink_ext_ack *extack)
{
	struct nhmsg *nhm;
	u32 idx;

	if (tb[NHA_OIF]) {
		idx = nla_get_u32(tb[NHA_OIF]);
		if (idx > INT_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid device index");
			return -EINVAL;
		}
		filter->dev_idx = idx;
	}
	if (tb[NHA_MASTER]) {
		idx = nla_get_u32(tb[NHA_MASTER]);
		if (idx > INT_MAX) {
			NL_SET_ERR_MSG(extack, "Invalid master device index");
			return -EINVAL;
		}
		filter->master_idx = idx;
	}
	filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
	filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);

	nhm = nlmsg_data(nlh);
	if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
		NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
		return -EINVAL;
	}

	return 0;
}

static int nh_valid_dump_req(const struct nlmsghdr *nlh,
			     struct nh_dump_filter *filter,
			     struct netlink_callback *cb)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_dump) - 1,
			  rtm_nh_policy_dump, cb->extack);
	if (err < 0)
		return err;

	if (tb[NHA_OP_FLAGS])
		filter->op_flags = nla_get_u32(tb[NHA_OP_FLAGS]);
	else
		filter->op_flags = 0;

	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}

struct rtm_dump_nh_ctx {
	u32 idx;
};

static struct rtm_dump_nh_ctx *
rtm_dump_nh_ctx(struct netlink_callback *cb)
{
	struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
	return ctx;
}

static int rtm_dump_walk_nexthops(struct sk_buff *skb,
				  struct netlink_callback *cb,
				  struct rb_root *root,
				  struct rtm_dump_nh_ctx *ctx,
				  int (*nh_cb)(struct sk_buff *skb,
					       struct netlink_callback *cb,
					       struct nexthop *nh, void *data),
				  void *data)
{
	struct rb_node *node;
	int s_idx;
	int err;

	s_idx = ctx->idx;
	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		nh = rb_entry(node, struct nexthop, rb_node);
		if (nh->id < s_idx)
			continue;

		ctx->idx = nh->id;
		err = nh_cb(skb, cb, nh, data);
		if (err)
			return err;
	}

	return 0;
}

static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
			       struct nexthop *nh, void *data)
{
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	struct nh_dump_filter *filter = data;

	if (nh_dump_filtered(nh, filter, nhm->nh_family))
		return 0;

	return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
			    NETLINK_CB(cb->skb).portid,
			    cb->nlh->nlmsg_seq, NLM_F_MULTI, filter->op_flags);
}

/* rtnl */
static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
	struct net *net = sock_net(skb->sk);
	struct rb_root *root = &net->nexthop.rb_root;
	struct nh_dump_filter filter = {};
	int err;

	err = nh_valid_dump_req(cb->nlh, &filter, cb);
	if (err < 0)
		return err;

	err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
				     &rtm_dump_nexthop_cb, &filter);

	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	return err;
}
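
/* A dump can span multiple message batches; rtm_dump_walk_nexthops()
 * resumes from ctx->idx by skipping ids below the last one dumped, and
 * nl_dump_check_consistent() flags the dump with NLM_F_DUMP_INTR when
 * net->nexthop.seq changed while the walk was in flight.
 */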

static struct nexthop *
nexthop_find_group_resilient(struct net *net, u32 id,
			     struct netlink_ext_ack *extack)
{
	struct nh_group *nhg;
	struct nexthop *nh;

	nh = nexthop_find_by_id(net, id);
	if (!nh)
		return ERR_PTR(-ENOENT);

	if (!nh->is_group) {
		NL_SET_ERR_MSG(extack, "Not a nexthop group");
		return ERR_PTR(-EINVAL);
	}

	nhg = rtnl_dereference(nh->nh_grp);
	if (!nhg->resilient) {
		NL_SET_ERR_MSG(extack, "Nexthop group not of type resilient");
		return ERR_PTR(-EINVAL);
	}

	return nh;
}

static int nh_valid_dump_nhid(struct nlattr *attr, u32 *nh_id_p,
			      struct netlink_ext_ack *extack)
{
	u32 idx;

	if (attr) {
		idx = nla_get_u32(attr);
		if (!idx) {
			NL_SET_ERR_MSG(extack, "Invalid nexthop id");
			return -EINVAL;
		}
	} else {
		idx = 0;
	}

	*nh_id_p = idx;

	return 0;
}

static int nh_valid_dump_bucket_req(const struct nlmsghdr *nlh,
				    struct nh_dump_filter *filter,
				    struct netlink_callback *cb)
{
	struct nlattr *res_tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_dump)];
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump_bucket)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_dump_bucket) - 1,
			  rtm_nh_policy_dump_bucket, NULL);
	if (err < 0)
		return err;

	err = nh_valid_dump_nhid(tb[NHA_ID], &filter->nh_id, cb->extack);
	if (err)
		return err;

	if (tb[NHA_RES_BUCKET]) {
		size_t max = ARRAY_SIZE(rtm_nh_res_bucket_policy_dump) - 1;

		err = nla_parse_nested(res_tb, max,
				       tb[NHA_RES_BUCKET],
				       rtm_nh_res_bucket_policy_dump,
				       cb->extack);
		if (err < 0)
			return err;

		err = nh_valid_dump_nhid(res_tb[NHA_RES_BUCKET_NH_ID],
					 &filter->res_bucket_nh_id,
					 cb->extack);
		if (err)
			return err;
	}

	return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}

struct rtm_dump_res_bucket_ctx {
	struct rtm_dump_nh_ctx nh;
	u16 bucket_index;
};

static struct rtm_dump_res_bucket_ctx *
rtm_dump_res_bucket_ctx(struct netlink_callback *cb)
{
	struct rtm_dump_res_bucket_ctx *ctx = (void *)cb->ctx;

	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
	return ctx;
}

struct rtm_dump_nexthop_bucket_data {
	struct rtm_dump_res_bucket_ctx *ctx;
	struct nh_dump_filter filter;
};

static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb,
				      struct netlink_callback *cb,
				      struct nexthop *nh,
				      struct rtm_dump_nexthop_bucket_data *dd)
{
	u32 portid = NETLINK_CB(cb->skb).portid;
	struct nhmsg *nhm = nlmsg_data(cb->nlh);
	struct nh_res_table *res_table;
	struct nh_group *nhg;
	u16 bucket_index;
	int err;

	nhg = rtnl_dereference(nh->nh_grp);
	res_table = rtnl_dereference(nhg->res_table);
	for (bucket_index = dd->ctx->bucket_index;
	     bucket_index < res_table->num_nh_buckets;
	     bucket_index++) {
		struct nh_res_bucket *bucket;
		struct nh_grp_entry *nhge;

		bucket = &res_table->nh_buckets[bucket_index];
		nhge = rtnl_dereference(bucket->nh_entry);
		if (nh_dump_filtered(nhge->nh, &dd->filter, nhm->nh_family))
			continue;

		if (dd->filter.res_bucket_nh_id &&
		    dd->filter.res_bucket_nh_id != nhge->nh->id)
			continue;

		dd->ctx->bucket_index = bucket_index;
		err = nh_fill_res_bucket(skb, nh, bucket, bucket_index,
					 RTM_NEWNEXTHOPBUCKET, portid,
					 cb->nlh->nlmsg_seq, NLM_F_MULTI,
					 cb->extack);
		if (err)
			return err;
	}

	dd->ctx->bucket_index = 0;

	return 0;
}

static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb,
				      struct netlink_callback *cb,
				      struct nexthop *nh, void *data)
{
	struct rtm_dump_nexthop_bucket_data *dd = data;
	struct nh_group *nhg;

	if (!nh->is_group)
		return 0;

	nhg = rtnl_dereference(nh->nh_grp);
	if (!nhg->resilient)
		return 0;

	return rtm_dump_nexthop_bucket_nh(skb, cb, nh, dd);
}

/* rtnl */
static int rtm_dump_nexthop_bucket(struct sk_buff *skb,
				   struct netlink_callback *cb)
{
	struct rtm_dump_res_bucket_ctx *ctx = rtm_dump_res_bucket_ctx(cb);
	struct rtm_dump_nexthop_bucket_data dd = { .ctx = ctx };
	struct net *net = sock_net(skb->sk);
	struct nexthop *nh;
	int err;

	err = nh_valid_dump_bucket_req(cb->nlh, &dd.filter, cb);
	if (err)
		return err;

	if (dd.filter.nh_id) {
		nh = nexthop_find_group_resilient(net, dd.filter.nh_id,
						  cb->extack);
		if (IS_ERR(nh))
			return PTR_ERR(nh);
		err = rtm_dump_nexthop_bucket_nh(skb, cb, nh, &dd);
	} else {
		struct rb_root *root = &net->nexthop.rb_root;

		err = rtm_dump_walk_nexthops(skb, cb, root, &ctx->nh,
					     &rtm_dump_nexthop_bucket_cb, &dd);
	}

	cb->seq = net->nexthop.seq;
	nl_dump_check_consistent(cb, nlmsg_hdr(skb));
	return err;
}

static int nh_valid_get_bucket_req_res_bucket(struct nlattr *res,
					      u16 *bucket_index,
					      struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_bucket_policy_get)];
	int err;

	err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_bucket_policy_get) - 1,
			       res, rtm_nh_res_bucket_policy_get, extack);
	if (err < 0)
		return err;

	if (!tb[NHA_RES_BUCKET_INDEX]) {
		NL_SET_ERR_MSG(extack, "Bucket index is missing");
		return -EINVAL;
	}

	*bucket_index = nla_get_u16(tb[NHA_RES_BUCKET_INDEX]);
	return 0;
}

static int nh_valid_get_bucket_req(const struct nlmsghdr *nlh,
				   u32 *id, u16 *bucket_index,
				   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get_bucket)];
	int err;

	err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
			  ARRAY_SIZE(rtm_nh_policy_get_bucket) - 1,
			  rtm_nh_policy_get_bucket, extack);
	if (err < 0)
		return err;

	err = nh_valid_get_del_req(nlh, tb, id, NULL, extack);
	if (err)
		return err;

	if (!tb[NHA_RES_BUCKET]) {
		NL_SET_ERR_MSG(extack, "Bucket information is missing");
		return -EINVAL;
	}

	err = nh_valid_get_bucket_req_res_bucket(tb[NHA_RES_BUCKET],
						 bucket_index, extack);
	if (err)
		return err;

	return 0;
}

/* rtnl */
static int rtm_get_nexthop_bucket(struct sk_buff *in_skb, struct nlmsghdr *nlh,
				  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nh_res_table *res_table;
	struct sk_buff *skb = NULL;
	struct nh_group *nhg;
	struct nexthop *nh;
	u16 bucket_index;
	int err;
	u32 id;

	err = nh_valid_get_bucket_req(nlh, &id, &bucket_index, extack);
	if (err)
		return err;

	nh = nexthop_find_group_resilient(net, id, extack);
	if (IS_ERR(nh))
		return PTR_ERR(nh);

	nhg = rtnl_dereference(nh->nh_grp);
	res_table = rtnl_dereference(nhg->res_table);
	if (bucket_index >= res_table->num_nh_buckets) {
		NL_SET_ERR_MSG(extack, "Bucket index out of bounds");
		return -ENOENT;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	err = nh_fill_res_bucket(skb, nh, &res_table->nh_buckets[bucket_index],
				 bucket_index, RTM_NEWNEXTHOPBUCKET,
				 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
				 0, extack);
	if (err < 0) {
		WARN_ON(err == -EMSGSIZE);
		goto errout_free;
	}

	return rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	kfree_skb(skb);
	return err;
}

static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
	unsigned int hash = nh_dev_hashfn(dev->ifindex);
	struct net *net = dev_net(dev);
	struct hlist_head *head = &net->nexthop.devhash[hash];
	struct hlist_node *n;
	struct nh_info *nhi;

	hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
		if (nhi->fib_nhc.nhc_dev == dev) {
			if (nhi->family == AF_INET)
				fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
						   orig_mtu);
		}
	}
}

static int nh_netdev_event(struct notifier_block *this,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_info_ext *info_ext;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGE:
		if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
			nexthop_flush_dev(dev, event);
		break;
	case NETDEV_CHANGEMTU:
		info_ext = ptr;
		nexthop_sync_mtu(dev, info_ext->ext.mtu);
		rt_cache_flush(dev_net(dev));
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
	.notifier_call = nh_netdev_event,
};

static int nexthops_dump(struct net *net, struct notifier_block *nb,
			 enum nexthop_event_type event_type,
			 struct netlink_ext_ack *extack)
{
	struct rb_root *root = &net->nexthop.rb_root;
	struct rb_node *node;
	int err = 0;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct nexthop *nh;

		nh = rb_entry(node, struct nexthop, rb_node);
		err = call_nexthop_notifier(nb, net, event_type, nh, extack);
		if (err)
			break;
	}

	return err;
}

int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
			      struct netlink_ext_ack *extack)
{
	int err;

	rtnl_lock();
	err = nexthops_dump(net, nb, NEXTHOP_EVENT_REPLACE, extack);
	if (err)
		goto unlock;
	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
					       nb);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_nexthop_notifier);

int __unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	int err;

	err = blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
						 nb);
	if (!err)
		nexthops_dump(net, nb, NEXTHOP_EVENT_DEL, NULL);
	return err;
}
EXPORT_SYMBOL(__unregister_nexthop_notifier);

int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	int err;

	rtnl_lock();
	err = __unregister_nexthop_notifier(net, nb);
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(unregister_nexthop_notifier);
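
/* Sketch for driver authors (not actual driver code): registration
 * replays every existing nexthop as a NEXTHOP_EVENT_REPLACE through
 * nexthops_dump(), so the callback must cope with nexthops it has never
 * seen. my_nh_event() and my_nb are hypothetical names:
 *
 *	static int my_nh_event(struct notifier_block *nb,
 *			       unsigned long event, void *ptr)
 *	{
 *		struct nh_notifier_info *info = ptr;
 *
 *		switch (event) {
 *		case NEXTHOP_EVENT_REPLACE:
 *		case NEXTHOP_EVENT_DEL:
 *			// program or remove the nexthop in hardware
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_nh_event,
 *	};
 *
 *	err = register_nexthop_notifier(net, &my_nb, extack);
 */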

void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
{
	struct nexthop *nexthop;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop)
		goto out;

	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		nexthop->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		nexthop->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_set_hw_flags);

void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
				 bool offload, bool trap)
{
	struct nh_res_table *res_table;
	struct nh_res_bucket *bucket;
	struct nexthop *nexthop;
	struct nh_group *nhg;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	if (bucket_index >= nhg->res_table->num_nh_buckets)
		goto out;

	res_table = rcu_dereference(nhg->res_table);
	bucket = &res_table->nh_buckets[bucket_index];
	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		bucket->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		bucket->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);

void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity)
{
	struct nh_res_table *res_table;
	struct nexthop *nexthop;
	struct nh_group *nhg;
	u16 i;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* Instead of silently ignoring some buckets, demand that the sizes
	 * be the same.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (num_buckets != res_table->num_nh_buckets)
		goto out;

	for (i = 0; i < num_buckets; i++) {
		if (test_bit(i, activity))
			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);
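
/* Usage note (hypothetical driver sketch): hardware activity is polled
 * into a bitmap sized to the group's bucket count and reported here so
 * that idle-timer eviction does not tear down buckets still carrying
 * traffic:
 *
 *	unsigned long *activity = bitmap_zalloc(num_buckets, GFP_KERNEL);
 *	// set bits for buckets that saw traffic since the last poll
 *	nexthop_res_grp_activity_update(net, nh_id, num_buckets, activity);
 *	bitmap_free(activity);
 */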

static void __net_exit nexthop_net_exit_batch_rtnl(struct list_head *net_list,
						   struct list_head *dev_to_kill)
{
	struct net *net;

	ASSERT_RTNL();
	list_for_each_entry(net, net_list, exit_list)
		flush_all_nexthops(net);
}

static void __net_exit nexthop_net_exit(struct net *net)
{
	kfree(net->nexthop.devhash);
	net->nexthop.devhash = NULL;
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;
	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit = nexthop_net_exit,
	.exit_batch_rtnl = nexthop_net_exit_batch_rtnl,
};

static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
		      rtm_dump_nexthop, 0);

	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOPBUCKET, rtm_get_nexthop_bucket,
		      rtm_dump_nexthop_bucket, 0);

	return 0;
}
subsys_initcall(nexthop_init);