nexthop: Add netlink handlers for resilient nexthop groups
net/ipv4/nexthop.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Generic nexthop implementation
3  *
4  * Copyright (c) 2017-19 Cumulus Networks
5  * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
6  */
7
8 #include <linux/nexthop.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/slab.h>
11 #include <net/arp.h>
12 #include <net/ipv6_stubs.h>
13 #include <net/lwtunnel.h>
14 #include <net/ndisc.h>
15 #include <net/nexthop.h>
16 #include <net/route.h>
17 #include <net/sock.h>
18
19 #define NH_RES_DEFAULT_IDLE_TIMER       (120 * HZ)
20 #define NH_RES_DEFAULT_UNBALANCED_TIMER 0       /* No forced rebalancing. */
21
22 static void remove_nexthop(struct net *net, struct nexthop *nh,
23                            struct nl_info *nlinfo);
24
25 #define NH_DEV_HASHBITS  8
26 #define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)
27
28 static const struct nla_policy rtm_nh_policy_new[] = {
29         [NHA_ID]                = { .type = NLA_U32 },
30         [NHA_GROUP]             = { .type = NLA_BINARY },
31         [NHA_GROUP_TYPE]        = { .type = NLA_U16 },
32         [NHA_BLACKHOLE]         = { .type = NLA_FLAG },
33         [NHA_OIF]               = { .type = NLA_U32 },
34         [NHA_GATEWAY]           = { .type = NLA_BINARY },
35         [NHA_ENCAP_TYPE]        = { .type = NLA_U16 },
36         [NHA_ENCAP]             = { .type = NLA_NESTED },
37         [NHA_FDB]               = { .type = NLA_FLAG },
38         [NHA_RES_GROUP]         = { .type = NLA_NESTED },
39 };
40
41 static const struct nla_policy rtm_nh_policy_get[] = {
42         [NHA_ID]                = { .type = NLA_U32 },
43 };
44
45 static const struct nla_policy rtm_nh_policy_dump[] = {
46         [NHA_OIF]               = { .type = NLA_U32 },
47         [NHA_GROUPS]            = { .type = NLA_FLAG },
48         [NHA_MASTER]            = { .type = NLA_U32 },
49         [NHA_FDB]               = { .type = NLA_FLAG },
50 };
51
52 static const struct nla_policy rtm_nh_res_policy_new[] = {
53         [NHA_RES_GROUP_BUCKETS]                 = { .type = NLA_U16 },
54         [NHA_RES_GROUP_IDLE_TIMER]              = { .type = NLA_U32 },
55         [NHA_RES_GROUP_UNBALANCED_TIMER]        = { .type = NLA_U32 },
56 };
57
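/*
 * Editor's sketch, not part of the original file: one way the nested
 * NHA_RES_GROUP attribute could be parsed against rtm_nh_res_policy_new,
 * falling back to the NH_RES_DEFAULT_* values above when an attribute is
 * absent. The helper name is an assumption; the file's actual handler may
 * differ.
 */
static int nh_res_group_parse_sketch(struct nlattr *res, struct nh_config *cfg,
                                     struct netlink_ext_ack *extack)
{
        struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
        int err;

        err = nla_parse_nested(tb, ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
                               res, rtm_nh_res_policy_new, extack);
        if (err < 0)
                return err;

        if (tb[NHA_RES_GROUP_BUCKETS])
                cfg->nh_grp_res_num_buckets =
                        nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);

        if (tb[NHA_RES_GROUP_IDLE_TIMER])
                cfg->nh_grp_res_idle_timer =
                        clock_t_to_jiffies(nla_get_u32(tb[NHA_RES_GROUP_IDLE_TIMER]));
        else
                cfg->nh_grp_res_idle_timer = NH_RES_DEFAULT_IDLE_TIMER;

        if (tb[NHA_RES_GROUP_UNBALANCED_TIMER])
                cfg->nh_grp_res_unbalanced_timer =
                        clock_t_to_jiffies(nla_get_u32(tb[NHA_RES_GROUP_UNBALANCED_TIMER]));
        else
                cfg->nh_grp_res_unbalanced_timer = NH_RES_DEFAULT_UNBALANCED_TIMER;

        return 0;
}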
58 static bool nexthop_notifiers_is_empty(struct net *net)
59 {
60         return !net->nexthop.notifier_chain.head;
61 }
62
63 static void
64 __nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
65                                const struct nh_info *nhi)
66 {
67         nh_info->dev = nhi->fib_nhc.nhc_dev;
68         nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
69         if (nh_info->gw_family == AF_INET)
70                 nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
71         else if (nh_info->gw_family == AF_INET6)
72                 nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;
73
74         nh_info->is_reject = nhi->reject_nh;
75         nh_info->is_fdb = nhi->fdb_nh;
76         nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
77 }
78
79 static int nh_notifier_single_info_init(struct nh_notifier_info *info,
80                                         const struct nexthop *nh)
81 {
82         struct nh_info *nhi = rtnl_dereference(nh->nh_info);
83
84         info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
85         info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
86         if (!info->nh)
87                 return -ENOMEM;
88
89         __nh_notifier_single_info_init(info->nh, nhi);
90
91         return 0;
92 }
93
94 static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
95 {
96         kfree(info->nh);
97 }
98
99 static int nh_notifier_mp_info_init(struct nh_notifier_info *info,
100                                     struct nh_group *nhg)
101 {
102         u16 num_nh = nhg->num_nh;
103         int i;
104
105         info->type = NH_NOTIFIER_INFO_TYPE_GRP;
106         info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
107                                GFP_KERNEL);
108         if (!info->nh_grp)
109                 return -ENOMEM;
110
111         info->nh_grp->num_nh = num_nh;
112         info->nh_grp->is_fdb = nhg->fdb_nh;
113
114         for (i = 0; i < num_nh; i++) {
115                 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
116                 struct nh_info *nhi;
117
118                 nhi = rtnl_dereference(nhge->nh->nh_info);
119                 info->nh_grp->nh_entries[i].id = nhge->nh->id;
120                 info->nh_grp->nh_entries[i].weight = nhge->weight;
121                 __nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
122                                                nhi);
123         }
124
125         return 0;
126 }
127
128 static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
129                                            struct nh_group *nhg)
130 {
131         struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
132         u16 num_nh_buckets = res_table->num_nh_buckets;
133         unsigned long size;
134         u16 i;
135
136         info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
137         size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
138         info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
139                                        __GFP_NOWARN);
140         if (!info->nh_res_table)
141                 return -ENOMEM;
142
143         info->nh_res_table->num_nh_buckets = num_nh_buckets;
144
145         for (i = 0; i < num_nh_buckets; i++) {
146                 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
147                 struct nh_grp_entry *nhge;
148                 struct nh_info *nhi;
149
150                 nhge = rtnl_dereference(bucket->nh_entry);
151                 nhi = rtnl_dereference(nhge->nh->nh_info);
152                 __nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
153                                                nhi);
154         }
155
156         return 0;
157 }
158
159 static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
160                                      const struct nexthop *nh)
161 {
162         struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
163
164         if (nhg->mpath)
165                 return nh_notifier_mp_info_init(info, nhg);
166         else if (nhg->resilient)
167                 return nh_notifier_res_table_info_init(info, nhg);
168         return -EINVAL;
169 }
170
171 static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
172                                       const struct nexthop *nh)
173 {
174         struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
175
176         if (nhg->mpath)
177                 kfree(info->nh_grp);
178         else if (nhg->resilient)
179                 vfree(info->nh_res_table);
180 }
181
182 static int nh_notifier_info_init(struct nh_notifier_info *info,
183                                  const struct nexthop *nh)
184 {
185         info->id = nh->id;
186
187         if (nh->is_group)
188                 return nh_notifier_grp_info_init(info, nh);
189         else
190                 return nh_notifier_single_info_init(info, nh);
191 }
192
193 static void nh_notifier_info_fini(struct nh_notifier_info *info,
194                                   const struct nexthop *nh)
195 {
196         if (nh->is_group)
197                 nh_notifier_grp_info_fini(info, nh);
198         else
199                 nh_notifier_single_info_fini(info);
200 }
201
202 static int call_nexthop_notifiers(struct net *net,
203                                   enum nexthop_event_type event_type,
204                                   struct nexthop *nh,
205                                   struct netlink_ext_ack *extack)
206 {
207         struct nh_notifier_info info = {
208                 .net = net,
209                 .extack = extack,
210         };
211         int err;
212
213         ASSERT_RTNL();
214
215         if (nexthop_notifiers_is_empty(net))
216                 return 0;
217
218         err = nh_notifier_info_init(&info, nh);
219         if (err) {
220                 NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
221                 return err;
222         }
223
224         err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
225                                            event_type, &info);
226         nh_notifier_info_fini(&info, nh);
227
228         return notifier_to_errno(err);
229 }
230
231 static int
232 nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
233                                       bool force, unsigned int *p_idle_timer_ms)
234 {
235         struct nh_res_table *res_table;
236         struct nh_group *nhg;
237         struct nexthop *nh;
238         int err = 0;
239
240         /* When 'force' is false, nexthop bucket replacement is performed
241          * because the bucket was deemed to be idle. In this case, capable
242          * listeners can choose to perform an atomic replacement: The bucket is
243          * only replaced if it is inactive. However, if the idle timer interval
244          * is smaller than the interval in which a listener is querying
245          * buckets' activity from the device, then atomic replacement should
246          * not be tried. Pass the idle timer value to listeners, so that they
247          * could determine which type of replacement to perform.
248          * can determine which type of replacement to perform.
249         if (force) {
250                 *p_idle_timer_ms = 0;
251                 return 0;
252         }
253
254         rcu_read_lock();
255
256         nh = nexthop_find_by_id(info->net, info->id);
257         if (!nh) {
258                 err = -EINVAL;
259                 goto out;
260         }
261
262         nhg = rcu_dereference(nh->nh_grp);
263         res_table = rcu_dereference(nhg->res_table);
264         *p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);
265
266 out:
267         rcu_read_unlock();
268
269         return err;
270 }
271
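/*
 * Editor's sketch, not part of the original file: how a listener could use
 * the idle timer value computed above to choose between an atomic and a
 * plain bucket rewrite, per the comment in
 * nh_notifier_res_bucket_idle_timer_get(). The helper name and the
 * activity_poll_ms parameter are illustrative assumptions.
 */
static bool
nh_listener_can_try_atomic_replace(const struct nh_notifier_res_bucket_info *info,
                                   unsigned int activity_poll_ms)
{
        /* A forced replacement must happen regardless of activity. */
        if (info->force)
                return false;

        /* If the bucket can go idle and become busy again between two
         * activity polls, an "only if inactive" rewrite is unreliable;
         * perform an unconditional rewrite instead.
         */
        return info->idle_timer_ms >= activity_poll_ms;
}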
272 static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
273                                             u16 bucket_index, bool force,
274                                             struct nh_info *oldi,
275                                             struct nh_info *newi)
276 {
277         unsigned int idle_timer_ms;
278         int err;
279
280         err = nh_notifier_res_bucket_idle_timer_get(info, force,
281                                                     &idle_timer_ms);
282         if (err)
283                 return err;
284
285         info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
286         info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
287                                       GFP_KERNEL);
288         if (!info->nh_res_bucket)
289                 return -ENOMEM;
290
291         info->nh_res_bucket->bucket_index = bucket_index;
292         info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
293         info->nh_res_bucket->force = force;
294         __nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
295         __nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
296         return 0;
297 }
298
299 static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
300 {
301         kfree(info->nh_res_bucket);
302 }
303
304 static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
305                                                u16 bucket_index, bool force,
306                                                struct nh_info *oldi,
307                                                struct nh_info *newi,
308                                                struct netlink_ext_ack *extack)
309 {
310         struct nh_notifier_info info = {
311                 .net = net,
312                 .extack = extack,
313                 .id = nhg_id,
314         };
315         int err;
316
317         if (nexthop_notifiers_is_empty(net))
318                 return 0;
319
320         err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
321                                                oldi, newi);
322         if (err)
323                 return err;
324
325         err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
326                                            NEXTHOP_EVENT_BUCKET_REPLACE, &info);
327         nh_notifier_res_bucket_info_fini(&info);
328
329         return notifier_to_errno(err);
330 }
331
332 /* There are three users of RES_TABLE, and NHs etc. referenced from there:
333  *
334  * 1) a collection of callbacks for NH maintenance. This operates under
335  *    RTNL,
336  * 2) the delayed work that gradually balances the resilient table,
337  * 3) and nexthop_select_path(), operating under RCU.
338  *
339  * Both the delayed work and the RTNL block are writers, and need to
340  * maintain mutual exclusion. Since there are only two and well-known
341  * writers for each table, the RTNL code can make sure it has exclusive
342  * access thus:
343  *
344  * - Have the DW operate without locking;
345  * - synchronously cancel the DW;
346  * - do the writing;
347  * - if the write was not actually a delete, call upkeep, which schedules
348  *   DW again if necessary.
349  *
350  * The functions that are always called from the RTNL context use
351  * rtnl_dereference(). The functions that can also be called from the DW do
352  * a raw dereference and rely on the above mutual exclusion scheme.
353  */
354 #define nh_res_dereference(p) (rcu_dereference_raw(p))
355
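/*
 * Editor's sketch, not part of the original file: the RTNL-side write
 * pattern described in the comment above, in schematic form. The function
 * name is an assumption and the actual modification is elided.
 */
static void nh_res_table_rtnl_write_sketch(struct nh_res_table *res_table)
{
        ASSERT_RTNL();

        /* 1) Synchronously cancel the DW so that this context is the only
         *    remaining writer.
         */
        cancel_delayed_work_sync(&res_table->upkeep_dw);

        /* 2) ... modify the table here ... */

        /* 3) Unless the write was a delete, call nh_res_table_upkeep()
         *    (defined later in this file), which reschedules the DW if the
         *    table is still unbalanced.
         */
}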
356 static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
357                                              u16 bucket_index, bool force,
358                                              struct nexthop *old_nh,
359                                              struct nexthop *new_nh,
360                                              struct netlink_ext_ack *extack)
361 {
362         struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
363         struct nh_info *newi = nh_res_dereference(new_nh->nh_info);
364
365         return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
366                                                    force, oldi, newi, extack);
367 }
368
369 static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
370                                             struct netlink_ext_ack *extack)
371 {
372         struct nh_notifier_info info = {
373                 .net = net,
374                 .extack = extack,
375         };
376         struct nh_group *nhg;
377         int err;
378
379         ASSERT_RTNL();
380
381         if (nexthop_notifiers_is_empty(net))
382                 return 0;
383
384         /* At this point, the nexthop buckets are still not populated. Only
385          * emit a notification with the logical nexthops, so that a listener
386          * could potentially veto it in case of unsupported configuration.
387          */
388         nhg = rtnl_dereference(nh->nh_grp);
389         err = nh_notifier_mp_info_init(&info, nhg);
390         if (err) {
391                 NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
392                 return err;
393         }
394
395         err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
396                                            NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
397                                            &info);
398         kfree(info.nh_grp);
399
400         return notifier_to_errno(err);
401 }
402
403 static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
404                                  enum nexthop_event_type event_type,
405                                  struct nexthop *nh,
406                                  struct netlink_ext_ack *extack)
407 {
408         struct nh_notifier_info info = {
409                 .net = net,
410                 .extack = extack,
411         };
412         int err;
413
414         err = nh_notifier_info_init(&info, nh);
415         if (err)
416                 return err;
417
418         err = nb->notifier_call(nb, event_type, &info);
419         nh_notifier_info_fini(&info, nh);
420
421         return notifier_to_errno(err);
422 }
423
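/*
 * Editor's sketch, not part of the original file: the general shape of a
 * listener invoked through the chains above. The function name is an
 * assumption; real consumers are drivers that register through
 * register_nexthop_notifier().
 */
static int nh_listener_event_sketch(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct nh_notifier_info *info = ptr;

        if (event == NEXTHOP_EVENT_REPLACE &&
            info->type == NH_NOTIFIER_INFO_TYPE_RES_TABLE) {
                /* Program all info->nh_res_table->num_nh_buckets buckets
                 * into the hardware here, or return an error wrapped in
                 * notifier_from_errno() to veto the replacement.
                 */
        }

        return NOTIFY_DONE;
}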
424 static unsigned int nh_dev_hashfn(unsigned int val)
425 {
426         unsigned int mask = NH_DEV_HASHSIZE - 1;
427
428         return (val ^
429                 (val >> NH_DEV_HASHBITS) ^
430                 (val >> (NH_DEV_HASHBITS * 2))) & mask;
431 }
432
433 static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
434 {
435         struct net_device *dev = nhi->fib_nhc.nhc_dev;
436         struct hlist_head *head;
437         unsigned int hash;
438
439         WARN_ON(!dev);
440
441         hash = nh_dev_hashfn(dev->ifindex);
442         head = &net->nexthop.devhash[hash];
443         hlist_add_head(&nhi->dev_hash, head);
444 }
445
446 static void nexthop_free_group(struct nexthop *nh)
447 {
448         struct nh_group *nhg;
449         int i;
450
451         nhg = rcu_dereference_raw(nh->nh_grp);
452         for (i = 0; i < nhg->num_nh; ++i) {
453                 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
454
455                 WARN_ON(!list_empty(&nhge->nh_list));
456                 nexthop_put(nhge->nh);
457         }
458
459         WARN_ON(nhg->spare == nhg);
460
461         if (nhg->resilient)
462                 vfree(rcu_dereference_raw(nhg->res_table));
463
464         kfree(nhg->spare);
465         kfree(nhg);
466 }
467
468 static void nexthop_free_single(struct nexthop *nh)
469 {
470         struct nh_info *nhi;
471
472         nhi = rcu_dereference_raw(nh->nh_info);
473         switch (nhi->family) {
474         case AF_INET:
475                 fib_nh_release(nh->net, &nhi->fib_nh);
476                 break;
477         case AF_INET6:
478                 ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
479                 break;
480         }
481         kfree(nhi);
482 }
483
484 void nexthop_free_rcu(struct rcu_head *head)
485 {
486         struct nexthop *nh = container_of(head, struct nexthop, rcu);
487
488         if (nh->is_group)
489                 nexthop_free_group(nh);
490         else
491                 nexthop_free_single(nh);
492
493         kfree(nh);
494 }
495 EXPORT_SYMBOL_GPL(nexthop_free_rcu);
496
497 static struct nexthop *nexthop_alloc(void)
498 {
499         struct nexthop *nh;
500
501         nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
502         if (nh) {
503                 INIT_LIST_HEAD(&nh->fi_list);
504                 INIT_LIST_HEAD(&nh->f6i_list);
505                 INIT_LIST_HEAD(&nh->grp_list);
506                 INIT_LIST_HEAD(&nh->fdb_list);
507         }
508         return nh;
509 }
510
511 static struct nh_group *nexthop_grp_alloc(u16 num_nh)
512 {
513         struct nh_group *nhg;
514
515         nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
516         if (nhg)
517                 nhg->num_nh = num_nh;
518
519         return nhg;
520 }
521
522 static void nh_res_table_upkeep_dw(struct work_struct *work);
523
524 static struct nh_res_table *
525 nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
526 {
527         const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
528         struct nh_res_table *res_table;
529         unsigned long size;
530
531         size = struct_size(res_table, nh_buckets, num_nh_buckets);
532         res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
533         if (!res_table)
534                 return NULL;
535
536         res_table->net = net;
537         res_table->nhg_id = nhg_id;
538         INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
539         INIT_LIST_HEAD(&res_table->uw_nh_entries);
540         res_table->idle_timer = cfg->nh_grp_res_idle_timer;
541         res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
542         res_table->num_nh_buckets = num_nh_buckets;
543         return res_table;
544 }
545
546 static void nh_base_seq_inc(struct net *net)
547 {
548         while (++net->nexthop.seq == 0)
549                 ;
550 }
551
552 /* no reference taken; rcu lock or rtnl must be held */
553 struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
554 {
555         struct rb_node **pp, *parent = NULL, *next;
556
557         pp = &net->nexthop.rb_root.rb_node;
558         while (1) {
559                 struct nexthop *nh;
560
561                 next = rcu_dereference_raw(*pp);
562                 if (!next)
563                         break;
564                 parent = next;
565
566                 nh = rb_entry(parent, struct nexthop, rb_node);
567                 if (id < nh->id)
568                         pp = &next->rb_left;
569                 else if (id > nh->id)
570                         pp = &next->rb_right;
571                 else
572                         return nh;
573         }
574         return NULL;
575 }
576 EXPORT_SYMBOL_GPL(nexthop_find_by_id);
577
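/*
 * Editor's sketch, not part of the original file: a minimal caller of
 * nexthop_find_by_id() from a context holding neither RTNL nor RCU. Since
 * no reference is taken, the returned nexthop may only be used inside the
 * RCU read-side section.
 */
static bool nexthop_id_exists_sketch(struct net *net, u32 id)
{
        bool found;

        rcu_read_lock();
        found = !!nexthop_find_by_id(net, id);
        rcu_read_unlock();

        return found;
}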
578 /* used for auto id allocation; called with rtnl held */
579 static u32 nh_find_unused_id(struct net *net)
580 {
581         u32 id_start = net->nexthop.last_id_allocated;
582
583         while (1) {
584                 net->nexthop.last_id_allocated++;
585                 if (net->nexthop.last_id_allocated == id_start)
586                         break;
587
588                 if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
589                         return net->nexthop.last_id_allocated;
590         }
591         return 0;
592 }
593
594 static void nh_res_time_set_deadline(unsigned long next_time,
595                                      unsigned long *deadline)
596 {
597         if (time_before(next_time, *deadline))
598                 *deadline = next_time;
599 }
600
601 static clock_t nh_res_table_unbalanced_time(struct nh_res_table *res_table)
602 {
603         if (list_empty(&res_table->uw_nh_entries))
604                 return 0;
605         return jiffies_delta_to_clock_t(jiffies - res_table->unbalanced_since);
606 }
607
608 static int nla_put_nh_group_res(struct sk_buff *skb, struct nh_group *nhg)
609 {
610         struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
611         struct nlattr *nest;
612
613         nest = nla_nest_start(skb, NHA_RES_GROUP);
614         if (!nest)
615                 return -EMSGSIZE;
616
617         if (nla_put_u16(skb, NHA_RES_GROUP_BUCKETS,
618                         res_table->num_nh_buckets) ||
619             nla_put_u32(skb, NHA_RES_GROUP_IDLE_TIMER,
620                         jiffies_to_clock_t(res_table->idle_timer)) ||
621             nla_put_u32(skb, NHA_RES_GROUP_UNBALANCED_TIMER,
622                         jiffies_to_clock_t(res_table->unbalanced_timer)) ||
623             nla_put_u64_64bit(skb, NHA_RES_GROUP_UNBALANCED_TIME,
624                               nh_res_table_unbalanced_time(res_table),
625                               NHA_RES_GROUP_PAD))
626                 goto nla_put_failure;
627
628         nla_nest_end(skb, nest);
629         return 0;
630
631 nla_put_failure:
632         nla_nest_cancel(skb, nest);
633         return -EMSGSIZE;
634 }
635
636 static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
637 {
638         struct nexthop_grp *p;
639         size_t len = nhg->num_nh * sizeof(*p);
640         struct nlattr *nla;
641         u16 group_type = 0;
642         int i;
643
644         if (nhg->mpath)
645                 group_type = NEXTHOP_GRP_TYPE_MPATH;
646         else if (nhg->resilient)
647                 group_type = NEXTHOP_GRP_TYPE_RES;
648
649         if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
650                 goto nla_put_failure;
651
652         nla = nla_reserve(skb, NHA_GROUP, len);
653         if (!nla)
654                 goto nla_put_failure;
655
656         p = nla_data(nla);
657         for (i = 0; i < nhg->num_nh; ++i) {
658                 p->id = nhg->nh_entries[i].nh->id;
659                 p->weight = nhg->nh_entries[i].weight - 1;
660                 p += 1;
661         }
662
663         if (nhg->resilient && nla_put_nh_group_res(skb, nhg))
664                 goto nla_put_failure;
665
666         return 0;
667
668 nla_put_failure:
669         return -EMSGSIZE;
670 }
671
672 static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
673                         int event, u32 portid, u32 seq, unsigned int nlflags)
674 {
675         struct fib6_nh *fib6_nh;
676         struct fib_nh *fib_nh;
677         struct nlmsghdr *nlh;
678         struct nh_info *nhi;
679         struct nhmsg *nhm;
680
681         nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
682         if (!nlh)
683                 return -EMSGSIZE;
684
685         nhm = nlmsg_data(nlh);
686         nhm->nh_family = AF_UNSPEC;
687         nhm->nh_flags = nh->nh_flags;
688         nhm->nh_protocol = nh->protocol;
689         nhm->nh_scope = 0;
690         nhm->resvd = 0;
691
692         if (nla_put_u32(skb, NHA_ID, nh->id))
693                 goto nla_put_failure;
694
695         if (nh->is_group) {
696                 struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
697
698                 if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
699                         goto nla_put_failure;
700                 if (nla_put_nh_group(skb, nhg))
701                         goto nla_put_failure;
702                 goto out;
703         }
704
705         nhi = rtnl_dereference(nh->nh_info);
706         nhm->nh_family = nhi->family;
707         if (nhi->reject_nh) {
708                 if (nla_put_flag(skb, NHA_BLACKHOLE))
709                         goto nla_put_failure;
710                 goto out;
711         } else if (nhi->fdb_nh) {
712                 if (nla_put_flag(skb, NHA_FDB))
713                         goto nla_put_failure;
714         } else {
715                 const struct net_device *dev;
716
717                 dev = nhi->fib_nhc.nhc_dev;
718                 if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
719                         goto nla_put_failure;
720         }
721
722         nhm->nh_scope = nhi->fib_nhc.nhc_scope;
723         switch (nhi->family) {
724         case AF_INET:
725                 fib_nh = &nhi->fib_nh;
726                 if (fib_nh->fib_nh_gw_family &&
727                     nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
728                         goto nla_put_failure;
729                 break;
730
731         case AF_INET6:
732                 fib6_nh = &nhi->fib6_nh;
733                 if (fib6_nh->fib_nh_gw_family &&
734                     nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
735                         goto nla_put_failure;
736                 break;
737         }
738
739         if (nhi->fib_nhc.nhc_lwtstate &&
740             lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
741                                 NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
742                 goto nla_put_failure;
743
744 out:
745         nlmsg_end(skb, nlh);
746         return 0;
747
748 nla_put_failure:
749         nlmsg_cancel(skb, nlh);
750         return -EMSGSIZE;
751 }
752
753 static size_t nh_nlmsg_size_grp_res(struct nh_group *nhg)
754 {
755         return nla_total_size(0) +      /* NHA_RES_GROUP */
756                 nla_total_size(2) +     /* NHA_RES_GROUP_BUCKETS */
757                 nla_total_size(4) +     /* NHA_RES_GROUP_IDLE_TIMER */
758                 nla_total_size(4) +     /* NHA_RES_GROUP_UNBALANCED_TIMER */
759                 nla_total_size_64bit(8);/* NHA_RES_GROUP_UNBALANCED_TIME */
760 }
761
762 static size_t nh_nlmsg_size_grp(struct nexthop *nh)
763 {
764         struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
765         size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;
766         size_t tot = nla_total_size(sz) +
767                 nla_total_size(2); /* NHA_GROUP_TYPE */
768
769         if (nhg->resilient)
770                 tot += nh_nlmsg_size_grp_res(nhg);
771
772         return tot;
773 }
774
775 static size_t nh_nlmsg_size_single(struct nexthop *nh)
776 {
777         struct nh_info *nhi = rtnl_dereference(nh->nh_info);
778         size_t sz;
779
780         /* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
781          * are mutually exclusive
782          */
783         sz = nla_total_size(4);  /* NHA_OIF */
784
785         switch (nhi->family) {
786         case AF_INET:
787                 if (nhi->fib_nh.fib_nh_gw_family)
788                         sz += nla_total_size(4);  /* NHA_GATEWAY */
789                 break;
790
791         case AF_INET6:
792                 /* NHA_GATEWAY */
793                 if (nhi->fib6_nh.fib_nh_gw_family)
794                         sz += nla_total_size(sizeof(const struct in6_addr));
795                 break;
796         }
797
798         if (nhi->fib_nhc.nhc_lwtstate) {
799                 sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
800                 sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
801         }
802
803         return sz;
804 }
805
806 static size_t nh_nlmsg_size(struct nexthop *nh)
807 {
808         size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));
809
810         sz += nla_total_size(4); /* NHA_ID */
811
812         if (nh->is_group)
813                 sz += nh_nlmsg_size_grp(nh);
814         else
815                 sz += nh_nlmsg_size_single(nh);
816
817         return sz;
818 }
819
820 static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
821 {
822         unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
823         u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
824         struct sk_buff *skb;
825         int err = -ENOBUFS;
826
827         skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
828         if (!skb)
829                 goto errout;
830
831         err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
832         if (err < 0) {
833                 /* -EMSGSIZE implies BUG in nh_nlmsg_size() */
834                 WARN_ON(err == -EMSGSIZE);
835                 kfree_skb(skb);
836                 goto errout;
837         }
838
839         rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
840                     info->nlh, gfp_any());
841         return;
842 errout:
843         if (err < 0)
844                 rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
845 }
846
847 static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
848 {
849         return (unsigned long)atomic_long_read(&bucket->used_time);
850 }
851
852 static unsigned long
853 nh_res_bucket_idle_point(const struct nh_res_table *res_table,
854                          const struct nh_res_bucket *bucket,
855                          unsigned long now)
856 {
857         unsigned long time = nh_res_bucket_used_time(bucket);
858
859         /* Bucket was not used since it was migrated. The idle time is now. */
860         if (time == bucket->migrated_time)
861                 return now;
862
863         return time + res_table->idle_timer;
864 }
865
866 static unsigned long
867 nh_res_table_unb_point(const struct nh_res_table *res_table)
868 {
869         return res_table->unbalanced_since + res_table->unbalanced_timer;
870 }
871
872 static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
873                                    struct nh_res_bucket *bucket)
874 {
875         unsigned long now = jiffies;
876
877         atomic_long_set(&bucket->used_time, (long)now);
878         bucket->migrated_time = now;
879 }
880
881 static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
882 {
883         atomic_long_set(&bucket->used_time, (long)jiffies);
884 }
885
886 static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
887                            bool *is_fdb, struct netlink_ext_ack *extack)
888 {
889         if (nh->is_group) {
890                 struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
891
892                 /* Nesting groups within groups is not supported. */
893                 if (nhg->mpath) {
894                         NL_SET_ERR_MSG(extack,
895                                        "Multipath group can not be a nexthop within a group");
896                         return false;
897                 }
898                 if (nhg->resilient) {
899                         NL_SET_ERR_MSG(extack,
900                                        "Resilient group can not be a nexthop within a group");
901                         return false;
902                 }
903                 *is_fdb = nhg->fdb_nh;
904         } else {
905                 struct nh_info *nhi = rtnl_dereference(nh->nh_info);
906
907                 if (nhi->reject_nh && npaths > 1) {
908                         NL_SET_ERR_MSG(extack,
909                                        "Blackhole nexthop can not be used in a group with more than 1 path");
910                         return false;
911                 }
912                 *is_fdb = nhi->fdb_nh;
913         }
914
915         return true;
916 }
917
918 static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
919                                    struct netlink_ext_ack *extack)
920 {
921         struct nh_info *nhi;
922
923         nhi = rtnl_dereference(nh->nh_info);
924
925         if (!nhi->fdb_nh) {
926                 NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
927                 return -EINVAL;
928         }
929
930         if (*nh_family == AF_UNSPEC) {
931                 *nh_family = nhi->family;
932         } else if (*nh_family != nhi->family) {
933                 NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
934                 return -EINVAL;
935         }
936
937         return 0;
938 }
939
940 static int nh_check_attr_group(struct net *net,
941                                struct nlattr *tb[], size_t tb_size,
942                                u16 nh_grp_type, struct netlink_ext_ack *extack)
943 {
944         unsigned int len = nla_len(tb[NHA_GROUP]);
945         u8 nh_family = AF_UNSPEC;
946         struct nexthop_grp *nhg;
947         unsigned int i, j;
948         u8 nhg_fdb = 0;
949
950         if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
951                 NL_SET_ERR_MSG(extack,
952                                "Invalid length for nexthop group attribute");
953                 return -EINVAL;
954         }
955
956         /* convert len to number of nexthop ids */
957         len /= sizeof(*nhg);
958
959         nhg = nla_data(tb[NHA_GROUP]);
960         for (i = 0; i < len; ++i) {
961                 if (nhg[i].resvd1 || nhg[i].resvd2) {
962                         NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
963                         return -EINVAL;
964                 }
965                 if (nhg[i].weight > 254) {
966                         NL_SET_ERR_MSG(extack, "Invalid value for weight");
967                         return -EINVAL;
968                 }
969                 for (j = i + 1; j < len; ++j) {
970                         if (nhg[i].id == nhg[j].id) {
971                                 NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
972                                 return -EINVAL;
973                         }
974                 }
975         }
976
977         if (tb[NHA_FDB])
978                 nhg_fdb = 1;
979         nhg = nla_data(tb[NHA_GROUP]);
980         for (i = 0; i < len; ++i) {
981                 struct nexthop *nh;
982                 bool is_fdb_nh;
983
984                 nh = nexthop_find_by_id(net, nhg[i].id);
985                 if (!nh) {
986                         NL_SET_ERR_MSG(extack, "Invalid nexthop id");
987                         return -EINVAL;
988                 }
989                 if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
990                         return -EINVAL;
991
992                 if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
993                         return -EINVAL;
994
995                 if (!nhg_fdb && is_fdb_nh) {
996                         NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
997                         return -EINVAL;
998                 }
999         }
1000         for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
1001                 if (!tb[i])
1002                         continue;
1003                 switch (i) {
1004                 case NHA_FDB:
1005                         continue;
1006                 case NHA_RES_GROUP:
1007                         if (nh_grp_type == NEXTHOP_GRP_TYPE_RES)
1008                                 continue;
1009                         break;
1010                 }
1011                 NL_SET_ERR_MSG(extack,
1012                                "No other attributes can be set in nexthop groups");
1013                 return -EINVAL;
1014         }
1015
1016         return 0;
1017 }
1018
1019 static bool ipv6_good_nh(const struct fib6_nh *nh)
1020 {
1021         int state = NUD_REACHABLE;
1022         struct neighbour *n;
1023
1024         rcu_read_lock_bh();
1025
1026         n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
1027         if (n)
1028                 state = n->nud_state;
1029
1030         rcu_read_unlock_bh();
1031
1032         return !!(state & NUD_VALID);
1033 }
1034
1035 static bool ipv4_good_nh(const struct fib_nh *nh)
1036 {
1037         int state = NUD_REACHABLE;
1038         struct neighbour *n;
1039
1040         rcu_read_lock_bh();
1041
1042         n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
1043                                       (__force u32)nh->fib_nh_gw4);
1044         if (n)
1045                 state = n->nud_state;
1046
1047         rcu_read_unlock_bh();
1048
1049         return !!(state & NUD_VALID);
1050 }
1051
1052 static struct nexthop *nexthop_select_path_mp(struct nh_group *nhg, int hash)
1053 {
1054         struct nexthop *rc = NULL;
1055         int i;
1056
1057         for (i = 0; i < nhg->num_nh; ++i) {
1058                 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1059                 struct nh_info *nhi;
1060
1061                 if (hash > atomic_read(&nhge->mpath.upper_bound))
1062                         continue;
1063
1064                 nhi = rcu_dereference(nhge->nh->nh_info);
1065                 if (nhi->fdb_nh)
1066                         return nhge->nh;
1067
1068                 /* Nexthop groups always check whether a nexthop is
1069                  * good; they do not rely on a sysctl for this behavior.
1070                  */
1071                 switch (nhi->family) {
1072                 case AF_INET:
1073                         if (ipv4_good_nh(&nhi->fib_nh))
1074                                 return nhge->nh;
1075                         break;
1076                 case AF_INET6:
1077                         if (ipv6_good_nh(&nhi->fib6_nh))
1078                                 return nhge->nh;
1079                         break;
1080                 }
1081
1082                 if (!rc)
1083                         rc = nhge->nh;
1084         }
1085
1086         return rc;
1087 }
1088
1089 static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
1090 {
1091         struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
1092         u16 bucket_index = hash % res_table->num_nh_buckets;
1093         struct nh_res_bucket *bucket;
1094         struct nh_grp_entry *nhge;
1095
1096         /* nexthop_select_path() is expected to return a non-NULL value, so
1097          * skip protocol validation and just hand out whatever there is.
1098          */
1099         bucket = &res_table->nh_buckets[bucket_index];
1100         nh_res_bucket_set_busy(bucket);
1101         nhge = rcu_dereference(bucket->nh_entry);
1102         return nhge->nh;
1103 }
1104
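/*
 * Editor's note, not part of the original file: unlike the multipath
 * selector above, resilient selection is a plain modulo into the bucket
 * table. For example, with num_nh_buckets == 8 and hash == 43,
 * bucket_index == 43 % 8 == 3, and that bucket's nexthop is returned
 * regardless of neighbour state.
 */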
1105 struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
1106 {
1107         struct nh_group *nhg;
1108
1109         if (!nh->is_group)
1110                 return nh;
1111
1112         nhg = rcu_dereference(nh->nh_grp);
1113         if (nhg->mpath)
1114                 return nexthop_select_path_mp(nhg, hash);
1115         else if (nhg->resilient)
1116                 return nexthop_select_path_res(nhg, hash);
1117
1118         /* Unreachable. */
1119         return NULL;
1120 }
1121 EXPORT_SYMBOL_GPL(nexthop_select_path);
1122
1123 int nexthop_for_each_fib6_nh(struct nexthop *nh,
1124                              int (*cb)(struct fib6_nh *nh, void *arg),
1125                              void *arg)
1126 {
1127         struct nh_info *nhi;
1128         int err;
1129
1130         if (nh->is_group) {
1131                 struct nh_group *nhg;
1132                 int i;
1133
1134                 nhg = rcu_dereference_rtnl(nh->nh_grp);
1135                 for (i = 0; i < nhg->num_nh; i++) {
1136                         struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1137
1138                         nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
1139                         err = cb(&nhi->fib6_nh, arg);
1140                         if (err)
1141                                 return err;
1142                 }
1143         } else {
1144                 nhi = rcu_dereference_rtnl(nh->nh_info);
1145                 err = cb(&nhi->fib6_nh, arg);
1146                 if (err)
1147                         return err;
1148         }
1149
1150         return 0;
1151 }
1152 EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);
1153
1154 static int check_src_addr(const struct in6_addr *saddr,
1155                           struct netlink_ext_ack *extack)
1156 {
1157         if (!ipv6_addr_any(saddr)) {
1158                 NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
1159                 return -EINVAL;
1160         }
1161         return 0;
1162 }
1163
1164 int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
1165                        struct netlink_ext_ack *extack)
1166 {
1167         struct nh_info *nhi;
1168         bool is_fdb_nh;
1169
1170         /* fib6_src is unique to a fib6_info and limits the ability to cache
1171          * routes in fib6_nh within a nexthop that is potentially shared
1172          * across multiple fib entries. If the config wants to use source
1173          * routing it can not use nexthop objects. mlxsw also does not allow
1174          * fib6_src on routes.
1175          */
1176         if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
1177                 return -EINVAL;
1178
1179         if (nh->is_group) {
1180                 struct nh_group *nhg;
1181
1182                 nhg = rtnl_dereference(nh->nh_grp);
1183                 if (nhg->has_v4)
1184                         goto no_v4_nh;
1185                 is_fdb_nh = nhg->fdb_nh;
1186         } else {
1187                 nhi = rtnl_dereference(nh->nh_info);
1188                 if (nhi->family == AF_INET)
1189                         goto no_v4_nh;
1190                 is_fdb_nh = nhi->fdb_nh;
1191         }
1192
1193         if (is_fdb_nh) {
1194                 NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1195                 return -EINVAL;
1196         }
1197
1198         return 0;
1199 no_v4_nh:
1200         NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
1201         return -EINVAL;
1202 }
1203 EXPORT_SYMBOL_GPL(fib6_check_nexthop);
1204
1205 /* If an existing nexthop has IPv6 routes linked to it, we need
1206  * to verify that the new spec works with IPv6.
1207  */
1208 static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
1209                               struct netlink_ext_ack *extack)
1210 {
1211         struct fib6_info *f6i;
1212
1213         if (list_empty(&old->f6i_list))
1214                 return 0;
1215
1216         list_for_each_entry(f6i, &old->f6i_list, nh_list) {
1217                 if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
1218                         return -EINVAL;
1219         }
1220
1221         return fib6_check_nexthop(new, NULL, extack);
1222 }
1223
1224 static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
1225                                struct netlink_ext_ack *extack)
1226 {
1227         if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
1228                 NL_SET_ERR_MSG(extack,
1229                                "Route with host scope can not have a gateway");
1230                 return -EINVAL;
1231         }
1232
1233         if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
1234                 NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
1235                 return -EINVAL;
1236         }
1237
1238         return 0;
1239 }
1240
1241 /* Invoked by fib add code to verify nexthop by id is ok with
1242  * config for prefix; parts of fib_check_nh not done when nexthop
1243  * object is used.
1244  */
1245 int fib_check_nexthop(struct nexthop *nh, u8 scope,
1246                       struct netlink_ext_ack *extack)
1247 {
1248         struct nh_info *nhi;
1249         int err = 0;
1250
1251         if (nh->is_group) {
1252                 struct nh_group *nhg;
1253
1254                 nhg = rtnl_dereference(nh->nh_grp);
1255                 if (nhg->fdb_nh) {
1256                         NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1257                         err = -EINVAL;
1258                         goto out;
1259                 }
1260
1261                 if (scope == RT_SCOPE_HOST) {
1262                         NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
1263                         err = -EINVAL;
1264                         goto out;
1265                 }
1266
1267                 /* all nexthops in a group have the same scope */
1268                 nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
1269                 err = nexthop_check_scope(nhi, scope, extack);
1270         } else {
1271                 nhi = rtnl_dereference(nh->nh_info);
1272                 if (nhi->fdb_nh) {
1273                         NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
1274                         err = -EINVAL;
1275                         goto out;
1276                 }
1277                 err = nexthop_check_scope(nhi, scope, extack);
1278         }
1279
1280 out:
1281         return err;
1282 }
1283
1284 static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
1285                              struct netlink_ext_ack *extack)
1286 {
1287         struct fib_info *fi;
1288
1289         list_for_each_entry(fi, &old->fi_list, nh_list) {
1290                 int err;
1291
1292                 err = fib_check_nexthop(new, fi->fib_scope, extack);
1293                 if (err)
1294                         return err;
1295         }
1296         return 0;
1297 }
1298
1299 static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
1300 {
1301         return nhge->res.count_buckets == nhge->res.wants_buckets;
1302 }
1303
1304 static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
1305 {
1306         return nhge->res.count_buckets > nhge->res.wants_buckets;
1307 }
1308
1309 static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
1310 {
1311         return nhge->res.count_buckets < nhge->res.wants_buckets;
1312 }
1313
1314 static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
1315 {
1316         return list_empty(&res_table->uw_nh_entries);
1317 }
1318
1319 static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
1320 {
1321         struct nh_grp_entry *nhge;
1322
1323         if (bucket->occupied) {
1324                 nhge = nh_res_dereference(bucket->nh_entry);
1325                 nhge->res.count_buckets--;
1326                 bucket->occupied = false;
1327         }
1328 }
1329
1330 static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
1331                                  struct nh_grp_entry *nhge)
1332 {
1333         nh_res_bucket_unset_nh(bucket);
1334
1335         bucket->occupied = true;
1336         rcu_assign_pointer(bucket->nh_entry, nhge);
1337         nhge->res.count_buckets++;
1338 }
1339
1340 static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
1341                                          struct nh_res_bucket *bucket,
1342                                          unsigned long *deadline, bool *force)
1343 {
1344         unsigned long now = jiffies;
1345         struct nh_grp_entry *nhge;
1346         unsigned long idle_point;
1347
1348         if (!bucket->occupied) {
1349                 /* The bucket is not occupied, its NHGE pointer is either
1350                  * NULL or obsolete. We _have to_ migrate: set force.
1351                  */
1352                 *force = true;
1353                 return true;
1354         }
1355
1356         nhge = nh_res_dereference(bucket->nh_entry);
1357
1358         /* If the bucket is populated by an underweight or balanced
1359          * nexthop, do not migrate.
1360          */
1361         if (!nh_res_nhge_is_ow(nhge))
1362                 return false;
1363
1364         /* At this point we know that the bucket is populated with an
1365          * overweight nexthop. It needs to be migrated to a new nexthop if
1366          * the idle timer or the unbalanced timer expired.
1367          */
1368
1369         idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
1370         if (time_after_eq(now, idle_point)) {
1371                 /* The bucket is idle. We _can_ migrate: unset force. */
1372                 *force = false;
1373                 return true;
1374         }
1375
1376         /* Unbalanced timer of 0 means "never force". */
1377         if (res_table->unbalanced_timer) {
1378                 unsigned long unb_point;
1379
1380                 unb_point = nh_res_table_unb_point(res_table);
1381                 if (time_after(now, unb_point)) {
1382                         /* The bucket is not idle, but the unbalanced timer
1383                          * expired. We _can_ migrate, but set force anyway,
1384                          * so that drivers know to ignore activity reports
1385                          * from the HW.
1386                          */
1387                         *force = true;
1388                         return true;
1389                 }
1390
1391                 nh_res_time_set_deadline(unb_point, deadline);
1392         }
1393
1394         nh_res_time_set_deadline(idle_point, deadline);
1395         return false;
1396 }
1397
1398 static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
1399                                   u16 bucket_index, bool notify, bool force)
1400 {
1401         struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
1402         struct nh_grp_entry *new_nhge;
1403         struct netlink_ext_ack extack;
1404         int err;
1405
1406         new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
1407                                             struct nh_grp_entry,
1408                                             res.uw_nh_entry);
1409         if (WARN_ON_ONCE(!new_nhge))
1410                 /* If this function is called, "bucket" is either not
1411                  * occupied, or it belongs to a next hop that is
1412                  * overweight. In either case, there ought to be a
1413                  * corresponding underweight next hop.
1414                  */
1415                 return false;
1416
1417         if (notify) {
1418                 struct nh_grp_entry *old_nhge;
1419
1420                 old_nhge = nh_res_dereference(bucket->nh_entry);
1421                 err = call_nexthop_res_bucket_notifiers(res_table->net,
1422                                                         res_table->nhg_id,
1423                                                         bucket_index, force,
1424                                                         old_nhge->nh,
1425                                                         new_nhge->nh, &extack);
1426                 if (err) {
1427                         pr_err_ratelimited("%s\n", extack._msg);
1428                         if (!force)
1429                                 return false;
1430                         /* It is not possible to veto a forced replacement, so
1431                          * just clear the hardware flags from the nexthop
1432                          * bucket to indicate to user space that this bucket is
1433                          * not correctly populated in hardware.
1434                          */
1435                         bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
1436                 }
1437         }
1438
1439         nh_res_bucket_set_nh(bucket, new_nhge);
1440         nh_res_bucket_set_idle(res_table, bucket);
1441
1442         if (nh_res_nhge_is_balanced(new_nhge))
1443                 list_del(&new_nhge->res.uw_nh_entry);
1444         return true;
1445 }
1446
1447 #define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)
1448
1449 static void nh_res_table_upkeep(struct nh_res_table *res_table, bool notify)
1450 {
1451         unsigned long now = jiffies;
1452         unsigned long deadline;
1453         u16 i;
1454
1455         /* Deadline is the next time that upkeep should be run. It is the
1456          * earliest time at which one of the buckets might be migrated.
1457          * Start at the most pessimistic estimate: either unbalanced_timer
1458          * from now, or if there is none, idle_timer from now. For each
1459          * encountered time point, call nh_res_time_set_deadline() to
1460          * refine the estimate.
1461          */
1462         if (res_table->unbalanced_timer)
1463                 deadline = now + res_table->unbalanced_timer;
1464         else
1465                 deadline = now + res_table->idle_timer;
1466
1467         for (i = 0; i < res_table->num_nh_buckets; i++) {
1468                 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1469                 bool force;
1470
1471                 if (nh_res_bucket_should_migrate(res_table, bucket,
1472                                                  &deadline, &force)) {
1473                         if (!nh_res_bucket_migrate(res_table, i, notify,
1474                                                    force)) {
1475                                 unsigned long idle_point;
1476
1477                                 /* A driver can override the migration
1478                                  * decision if the HW reports that the
1479                                  * bucket is actually not idle. Therefore
1480                                  * mark the bucket as busy again and
1481                                  * update the deadline.
1482                                  */
1483                                 nh_res_bucket_set_busy(bucket);
1484                                 idle_point = nh_res_bucket_idle_point(res_table,
1485                                                                       bucket,
1486                                                                       now);
1487                                 nh_res_time_set_deadline(idle_point, &deadline);
1488                         }
1489                 }
1490         }
1491
1492         /* If the group is still unbalanced, schedule the next upkeep to
1493          * either the deadline computed above, or the minimum deadline,
1494          * whichever comes later.
1495          */
1496         if (!nh_res_table_is_balanced(res_table)) {
1497                 unsigned long now = jiffies;
1498                 unsigned long min_deadline;
1499
1500                 min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
1501                 if (time_before(deadline, min_deadline))
1502                         deadline = min_deadline;
1503
1504                 queue_delayed_work(system_power_efficient_wq,
1505                                    &res_table->upkeep_dw, deadline - now);
1506         }
1507 }
1508
1509 static void nh_res_table_upkeep_dw(struct work_struct *work)
1510 {
1511         struct delayed_work *dw = to_delayed_work(work);
1512         struct nh_res_table *res_table;
1513
1514         res_table = container_of(dw, struct nh_res_table, upkeep_dw);
1515         nh_res_table_upkeep(res_table, true);
1516 }
1517
1518 static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
1519 {
1520         cancel_delayed_work_sync(&res_table->upkeep_dw);
1521 }
1522
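/* Distribute the table's buckets among the group entries in proportion to
 * their weights: each entry's wants_buckets is the difference between
 * consecutive rounded cumulative shares, so the counts sum to
 * num_nh_buckets. E.g. weights 1 and 3 over 8 buckets yield wants of 2 and 6.
 * Entries for which nh_res_nhge_is_uw() is true (they hold fewer buckets
 * than they want) are collected on uw_nh_entries so that upkeep can migrate
 * buckets towards them.
 */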
1523 static void nh_res_group_rebalance(struct nh_group *nhg,
1524                                    struct nh_res_table *res_table)
1525 {
1526         int prev_upper_bound = 0;
1527         int total = 0;
1528         int w = 0;
1529         int i;
1530
1531         INIT_LIST_HEAD(&res_table->uw_nh_entries);
1532
1533         for (i = 0; i < nhg->num_nh; ++i)
1534                 total += nhg->nh_entries[i].weight;
1535
1536         for (i = 0; i < nhg->num_nh; ++i) {
1537                 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1538                 int upper_bound;
1539
1540                 w += nhge->weight;
1541                 upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
1542                                                 total);
1543                 nhge->res.wants_buckets = upper_bound - prev_upper_bound;
1544                 prev_upper_bound = upper_bound;
1545
1546                 if (nh_res_nhge_is_uw(nhge)) {
1547                         if (list_empty(&res_table->uw_nh_entries))
1548                                 res_table->unbalanced_since = jiffies;
1549                         list_add(&nhge->res.uw_nh_entry,
1550                                  &res_table->uw_nh_entries);
1551                 }
1552         }
1553 }
1554
1555 /* Migrate buckets in res_table so that they reference NHGEs from NHG with
1556  * the right NH ID. Set those buckets that do not have a corresponding NHGE
1557  * entry in NHG as not occupied.
1558  */
1559 static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
1560                                          struct nh_group *nhg)
1561 {
1562         u16 i;
1563
1564         for (i = 0; i < res_table->num_nh_buckets; i++) {
1565                 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1566                 u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
1567                 bool found = false;
1568                 int j;
1569
1570                 for (j = 0; j < nhg->num_nh; j++) {
1571                         struct nh_grp_entry *nhge = &nhg->nh_entries[j];
1572
1573                         if (nhge->nh->id == id) {
1574                                 nh_res_bucket_set_nh(bucket, nhge);
1575                                 found = true;
1576                                 break;
1577                         }
1578                 }
1579
1580                 if (!found)
1581                         nh_res_bucket_unset_nh(bucket);
1582         }
1583 }
1584
1585 static void replace_nexthop_grp_res(struct nh_group *oldg,
1586                                     struct nh_group *newg)
1587 {
1588         /* For NH group replacement, the new NHG might only have a stub
1589          * hash table with 0 buckets, because the number of buckets was not
1590          * specified. For NH removal, oldg and newg both reference the same
1591          * res_table. So in any case, in the following, we want to work
1592          * with oldg->res_table.
1593          */
1594         struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
1595         unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
1596         bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);
1597
1598         nh_res_table_cancel_upkeep(old_res_table);
1599         nh_res_table_migrate_buckets(old_res_table, newg);
1600         nh_res_group_rebalance(newg, old_res_table);
1601         if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
1602                 old_res_table->unbalanced_since = prev_unbalanced_since;
1603         nh_res_table_upkeep(old_res_table, true);
1604 }
1605
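/* Hash-threshold rebalance for a legacy multipath group: each entry's
 * upper_bound is its cumulative weight scaled into the [0, 2^31 - 1] range,
 * and the datapath selects the first usable entry whose bound is >= the
 * flow hash. E.g. weights 1 and 2 give bounds of about 0x2aaaaaaa and
 * 0x7fffffff.
 */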
1606 static void nh_mp_group_rebalance(struct nh_group *nhg)
1607 {
1608         int total = 0;
1609         int w = 0;
1610         int i;
1611
1612         for (i = 0; i < nhg->num_nh; ++i)
1613                 total += nhg->nh_entries[i].weight;
1614
1615         for (i = 0; i < nhg->num_nh; ++i) {
1616                 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1617                 int upper_bound;
1618
1619                 w += nhge->weight;
1620                 upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
1621                 atomic_set(&nhge->mpath.upper_bound, upper_bound);
1622         }
1623 }
1624
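/* Remove one entry from the group that contains it. The group's spare
 * nh_group is filled with the remaining entries and swapped in with
 * rcu_assign_pointer(), so concurrent readers always see a consistent
 * entry array.
 */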
1625 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
1626                                 struct nl_info *nlinfo)
1627 {
1628         struct nh_grp_entry *nhges, *new_nhges;
1629         struct nexthop *nhp = nhge->nh_parent;
1630         struct netlink_ext_ack extack;
1631         struct nexthop *nh = nhge->nh;
1632         struct nh_group *nhg, *newg;
1633         int i, j, err;
1634
1635         WARN_ON(!nh);
1636
1637         nhg = rtnl_dereference(nhp->nh_grp);
1638         newg = nhg->spare;
1639
1640         /* last entry - removing it empties the group, so remove the parent instead */
1641         if (nhg->num_nh == 1) {
1642                 remove_nexthop(net, nhp, nlinfo);
1643                 return;
1644         }
1645
1646         newg->has_v4 = false;
1647         newg->is_multipath = nhg->is_multipath;
1648         newg->mpath = nhg->mpath;
1649         newg->resilient = nhg->resilient;
1650         newg->fdb_nh = nhg->fdb_nh;
1651         newg->num_nh = nhg->num_nh;
1652
1653         /* copy old entries to new except the one getting removed */
1654         nhges = nhg->nh_entries;
1655         new_nhges = newg->nh_entries;
1656         for (i = 0, j = 0; i < nhg->num_nh; ++i) {
1657                 struct nh_info *nhi;
1658
1659                 /* current nexthop getting removed */
1660                 if (nhg->nh_entries[i].nh == nh) {
1661                         newg->num_nh--;
1662                         continue;
1663                 }
1664
1665                 nhi = rtnl_dereference(nhges[i].nh->nh_info);
1666                 if (nhi->family == AF_INET)
1667                         newg->has_v4 = true;
1668
1669                 list_del(&nhges[i].nh_list);
1670                 new_nhges[j].nh_parent = nhges[i].nh_parent;
1671                 new_nhges[j].nh = nhges[i].nh;
1672                 new_nhges[j].weight = nhges[i].weight;
1673                 list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
1674                 j++;
1675         }
1676
1677         if (newg->mpath)
1678                 nh_mp_group_rebalance(newg);
1679         else if (newg->resilient)
1680                 replace_nexthop_grp_res(nhg, newg);
1681
1682         rcu_assign_pointer(nhp->nh_grp, newg);
1683
1684         list_del(&nhge->nh_list);
1685         nexthop_put(nhge->nh);
1686
1687         /* Removal of a NH from a resilient group is notified through
1688          * bucket notifications.
1689          */
1690         if (newg->mpath) {
1691                 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
1692                                              &extack);
1693                 if (err)
1694                         pr_err("%s\n", extack._msg);
1695         }
1696
1697         if (nlinfo)
1698                 nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
1699 }
1700
1701 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
1702                                        struct nl_info *nlinfo)
1703 {
1704         struct nh_grp_entry *nhge, *tmp;
1705
1706         list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
1707                 remove_nh_grp_entry(net, nhge, nlinfo);
1708
1709         /* make sure all readers see the newly published array before releasing rtnl */
1710         synchronize_net();
1711 }
1712
1713 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
1714 {
1715         struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
1716         struct nh_res_table *res_table;
1717         int i, num_nh = nhg->num_nh;
1718
1719         for (i = 0; i < num_nh; ++i) {
1720                 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1721
1722                 if (WARN_ON(!nhge->nh))
1723                         continue;
1724
1725                 list_del_init(&nhge->nh_list);
1726         }
1727
1728         if (nhg->resilient) {
1729                 res_table = rtnl_dereference(nhg->res_table);
1730                 nh_res_table_cancel_upkeep(res_table);
1731         }
1732 }
1733
1734 /* not called for nexthop replace */
1735 static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
1736 {
1737         struct fib6_info *f6i, *tmp;
1738         bool do_flush = false;
1739         struct fib_info *fi;
1740
1741         list_for_each_entry(fi, &nh->fi_list, nh_list) {
1742                 fi->fib_flags |= RTNH_F_DEAD;
1743                 do_flush = true;
1744         }
1745         if (do_flush)
1746                 fib_flush(net);
1747
1748         /* ip6_del_rt removes the entry from this list hence the _safe */
1749         list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
1750                 /* __ip6_del_rt does a release, so do a hold here */
1751                 fib6_info_hold(f6i);
1752                 ipv6_stub->ip6_del_rt(net, f6i,
1753                                       !net->ipv4.sysctl_nexthop_compat_mode);
1754         }
1755 }
1756
1757 static void __remove_nexthop(struct net *net, struct nexthop *nh,
1758                              struct nl_info *nlinfo)
1759 {
1760         __remove_nexthop_fib(net, nh);
1761
1762         if (nh->is_group) {
1763                 remove_nexthop_group(nh, nlinfo);
1764         } else {
1765                 struct nh_info *nhi;
1766
1767                 nhi = rtnl_dereference(nh->nh_info);
1768                 if (nhi->fib_nhc.nhc_dev)
1769                         hlist_del(&nhi->dev_hash);
1770
1771                 remove_nexthop_from_groups(net, nh, nlinfo);
1772         }
1773 }
1774
1775 static void remove_nexthop(struct net *net, struct nexthop *nh,
1776                            struct nl_info *nlinfo)
1777 {
1778         call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
1779
1780         /* remove from the tree */
1781         rb_erase(&nh->rb_node, &net->nexthop.rb_root);
1782
1783         if (nlinfo)
1784                 nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
1785
1786         __remove_nexthop(net, nh, nlinfo);
1787         nh_base_seq_inc(net);
1788
1789         nexthop_put(nh);
1790 }
1791
1792 /* if any FIB entries reference this nexthop, any dst entries
1793  * need to be regenerated
1794  */
1795 static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
1796 {
1797         struct fib6_info *f6i;
1798
1799         if (!list_empty(&nh->fi_list))
1800                 rt_cache_flush(net);
1801
1802         list_for_each_entry(f6i, &nh->f6i_list, nh_list)
1803                 ipv6_stub->fib6_update_sernum(net, f6i);
1804 }
1805
1806 static int replace_nexthop_grp(struct net *net, struct nexthop *old,
1807                                struct nexthop *new, const struct nh_config *cfg,
1808                                struct netlink_ext_ack *extack)
1809 {
1810         struct nh_res_table *tmp_table = NULL;
1811         struct nh_res_table *new_res_table;
1812         struct nh_res_table *old_res_table;
1813         struct nh_group *oldg, *newg;
1814         int i, err;
1815
1816         if (!new->is_group) {
1817                 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
1818                 return -EINVAL;
1819         }
1820
1821         oldg = rtnl_dereference(old->nh_grp);
1822         newg = rtnl_dereference(new->nh_grp);
1823
1824         if (newg->mpath != oldg->mpath) {
1825                 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
1826                 return -EINVAL;
1827         }
1828
1829         if (newg->mpath) {
1830                 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
1831                                              extack);
1832                 if (err)
1833                         return err;
1834         } else if (newg->resilient) {
1835                 new_res_table = rtnl_dereference(newg->res_table);
1836                 old_res_table = rtnl_dereference(oldg->res_table);
1837
1838                 /* Accept if num_nh_buckets was not given, but if it was
1839                  * given, demand that the value be correct.
1840                  */
1841                 if (cfg->nh_grp_res_has_num_buckets &&
1842                     cfg->nh_grp_res_num_buckets !=
1843                     old_res_table->num_nh_buckets) {
1844                         NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
1845                         return -EINVAL;
1846                 }
1847
1848                 /* Emit a pre-replace notification so that listeners can veto
1849                  * a potentially unsupported configuration. Otherwise,
1850                  * individual bucket replacement notifications would need to be
1851                  * vetoed, which is something that should only happen if the
1852                  * bucket is currently active.
1853                  */
1854                 err = call_nexthop_res_table_notifiers(net, new, extack);
1855                 if (err)
1856                         return err;
1857
1858                 if (cfg->nh_grp_res_has_idle_timer)
1859                         old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
1860                 if (cfg->nh_grp_res_has_unbalanced_timer)
1861                         old_res_table->unbalanced_timer =
1862                                 cfg->nh_grp_res_unbalanced_timer;
1863
1864                 replace_nexthop_grp_res(oldg, newg);
1865
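                /* Keep the already-populated old table for the new group
                 * (and its spare), and park the new group's stub table in
                 * tmp_table so that it is released together with the group
                 * that is discarded once the replace completes.
                 */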
1866                 tmp_table = new_res_table;
1867                 rcu_assign_pointer(newg->res_table, old_res_table);
1868                 rcu_assign_pointer(newg->spare->res_table, old_res_table);
1869         }
1870
1871         /* update parents - used by nexthop code for cleanup */
1872         for (i = 0; i < newg->num_nh; i++)
1873                 newg->nh_entries[i].nh_parent = old;
1874
1875         rcu_assign_pointer(old->nh_grp, newg);
1876
1877         if (newg->resilient) {
1878                 rcu_assign_pointer(oldg->res_table, tmp_table);
1879                 rcu_assign_pointer(oldg->spare->res_table, tmp_table);
1880         }
1881
1882         for (i = 0; i < oldg->num_nh; i++)
1883                 oldg->nh_entries[i].nh_parent = new;
1884
1885         rcu_assign_pointer(new->nh_grp, oldg);
1886
1887         return 0;
1888 }
1889
1890 static void nh_group_v4_update(struct nh_group *nhg)
1891 {
1892         struct nh_grp_entry *nhges;
1893         bool has_v4 = false;
1894         int i;
1895
1896         nhges = nhg->nh_entries;
1897         for (i = 0; i < nhg->num_nh; i++) {
1898                 struct nh_info *nhi;
1899
1900                 nhi = rtnl_dereference(nhges[i].nh->nh_info);
1901                 if (nhi->family == AF_INET)
1902                         has_v4 = true;
1903         }
1904         nhg->has_v4 = has_v4;
1905 }
1906
1907 static int replace_nexthop_single_notify_res(struct net *net,
1908                                              struct nh_res_table *res_table,
1909                                              struct nexthop *old,
1910                                              struct nh_info *oldi,
1911                                              struct nh_info *newi,
1912                                              struct netlink_ext_ack *extack)
1913 {
1914         u32 nhg_id = res_table->nhg_id;
1915         int err;
1916         u16 i;
1917
1918         for (i = 0; i < res_table->num_nh_buckets; i++) {
1919                 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1920                 struct nh_grp_entry *nhge;
1921
1922                 nhge = rtnl_dereference(bucket->nh_entry);
1923                 if (nhge->nh == old) {
1924                         err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
1925                                                                   i, true,
1926                                                                   oldi, newi,
1927                                                                   extack);
1928                         if (err)
1929                                 goto err_notify;
1930                 }
1931         }
1932
1933         return 0;
1934
1935 err_notify:
1936         while (i-- > 0) {
1937                 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1938                 struct nh_grp_entry *nhge;
1939
1940                 nhge = rtnl_dereference(bucket->nh_entry);
1941                 if (nhge->nh == old)
1942                         __call_nexthop_res_bucket_notifiers(net, nhg_id, i,
1943                                                             true, newi, oldi,
1944                                                             extack);
1945         }
1946         return err;
1947 }
1948
1949 static int replace_nexthop_single_notify(struct net *net,
1950                                          struct nexthop *group_nh,
1951                                          struct nexthop *old,
1952                                          struct nh_info *oldi,
1953                                          struct nh_info *newi,
1954                                          struct netlink_ext_ack *extack)
1955 {
1956         struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
1957         struct nh_res_table *res_table;
1958
1959         if (nhg->mpath) {
1960                 return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
1961                                               group_nh, extack);
1962         } else if (nhg->resilient) {
1963                 res_table = rtnl_dereference(nhg->res_table);
1964                 return replace_nexthop_single_notify_res(net, res_table,
1965                                                          old, oldi, newi,
1966                                                          extack);
1967         }
1968
1969         return -EINVAL;
1970 }
1971
1972 static int replace_nexthop_single(struct net *net, struct nexthop *old,
1973                                   struct nexthop *new,
1974                                   struct netlink_ext_ack *extack)
1975 {
1976         u8 old_protocol, old_nh_flags;
1977         struct nh_info *oldi, *newi;
1978         struct nh_grp_entry *nhge;
1979         int err;
1980
1981         if (new->is_group) {
1982                 NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
1983                 return -EINVAL;
1984         }
1985
1986         err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
1987         if (err)
1988                 return err;
1989
1990         /* Hardware flags were set on 'old' as 'new' is not in the red-black
1991          * tree. Therefore, inherit the flags from 'old' to 'new'.
1992          */
1993         new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
1994
1995         oldi = rtnl_dereference(old->nh_info);
1996         newi = rtnl_dereference(new->nh_info);
1997
1998         newi->nh_parent = old;
1999         oldi->nh_parent = new;
2000
2001         old_protocol = old->protocol;
2002         old_nh_flags = old->nh_flags;
2003
2004         old->protocol = new->protocol;
2005         old->nh_flags = new->nh_flags;
2006
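        /* Swap the nh_info payloads: 'old', which stays in the tree, takes
         * over the new configuration, while 'new' inherits the old one and
         * is freed by the caller once the replace succeeds.
         */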
2007         rcu_assign_pointer(old->nh_info, newi);
2008         rcu_assign_pointer(new->nh_info, oldi);
2009
2010         /* Send a replace notification for all the groups using the nexthop. */
2011         list_for_each_entry(nhge, &old->grp_list, nh_list) {
2012                 struct nexthop *nhp = nhge->nh_parent;
2013
2014                 err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
2015                                                     extack);
2016                 if (err)
2017                         goto err_notify;
2018         }
2019
2020         /* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
2021          * update IPv4 indication in all the groups using the nexthop.
2022          */
2023         if (oldi->family == AF_INET && newi->family == AF_INET6) {
2024                 list_for_each_entry(nhge, &old->grp_list, nh_list) {
2025                         struct nexthop *nhp = nhge->nh_parent;
2026                         struct nh_group *nhg;
2027
2028                         nhg = rtnl_dereference(nhp->nh_grp);
2029                         nh_group_v4_update(nhg);
2030                 }
2031         }
2032
2033         return 0;
2034
2035 err_notify:
2036         rcu_assign_pointer(new->nh_info, newi);
2037         rcu_assign_pointer(old->nh_info, oldi);
2038         old->nh_flags = old_nh_flags;
2039         old->protocol = old_protocol;
2040         oldi->nh_parent = old;
2041         newi->nh_parent = new;
2042         list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
2043                 struct nexthop *nhp = nhge->nh_parent;
2044
2045                 replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
2046         }
2047         call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
2048         return err;
2049 }
2050
2051 static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
2052                                      struct nl_info *info)
2053 {
2054         struct fib6_info *f6i;
2055
2056         if (!list_empty(&nh->fi_list)) {
2057                 struct fib_info *fi;
2058
2059                 /* The expectation is a few fib_info per nexthop and many
2060                  * routes per fib_info, so mark the fib_info entries and
2061                  * then walk the fib tables once.
2062                  */
2063                 list_for_each_entry(fi, &nh->fi_list, nh_list)
2064                         fi->nh_updated = true;
2065
2066                 fib_info_notify_update(net, info);
2067
2068                 list_for_each_entry(fi, &nh->fi_list, nh_list)
2069                         fi->nh_updated = false;
2070         }
2071
2072         list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2073                 ipv6_stub->fib6_rt_update(net, f6i, info);
2074 }
2075
2076 /* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
2077  * linked to this nexthop and for all groups that the nexthop
2078  * is a member of
2079  */
2080 static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
2081                                    struct nl_info *info)
2082 {
2083         struct nh_grp_entry *nhge;
2084
2085         __nexthop_replace_notify(net, nh, info);
2086
2087         list_for_each_entry(nhge, &nh->grp_list, nh_list)
2088                 __nexthop_replace_notify(net, nhge->nh_parent, info);
2089 }
2090
2091 static int replace_nexthop(struct net *net, struct nexthop *old,
2092                            struct nexthop *new, const struct nh_config *cfg,
2093                            struct netlink_ext_ack *extack)
2094 {
2095         bool new_is_reject = false;
2096         struct nh_grp_entry *nhge;
2097         int err;
2098
2099         /* check that existing FIB entries are ok with the
2100          * new nexthop definition
2101          */
2102         err = fib_check_nh_list(old, new, extack);
2103         if (err)
2104                 return err;
2105
2106         err = fib6_check_nh_list(old, new, extack);
2107         if (err)
2108                 return err;
2109
2110         if (!new->is_group) {
2111                 struct nh_info *nhi = rtnl_dereference(new->nh_info);
2112
2113                 new_is_reject = nhi->reject_nh;
2114         }
2115
2116         list_for_each_entry(nhge, &old->grp_list, nh_list) {
2117                 /* if new nexthop is a blackhole, any groups using this
2118                  * nexthop cannot have more than 1 path
2119                  */
2120                 if (new_is_reject &&
2121                     nexthop_num_path(nhge->nh_parent) > 1) {
2122                         NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
2123                         return -EINVAL;
2124                 }
2125
2126                 err = fib_check_nh_list(nhge->nh_parent, new, extack);
2127                 if (err)
2128                         return err;
2129
2130                 err = fib6_check_nh_list(nhge->nh_parent, new, extack);
2131                 if (err)
2132                         return err;
2133         }
2134
2135         if (old->is_group)
2136                 err = replace_nexthop_grp(net, old, new, cfg, extack);
2137         else
2138                 err = replace_nexthop_single(net, old, new, extack);
2139
2140         if (!err) {
2141                 nh_rt_cache_flush(net, old);
2142
2143                 __remove_nexthop(net, new, NULL);
2144                 nexthop_put(new);
2145         }
2146
2147         return err;
2148 }
2149
2150 /* called with rtnl_lock held */
2151 static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2152                           struct nh_config *cfg, struct netlink_ext_ack *extack)
2153 {
2154         struct rb_node **pp, *parent = NULL, *next;
2155         struct rb_root *root = &net->nexthop.rb_root;
2156         bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2157         bool create = !!(cfg->nlflags & NLM_F_CREATE);
2158         u32 new_id = new_nh->id;
2159         int replace_notify = 0;
2160         int rc = -EEXIST;
2161
2162         pp = &root->rb_node;
2163         while (1) {
2164                 struct nexthop *nh;
2165
2166                 next = *pp;
2167                 if (!next)
2168                         break;
2169
2170                 parent = next;
2171
2172                 nh = rb_entry(parent, struct nexthop, rb_node);
2173                 if (new_id < nh->id) {
2174                         pp = &next->rb_left;
2175                 } else if (new_id > nh->id) {
2176                         pp = &next->rb_right;
2177                 } else if (replace) {
2178                         rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2179                         if (!rc) {
2180                                 new_nh = nh; /* send notification with old nh */
2181                                 replace_notify = 1;
2182                         }
2183                         goto out;
2184                 } else {
2185                         /* id already exists and not a replace */
2186                         goto out;
2187                 }
2188         }
2189
2190         if (replace && !create) {
2191                 NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2192                 rc = -ENOENT;
2193                 goto out;
2194         }
2195
2196         if (new_nh->is_group) {
2197                 struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2198                 struct nh_res_table *res_table;
2199
2200                 if (nhg->resilient) {
2201                         res_table = rtnl_dereference(nhg->res_table);
2202
2203                         /* Not passing the number of buckets is OK when
2204                          * replacing, but not when creating a new group.
2205                          */
2206                         if (!cfg->nh_grp_res_has_num_buckets) {
2207                                 NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2208                                 rc = -EINVAL;
2209                                 goto out;
2210                         }
2211
2212                         nh_res_group_rebalance(nhg, res_table);
2213
2214                         /* Do not send bucket notifications; a full
2215                          * notification is sent below.
2216                          */
2217                         nh_res_table_upkeep(res_table, false);
2218                 }
2219         }
2220
2221         rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2222         rb_insert_color(&new_nh->rb_node, root);
2223
2224         /* The initial insertion is a full notification for mpath as well
2225          * as resilient groups.
2226          */
2227         rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2228         if (rc)
2229                 rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2230
2231 out:
2232         if (!rc) {
2233                 nh_base_seq_inc(net);
2234                 nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
2235                 if (replace_notify && net->ipv4.sysctl_nexthop_compat_mode)
2236                         nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2237         }
2238
2239         return rc;
2240 }
2241
2242 /* rtnl */
2243 /* remove all nexthops tied to a device being deleted */
2244 static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2245 {
2246         unsigned int hash = nh_dev_hashfn(dev->ifindex);
2247         struct net *net = dev_net(dev);
2248         struct hlist_head *head = &net->nexthop.devhash[hash];
2249         struct hlist_node *n;
2250         struct nh_info *nhi;
2251
2252         hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2253                 if (nhi->fib_nhc.nhc_dev != dev)
2254                         continue;
2255
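                /* Blackhole nexthops are bound to the loopback device, so
                 * only an unregister event should remove them.
                 */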
2256                 if (nhi->reject_nh &&
2257                     (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2258                         continue;
2259
2260                 remove_nexthop(net, nhi->nh_parent, NULL);
2261         }
2262 }
2263
2264 /* rtnl; called when net namespace is deleted */
2265 static void flush_all_nexthops(struct net *net)
2266 {
2267         struct rb_root *root = &net->nexthop.rb_root;
2268         struct rb_node *node;
2269         struct nexthop *nh;
2270
2271         while ((node = rb_first(root))) {
2272                 nh = rb_entry(node, struct nexthop, rb_node);
2273                 remove_nexthop(net, nh, NULL);
2274                 cond_resched();
2275         }
2276 }
2277
2278 static struct nexthop *nexthop_create_group(struct net *net,
2279                                             struct nh_config *cfg)
2280 {
2281         struct nlattr *grps_attr = cfg->nh_grp;
2282         struct nexthop_grp *entry = nla_data(grps_attr);
2283         u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2284         struct nh_group *nhg;
2285         struct nexthop *nh;
2286         int err;
2287         int i;
2288
2289         if (WARN_ON(!num_nh))
2290                 return ERR_PTR(-EINVAL);
2291
2292         nh = nexthop_alloc();
2293         if (!nh)
2294                 return ERR_PTR(-ENOMEM);
2295
2296         nh->is_group = 1;
2297
2298         nhg = nexthop_grp_alloc(num_nh);
2299         if (!nhg) {
2300                 kfree(nh);
2301                 return ERR_PTR(-ENOMEM);
2302         }
2303
2304         /* spare group used for removals */
2305         nhg->spare = nexthop_grp_alloc(num_nh);
2306         if (!nhg->spare) {
2307                 kfree(nhg);
2308                 kfree(nh);
2309                 return ERR_PTR(-ENOMEM);
2310         }
2311         nhg->spare->spare = nhg;
2312
2313         for (i = 0; i < nhg->num_nh; ++i) {
2314                 struct nexthop *nhe;
2315                 struct nh_info *nhi;
2316
2317                 nhe = nexthop_find_by_id(net, entry[i].id);
2318                 if (!nexthop_get(nhe)) {
2319                         err = -ENOENT;
2320                         goto out_no_nh;
2321                 }
2322
2323                 nhi = rtnl_dereference(nhe->nh_info);
2324                 if (nhi->family == AF_INET)
2325                         nhg->has_v4 = true;
2326
2327                 nhg->nh_entries[i].nh = nhe;
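                /* Netlink carries group weights as value - 1 (a raw 0 means
                 * weight 1), hence the + 1 below.
                 */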
2328                 nhg->nh_entries[i].weight = entry[i].weight + 1;
2329                 list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2330                 nhg->nh_entries[i].nh_parent = nh;
2331         }
2332
2333         if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2334                 nhg->mpath = 1;
2335                 nhg->is_multipath = true;
2336         } else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2337                 struct nh_res_table *res_table;
2338
2339                 /* Bounce resilient groups for now - the setup below is not reached yet. */
2340                 err = -EINVAL;
2341                 goto out_no_nh;
2342
2343                 res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2344                 if (!res_table) {
2345                         err = -ENOMEM;
2346                         goto out_no_nh;
2347                 }
2348
2349                 rcu_assign_pointer(nhg->spare->res_table, res_table);
2350                 rcu_assign_pointer(nhg->res_table, res_table);
2351                 nhg->resilient = true;
2352                 nhg->is_multipath = true;
2353         }
2354
2355         WARN_ON_ONCE(nhg->mpath + nhg->resilient != 1);
2356
2357         if (nhg->mpath)
2358                 nh_mp_group_rebalance(nhg);
2359
2360         if (cfg->nh_fdb)
2361                 nhg->fdb_nh = 1;
2362
2363         rcu_assign_pointer(nh->nh_grp, nhg);
2364
2365         return nh;
2366
2367 out_no_nh:
2368         for (i--; i >= 0; --i) {
2369                 list_del(&nhg->nh_entries[i].nh_list);
2370                 nexthop_put(nhg->nh_entries[i].nh);
2371         }
2372
2373         kfree(nhg->spare);
2374         kfree(nhg);
2375         kfree(nh);
2376
2377         return ERR_PTR(err);
2378 }
2379
2380 static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2381                           struct nh_info *nhi, struct nh_config *cfg,
2382                           struct netlink_ext_ack *extack)
2383 {
2384         struct fib_nh *fib_nh = &nhi->fib_nh;
2385         struct fib_config fib_cfg = {
2386                 .fc_oif   = cfg->nh_ifindex,
2387                 .fc_gw4   = cfg->gw.ipv4,
2388                 .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2389                 .fc_flags = cfg->nh_flags,
2390                 .fc_encap = cfg->nh_encap,
2391                 .fc_encap_type = cfg->nh_encap_type,
2392         };
2393         u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2394         int err;
2395
2396         err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2397         if (err) {
2398                 fib_nh_release(net, fib_nh);
2399                 goto out;
2400         }
2401
2402         if (nhi->fdb_nh)
2403                 goto out;
2404
2405         /* sets nh_dev if successful */
2406         err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2407         if (!err) {
2408                 nh->nh_flags = fib_nh->fib_nh_flags;
2409                 fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2410                                           fib_nh->fib_nh_scope);
2411         } else {
2412                 fib_nh_release(net, fib_nh);
2413         }
2414 out:
2415         return err;
2416 }
2417
2418 static int nh_create_ipv6(struct net *net,  struct nexthop *nh,
2419                           struct nh_info *nhi, struct nh_config *cfg,
2420                           struct netlink_ext_ack *extack)
2421 {
2422         struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2423         struct fib6_config fib6_cfg = {
2424                 .fc_table = l3mdev_fib_table(cfg->dev),
2425                 .fc_ifindex = cfg->nh_ifindex,
2426                 .fc_gateway = cfg->gw.ipv6,
2427                 .fc_flags = cfg->nh_flags,
2428                 .fc_encap = cfg->nh_encap,
2429                 .fc_encap_type = cfg->nh_encap_type,
2430                 .fc_is_fdb = cfg->nh_fdb,
2431         };
2432         int err;
2433
2434         if (!ipv6_addr_any(&cfg->gw.ipv6))
2435                 fib6_cfg.fc_flags |= RTF_GATEWAY;
2436
2437         /* sets nh_dev if successful */
2438         err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2439                                       extack);
2440         if (err)
2441                 ipv6_stub->fib6_nh_release(fib6_nh);
2442         else
2443                 nh->nh_flags = fib6_nh->fib_nh_flags;
2444
2445         return err;
2446 }
2447
2448 static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2449                                       struct netlink_ext_ack *extack)
2450 {
2451         struct nh_info *nhi;
2452         struct nexthop *nh;
2453         int err = 0;
2454
2455         nh = nexthop_alloc();
2456         if (!nh)
2457                 return ERR_PTR(-ENOMEM);
2458
2459         nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2460         if (!nhi) {
2461                 kfree(nh);
2462                 return ERR_PTR(-ENOMEM);
2463         }
2464
2465         nh->nh_flags = cfg->nh_flags;
2466         nh->net = net;
2467
2468         nhi->nh_parent = nh;
2469         nhi->family = cfg->nh_family;
2470         nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2471
2472         if (cfg->nh_fdb)
2473                 nhi->fdb_nh = 1;
2474
2475         if (cfg->nh_blackhole) {
2476                 nhi->reject_nh = 1;
2477                 cfg->nh_ifindex = net->loopback_dev->ifindex;
2478         }
2479
2480         switch (cfg->nh_family) {
2481         case AF_INET:
2482                 err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2483                 break;
2484         case AF_INET6:
2485                 err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2486                 break;
2487         }
2488
2489         if (err) {
2490                 kfree(nhi);
2491                 kfree(nh);
2492                 return ERR_PTR(err);
2493         }
2494
2495         /* add the entry to the device-based hash */
2496         if (!nhi->fdb_nh)
2497                 nexthop_devhash_add(net, nhi);
2498
2499         rcu_assign_pointer(nh->nh_info, nhi);
2500
2501         return nh;
2502 }
2503
2504 /* called with rtnl lock held */
2505 static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2506                                    struct netlink_ext_ack *extack)
2507 {
2508         struct nexthop *nh;
2509         int err;
2510
2511         if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
2512                 NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
2513                 return ERR_PTR(-EINVAL);
2514         }
2515
2516         if (!cfg->nh_id) {
2517                 cfg->nh_id = nh_find_unused_id(net);
2518                 if (!cfg->nh_id) {
2519                         NL_SET_ERR_MSG(extack, "No unused id");
2520                         return ERR_PTR(-EINVAL);
2521                 }
2522         }
2523
2524         if (cfg->nh_grp)
2525                 nh = nexthop_create_group(net, cfg);
2526         else
2527                 nh = nexthop_create(net, cfg, extack);
2528
2529         if (IS_ERR(nh))
2530                 return nh;
2531
2532         refcount_set(&nh->refcnt, 1);
2533         nh->id = cfg->nh_id;
2534         nh->protocol = cfg->nh_protocol;
2535         nh->net = net;
2536
2537         err = insert_nexthop(net, nh, cfg, extack);
2538         if (err) {
2539                 __remove_nexthop(net, nh, NULL);
2540                 nexthop_put(nh);
2541                 nh = ERR_PTR(err);
2542         }
2543
2544         return nh;
2545 }
2546
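/* Parse an optional timer attribute. The netlink value is in clock_t
 * (USER_HZ) ticks and is converted to jiffies; when the attribute is
 * absent the caller-supplied fallback is used, and *has_p records whether
 * an explicit value was given.
 */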
2547 static int rtm_nh_get_timer(struct nlattr *attr, unsigned long fallback,
2548                             unsigned long *timer_p, bool *has_p,
2549                             struct netlink_ext_ack *extack)
2550 {
2551         unsigned long timer;
2552         u32 value;
2553
2554         if (!attr) {
2555                 *timer_p = fallback;
2556                 *has_p = false;
2557                 return 0;
2558         }
2559
2560         value = nla_get_u32(attr);
2561         timer = clock_t_to_jiffies(value);
2562         if (timer == ~0UL) {
2563                 NL_SET_ERR_MSG(extack, "Timer value too large");
2564                 return -EINVAL;
2565         }
2566
2567         *timer_p = timer;
2568         *has_p = true;
2569         return 0;
2570 }
2571
2572 static int rtm_to_nh_config_grp_res(struct nlattr *res, struct nh_config *cfg,
2573                                     struct netlink_ext_ack *extack)
2574 {
2575         struct nlattr *tb[ARRAY_SIZE(rtm_nh_res_policy_new)] = {};
2576         int err;
2577
2578         if (res) {
2579                 err = nla_parse_nested(tb,
2580                                        ARRAY_SIZE(rtm_nh_res_policy_new) - 1,
2581                                        res, rtm_nh_res_policy_new, extack);
2582                 if (err < 0)
2583                         return err;
2584         }
2585
2586         if (tb[NHA_RES_GROUP_BUCKETS]) {
2587                 cfg->nh_grp_res_num_buckets =
2588                         nla_get_u16(tb[NHA_RES_GROUP_BUCKETS]);
2589                 cfg->nh_grp_res_has_num_buckets = true;
2590                 if (!cfg->nh_grp_res_num_buckets) {
2591                         NL_SET_ERR_MSG(extack, "Number of buckets needs to be non-0");
2592                         return -EINVAL;
2593                 }
2594         }
2595
2596         err = rtm_nh_get_timer(tb[NHA_RES_GROUP_IDLE_TIMER],
2597                                NH_RES_DEFAULT_IDLE_TIMER,
2598                                &cfg->nh_grp_res_idle_timer,
2599                                &cfg->nh_grp_res_has_idle_timer,
2600                                extack);
2601         if (err)
2602                 return err;
2603
2604         return rtm_nh_get_timer(tb[NHA_RES_GROUP_UNBALANCED_TIMER],
2605                                 NH_RES_DEFAULT_UNBALANCED_TIMER,
2606                                 &cfg->nh_grp_res_unbalanced_timer,
2607                                 &cfg->nh_grp_res_has_unbalanced_timer,
2608                                 extack);
2609 }
2610
2611 static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
2612                             struct nlmsghdr *nlh, struct nh_config *cfg,
2613                             struct netlink_ext_ack *extack)
2614 {
2615         struct nhmsg *nhm = nlmsg_data(nlh);
2616         struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
2617         int err;
2618
2619         err = nlmsg_parse(nlh, sizeof(*nhm), tb,
2620                           ARRAY_SIZE(rtm_nh_policy_new) - 1,
2621                           rtm_nh_policy_new, extack);
2622         if (err < 0)
2623                 return err;
2624
2625         err = -EINVAL;
2626         if (nhm->resvd || nhm->nh_scope) {
2627                 NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
2628                 goto out;
2629         }
2630         if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
2631                 NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
2632                 goto out;
2633         }
2634
2635         switch (nhm->nh_family) {
2636         case AF_INET:
2637         case AF_INET6:
2638                 break;
2639         case AF_UNSPEC:
2640                 if (tb[NHA_GROUP])
2641                         break;
2642                 fallthrough;
2643         default:
2644                 NL_SET_ERR_MSG(extack, "Invalid address family");
2645                 goto out;
2646         }
2647
2648         memset(cfg, 0, sizeof(*cfg));
2649         cfg->nlflags = nlh->nlmsg_flags;
2650         cfg->nlinfo.portid = NETLINK_CB(skb).portid;
2651         cfg->nlinfo.nlh = nlh;
2652         cfg->nlinfo.nl_net = net;
2653
2654         cfg->nh_family = nhm->nh_family;
2655         cfg->nh_protocol = nhm->nh_protocol;
2656         cfg->nh_flags = nhm->nh_flags;
2657
2658         if (tb[NHA_ID])
2659                 cfg->nh_id = nla_get_u32(tb[NHA_ID]);
2660
2661         if (tb[NHA_FDB]) {
2662                 if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
2663                     tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
2664                         NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
2665                         goto out;
2666                 }
2667                 if (nhm->nh_flags) {
2668                         NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
2669                         goto out;
2670                 }
2671                 cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
2672         }
2673
2674         if (tb[NHA_GROUP]) {
2675                 if (nhm->nh_family != AF_UNSPEC) {
2676                         NL_SET_ERR_MSG(extack, "Invalid family for group");
2677                         goto out;
2678                 }
2679                 cfg->nh_grp = tb[NHA_GROUP];
2680
2681                 cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
2682                 if (tb[NHA_GROUP_TYPE])
2683                         cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
2684
2685                 if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
2686                         NL_SET_ERR_MSG(extack, "Invalid group type");
2687                         goto out;
2688                 }
2689                 err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb),
2690                                           cfg->nh_grp_type, extack);
2691                 if (err)
2692                         goto out;
2693
2694                 if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES)
2695                         err = rtm_to_nh_config_grp_res(tb[NHA_RES_GROUP],
2696                                                        cfg, extack);
2697
2698                 /* no other attributes should be set */
2699                 goto out;
2700         }
2701
2702         if (tb[NHA_BLACKHOLE]) {
2703                 if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
2704                     tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
2705                         NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
2706                         goto out;
2707                 }
2708
2709                 cfg->nh_blackhole = 1;
2710                 err = 0;
2711                 goto out;
2712         }
2713
2714         if (!cfg->nh_fdb && !tb[NHA_OIF]) {
2715                 NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
2716                 goto out;
2717         }
2718
2719         if (!cfg->nh_fdb && tb[NHA_OIF]) {
2720                 cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
2721                 if (cfg->nh_ifindex)
2722                         cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
2723
2724                 if (!cfg->dev) {
2725                         NL_SET_ERR_MSG(extack, "Invalid device index");
2726                         goto out;
2727                 } else if (!(cfg->dev->flags & IFF_UP)) {
2728                         NL_SET_ERR_MSG(extack, "Nexthop device is not up");
2729                         err = -ENETDOWN;
2730                         goto out;
2731                 } else if (!netif_carrier_ok(cfg->dev)) {
2732                         NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
2733                         err = -ENETDOWN;
2734                         goto out;
2735                 }
2736         }
2737
2738         err = -EINVAL;
2739         if (tb[NHA_GATEWAY]) {
2740                 struct nlattr *gwa = tb[NHA_GATEWAY];
2741
2742                 switch (cfg->nh_family) {
2743                 case AF_INET:
2744                         if (nla_len(gwa) != sizeof(u32)) {
2745                                 NL_SET_ERR_MSG(extack, "Invalid gateway");
2746                                 goto out;
2747                         }
2748                         cfg->gw.ipv4 = nla_get_be32(gwa);
2749                         break;
2750                 case AF_INET6:
2751                         if (nla_len(gwa) != sizeof(struct in6_addr)) {
2752                                 NL_SET_ERR_MSG(extack, "Invalid gateway");
2753                                 goto out;
2754                         }
2755                         cfg->gw.ipv6 = nla_get_in6_addr(gwa);
2756                         break;
2757                 default:
2758                         NL_SET_ERR_MSG(extack,
2759                                        "Unknown address family for gateway");
2760                         goto out;
2761                 }
2762         } else {
2763                 /* device-only nexthop (no gateway) */
2764                 if (cfg->nh_flags & RTNH_F_ONLINK) {
2765                         NL_SET_ERR_MSG(extack,
2766                                        "ONLINK flag can not be set for nexthop without a gateway");
2767                         goto out;
2768                 }
2769         }
2770
2771         if (tb[NHA_ENCAP]) {
2772                 cfg->nh_encap = tb[NHA_ENCAP];
2773
2774                 if (!tb[NHA_ENCAP_TYPE]) {
2775                         NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
2776                         goto out;
2777                 }
2778
2779                 cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
2780                 err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
2781                 if (err < 0)
2782                         goto out;
2783
2784         } else if (tb[NHA_ENCAP_TYPE]) {
2785                 NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
2786                 goto out;
2787         }
2788
2790         err = 0;
2791 out:
2792         return err;
2793 }
2794
2795 /* rtnl */
2796 static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
2797                            struct netlink_ext_ack *extack)
2798 {
2799         struct net *net = sock_net(skb->sk);
2800         struct nh_config cfg;
2801         struct nexthop *nh;
2802         int err;
2803
2804         err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
2805         if (!err) {
2806                 nh = nexthop_add(net, &cfg, extack);
2807                 if (IS_ERR(nh))
2808                         err = PTR_ERR(nh);
2809         }
2810
2811         return err;
2812 }
2813
2814 static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
2815                                   struct nlattr **tb, u32 *id,
2816                                   struct netlink_ext_ack *extack)
2817 {
2818         struct nhmsg *nhm = nlmsg_data(nlh);
2819
2820         if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
2821                 NL_SET_ERR_MSG(extack, "Invalid values in header");
2822                 return -EINVAL;
2823         }
2824
2825         if (!tb[NHA_ID]) {
2826                 NL_SET_ERR_MSG(extack, "Nexthop id is missing");
2827                 return -EINVAL;
2828         }
2829
2830         *id = nla_get_u32(tb[NHA_ID]);
2831         if (!(*id)) {
2832                 NL_SET_ERR_MSG(extack, "Invalid nexthop id");
2833                 return -EINVAL;
2834         }
2835
2836         return 0;
2837 }
2838
2839 static int nh_valid_get_del_req(const struct nlmsghdr *nlh, u32 *id,
2840                                 struct netlink_ext_ack *extack)
2841 {
2842         struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
2843         int err;
2844
2845         err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
2846                           ARRAY_SIZE(rtm_nh_policy_get) - 1,
2847                           rtm_nh_policy_get, extack);
2848         if (err < 0)
2849                 return err;
2850
2851         return __nh_valid_get_del_req(nlh, tb, id, extack);
2852 }
2853
2854 /* rtnl */
2855 static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
2856                            struct netlink_ext_ack *extack)
2857 {
2858         struct net *net = sock_net(skb->sk);
2859         struct nl_info nlinfo = {
2860                 .nlh = nlh,
2861                 .nl_net = net,
2862                 .portid = NETLINK_CB(skb).portid,
2863         };
2864         struct nexthop *nh;
2865         int err;
2866         u32 id;
2867
2868         err = nh_valid_get_del_req(nlh, &id, extack);
2869         if (err)
2870                 return err;
2871
2872         nh = nexthop_find_by_id(net, id);
2873         if (!nh)
2874                 return -ENOENT;
2875
2876         remove_nexthop(net, nh, &nlinfo);
2877
2878         return 0;
2879 }
2880
2881 /* rtnl */
2882 static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2883                            struct netlink_ext_ack *extack)
2884 {
2885         struct net *net = sock_net(in_skb->sk);
2886         struct sk_buff *skb = NULL;
2887         struct nexthop *nh;
2888         int err;
2889         u32 id;
2890
2891         err = nh_valid_get_del_req(nlh, &id, extack);
2892         if (err)
2893                 return err;
2894
2895         err = -ENOBUFS;
2896         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2897         if (!skb)
2898                 goto out;
2899
2900         err = -ENOENT;
2901         nh = nexthop_find_by_id(net, id);
2902         if (!nh)
2903                 goto errout_free;
2904
2905         err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
2906                            nlh->nlmsg_seq, 0);
2907         if (err < 0) {
2908                 WARN_ON(err == -EMSGSIZE);
2909                 goto errout_free;
2910         }
2911
2912         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2913 out:
2914         return err;
2915 errout_free:
2916         kfree_skb(skb);
2917         goto out;
2918 }
2919
2920 struct nh_dump_filter {
2921         int dev_idx;
2922         int master_idx;
2923         bool group_filter;
2924         bool fdb_filter;
2925 };
2926
2927 static bool nh_dump_filtered(struct nexthop *nh,
2928                              struct nh_dump_filter *filter, u8 family)
2929 {
2930         const struct net_device *dev;
2931         const struct nh_info *nhi;
2932
2933         if (filter->group_filter && !nh->is_group)
2934                 return true;
2935
2936         if (!filter->dev_idx && !filter->master_idx && !family)
2937                 return false;
2938
2939         if (nh->is_group)
2940                 return true;
2941
2942         nhi = rtnl_dereference(nh->nh_info);
2943         if (family && nhi->family != family)
2944                 return true;
2945
2946         dev = nhi->fib_nhc.nhc_dev;
2947         if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
2948                 return true;
2949
2950         if (filter->master_idx) {
2951                 struct net_device *master;
2952
2953                 if (!dev)
2954                         return true;
2955
2956                 master = netdev_master_upper_dev_get((struct net_device *)dev);
2957                 if (!master || master->ifindex != filter->master_idx)
2958                         return true;
2959         }
2960
2961         return false;
2962 }
2963
static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
                               struct nh_dump_filter *filter,
                               struct netlink_ext_ack *extack)
{
        struct nhmsg *nhm;
        u32 idx;

        if (tb[NHA_OIF]) {
                idx = nla_get_u32(tb[NHA_OIF]);
                if (idx > INT_MAX) {
                        NL_SET_ERR_MSG(extack, "Invalid device index");
                        return -EINVAL;
                }
                filter->dev_idx = idx;
        }
        if (tb[NHA_MASTER]) {
                idx = nla_get_u32(tb[NHA_MASTER]);
                if (idx > INT_MAX) {
                        NL_SET_ERR_MSG(extack, "Invalid master device index");
                        return -EINVAL;
                }
                filter->master_idx = idx;
        }
        filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
        filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);

        nhm = nlmsg_data(nlh);
        if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
                NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
                return -EINVAL;
        }

        return 0;
}

static int nh_valid_dump_req(const struct nlmsghdr *nlh,
                             struct nh_dump_filter *filter,
                             struct netlink_callback *cb)
{
        struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
        int err;

        err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
                          ARRAY_SIZE(rtm_nh_policy_dump) - 1,
                          rtm_nh_policy_dump, cb->extack);
        if (err < 0)
                return err;

        return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
}

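/* Dump state carried across netlink callbacks; it is stored directly in
 * cb->ctx, which the BUILD_BUG_ON below guarantees is large enough.
 */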
struct rtm_dump_nh_ctx {
        u32 idx;
};

static struct rtm_dump_nh_ctx *
rtm_dump_nh_ctx(struct netlink_callback *cb)
{
        struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;

        BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
        return ctx;
}

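/* Walk the nexthop tree in ID order and invoke @nh_cb for every entry at or
 * beyond the index saved in @ctx.  The context is updated as the walk
 * progresses so an interrupted dump resumes where it stopped.
 */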
static int rtm_dump_walk_nexthops(struct sk_buff *skb,
                                  struct netlink_callback *cb,
                                  struct rb_root *root,
                                  struct rtm_dump_nh_ctx *ctx,
                                  int (*nh_cb)(struct sk_buff *skb,
                                               struct netlink_callback *cb,
                                               struct nexthop *nh, void *data),
                                  void *data)
{
        struct rb_node *node;
        int idx = 0, s_idx;
        int err;

        s_idx = ctx->idx;
        for (node = rb_first(root); node; node = rb_next(node)) {
                struct nexthop *nh;

                if (idx < s_idx)
                        goto cont;

                nh = rb_entry(node, struct nexthop, rb_node);
                ctx->idx = idx;
                err = nh_cb(skb, cb, nh, data);
                if (err)
                        return err;
cont:
                idx++;
        }

        ctx->idx = idx;
        return 0;
}

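/* Emit one RTM_NEWNEXTHOP message for @nh unless the dump filter skips it. */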
static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
                               struct nexthop *nh, void *data)
{
        struct nhmsg *nhm = nlmsg_data(cb->nlh);
        struct nh_dump_filter *filter = data;

        if (nh_dump_filtered(nh, filter, nhm->nh_family))
                return 0;

        return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
                            NETLINK_CB(cb->skb).portid,
                            cb->nlh->nlmsg_seq, NLM_F_MULTI);
}

/* rtnl */
static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
        struct net *net = sock_net(skb->sk);
        struct rb_root *root = &net->nexthop.rb_root;
        struct nh_dump_filter filter = {};
        int err;

        err = nh_valid_dump_req(cb->nlh, &filter, cb);
        if (err < 0)
                return err;

        err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
                                     &rtm_dump_nexthop_cb, &filter);
        if (err < 0) {
                if (likely(skb->len))
                        goto out;
                goto out_err;
        }

out:
        err = skb->len;
out_err:
        cb->seq = net->nexthop.seq;
        nl_dump_check_consistent(cb, nlmsg_hdr(skb));
        return err;
}

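/* Propagate an MTU change on @dev to the IPv4 nexthops using the device. */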
static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
{
        unsigned int hash = nh_dev_hashfn(dev->ifindex);
        struct net *net = dev_net(dev);
        struct hlist_head *head = &net->nexthop.devhash[hash];
        struct hlist_node *n;
        struct nh_info *nhi;

        hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
                if (nhi->fib_nhc.nhc_dev == dev) {
                        if (nhi->family == AF_INET)
                                fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
                                                   orig_mtu);
                }
        }
}

/* rtnl */
static int nh_netdev_event(struct notifier_block *this,
                           unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_info_ext *info_ext;

        switch (event) {
        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
                nexthop_flush_dev(dev, event);
                break;
        case NETDEV_CHANGE:
                if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
                        nexthop_flush_dev(dev, event);
                break;
        case NETDEV_CHANGEMTU:
                info_ext = ptr;
                nexthop_sync_mtu(dev, info_ext->ext.mtu);
                rt_cache_flush(dev_net(dev));
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block nh_netdev_notifier = {
        .notifier_call = nh_netdev_event,
};

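/* Replay every existing nexthop as a REPLACE notification so that a newly
 * registering notifier starts with a complete view of the table.
 */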
static int nexthops_dump(struct net *net, struct notifier_block *nb,
                         struct netlink_ext_ack *extack)
{
        struct rb_root *root = &net->nexthop.rb_root;
        struct rb_node *node;
        int err = 0;

        for (node = rb_first(root); node; node = rb_next(node)) {
                struct nexthop *nh;

                nh = rb_entry(node, struct nexthop, rb_node);
                err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh,
                                            extack);
                if (err)
                        break;
        }

        return err;
}

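/* Register @nb on the per-netns nexthop notifier chain.  Existing nexthops
 * are replayed to the notifier first; registration is aborted if that
 * replay fails.
 */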
int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
                              struct netlink_ext_ack *extack)
{
        int err;

        rtnl_lock();
        err = nexthops_dump(net, nb, extack);
        if (err)
                goto unlock;
        err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
                                               nb);
unlock:
        rtnl_unlock();
        return err;
}
EXPORT_SYMBOL(register_nexthop_notifier);

int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
                                                  nb);
}
EXPORT_SYMBOL(unregister_nexthop_notifier);

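/* Exported for drivers: reflect the hardware offload/trap state of nexthop
 * @id in its RTNH_F_OFFLOAD/RTNH_F_TRAP flags.
 */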
void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
{
        struct nexthop *nexthop;

        rcu_read_lock();

        nexthop = nexthop_find_by_id(net, id);
        if (!nexthop)
                goto out;

        nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
        if (offload)
                nexthop->nh_flags |= RTNH_F_OFFLOAD;
        if (trap)
                nexthop->nh_flags |= RTNH_F_TRAP;

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_set_hw_flags);

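/* Exported for drivers: reflect the hardware offload/trap state of a single
 * bucket of resilient nexthop group @id.
 */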
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
                                 bool offload, bool trap)
{
        struct nh_res_table *res_table;
        struct nh_res_bucket *bucket;
        struct nexthop *nexthop;
        struct nh_group *nhg;

        rcu_read_lock();

        nexthop = nexthop_find_by_id(net, id);
        if (!nexthop || !nexthop->is_group)
                goto out;

        nhg = rcu_dereference(nexthop->nh_grp);
        if (!nhg->resilient)
                goto out;

        res_table = rcu_dereference(nhg->res_table);
        if (bucket_index >= res_table->num_nh_buckets)
                goto out;

        bucket = &res_table->nh_buckets[bucket_index];
        bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
        if (offload)
                bucket->nh_flags |= RTNH_F_OFFLOAD;
        if (trap)
                bucket->nh_flags |= RTNH_F_TRAP;

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);

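/* Exported for drivers: mark the buckets whose bit is set in @activity as
 * recently used.  @num_buckets must match the group's bucket count, or the
 * report is ignored.
 */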
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
                                     unsigned long *activity)
{
        struct nh_res_table *res_table;
        struct nexthop *nexthop;
        struct nh_group *nhg;
        u16 i;

        rcu_read_lock();

        nexthop = nexthop_find_by_id(net, id);
        if (!nexthop || !nexthop->is_group)
                goto out;

        nhg = rcu_dereference(nexthop->nh_grp);
        if (!nhg->resilient)
                goto out;

        /* Instead of silently ignoring some buckets, demand that the sizes
         * be the same.
         */
        res_table = rcu_dereference(nhg->res_table);
        if (num_buckets != res_table->num_nh_buckets)
                goto out;

        for (i = 0; i < num_buckets; i++) {
                if (test_bit(i, activity))
                        nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
        }

out:
        rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);

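/* Per-netns teardown: flush every nexthop and release the device hash. */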
static void __net_exit nexthop_net_exit(struct net *net)
{
        rtnl_lock();
        flush_all_nexthops(net);
        rtnl_unlock();
        kfree(net->nexthop.devhash);
}

static int __net_init nexthop_net_init(struct net *net)
{
        size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

        net->nexthop.rb_root = RB_ROOT;
        net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
        if (!net->nexthop.devhash)
                return -ENOMEM;
        BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);

        return 0;
}

static struct pernet_operations nexthop_net_ops = {
        .init = nexthop_net_init,
        .exit = nexthop_net_exit,
};

static int __init nexthop_init(void)
{
        register_pernet_subsys(&nexthop_net_ops);

        register_netdevice_notifier(&nh_netdev_notifier);

        rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
        rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
                      rtm_dump_nexthop, 0);

        rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
        rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

        rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
        rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

        return 0;
}
subsys_initcall(nexthop_init);