// SPDX-License-Identifier: GPL-2.0
/* Generic nexthop implementation
 *
 * Copyright (c) 2017-19 Cumulus Networks
 * Copyright (c) 2017-19 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/nexthop.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/ipv6_stubs.h>
#include <net/lwtunnel.h>
#include <net/ndisc.h>
#include <net/nexthop.h>
#include <net/route.h>
#include <net/sock.h>

static void remove_nexthop(struct net *net, struct nexthop *nh,
                           struct nl_info *nlinfo);

#define NH_DEV_HASHBITS  8
#define NH_DEV_HASHSIZE (1U << NH_DEV_HASHBITS)

static const struct nla_policy rtm_nh_policy_new[] = {
        [NHA_ID]                = { .type = NLA_U32 },
        [NHA_GROUP]             = { .type = NLA_BINARY },
        [NHA_GROUP_TYPE]        = { .type = NLA_U16 },
        [NHA_BLACKHOLE]         = { .type = NLA_FLAG },
        [NHA_OIF]               = { .type = NLA_U32 },
        [NHA_GATEWAY]           = { .type = NLA_BINARY },
        [NHA_ENCAP_TYPE]        = { .type = NLA_U16 },
        [NHA_ENCAP]             = { .type = NLA_NESTED },
        [NHA_FDB]               = { .type = NLA_FLAG },
};

static const struct nla_policy rtm_nh_policy_get[] = {
        [NHA_ID]                = { .type = NLA_U32 },
};

static const struct nla_policy rtm_nh_policy_dump[] = {
        [NHA_OIF]               = { .type = NLA_U32 },
        [NHA_GROUPS]            = { .type = NLA_FLAG },
        [NHA_MASTER]            = { .type = NLA_U32 },
        [NHA_FDB]               = { .type = NLA_FLAG },
};

static bool nexthop_notifiers_is_empty(struct net *net)
{
        return !net->nexthop.notifier_chain.head;
}

static void
__nh_notifier_single_info_init(struct nh_notifier_single_info *nh_info,
                               const struct nh_info *nhi)
{
        nh_info->dev = nhi->fib_nhc.nhc_dev;
        nh_info->gw_family = nhi->fib_nhc.nhc_gw_family;
        if (nh_info->gw_family == AF_INET)
                nh_info->ipv4 = nhi->fib_nhc.nhc_gw.ipv4;
        else if (nh_info->gw_family == AF_INET6)
                nh_info->ipv6 = nhi->fib_nhc.nhc_gw.ipv6;

        nh_info->is_reject = nhi->reject_nh;
        nh_info->is_fdb = nhi->fdb_nh;
        nh_info->has_encap = !!nhi->fib_nhc.nhc_lwtstate;
}

static int nh_notifier_single_info_init(struct nh_notifier_info *info,
                                        const struct nexthop *nh)
{
        struct nh_info *nhi = rtnl_dereference(nh->nh_info);

        info->type = NH_NOTIFIER_INFO_TYPE_SINGLE;
        info->nh = kzalloc(sizeof(*info->nh), GFP_KERNEL);
        if (!info->nh)
                return -ENOMEM;

        __nh_notifier_single_info_init(info->nh, nhi);

        return 0;
}

static void nh_notifier_single_info_fini(struct nh_notifier_info *info)
{
        kfree(info->nh);
}

static int nh_notifier_mp_info_init(struct nh_notifier_info *info,
                                    struct nh_group *nhg)
{
        u16 num_nh = nhg->num_nh;
        int i;

        info->type = NH_NOTIFIER_INFO_TYPE_GRP;
        info->nh_grp = kzalloc(struct_size(info->nh_grp, nh_entries, num_nh),
                               GFP_KERNEL);
        if (!info->nh_grp)
                return -ENOMEM;

        info->nh_grp->num_nh = num_nh;
        info->nh_grp->is_fdb = nhg->fdb_nh;

        for (i = 0; i < num_nh; i++) {
                struct nh_grp_entry *nhge = &nhg->nh_entries[i];
                struct nh_info *nhi;

                nhi = rtnl_dereference(nhge->nh->nh_info);
                info->nh_grp->nh_entries[i].id = nhge->nh->id;
                info->nh_grp->nh_entries[i].weight = nhge->weight;
                __nh_notifier_single_info_init(&info->nh_grp->nh_entries[i].nh,
                                               nhi);
        }

        return 0;
}

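/* The per-bucket array below scales with the number of buckets in the
 * resilient table and may be too large for kmalloc(), hence the
 * __vmalloc() allocation.
 */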
static int nh_notifier_res_table_info_init(struct nh_notifier_info *info,
                                           struct nh_group *nhg)
{
        struct nh_res_table *res_table = rtnl_dereference(nhg->res_table);
        u16 num_nh_buckets = res_table->num_nh_buckets;
        unsigned long size;
        u16 i;

        info->type = NH_NOTIFIER_INFO_TYPE_RES_TABLE;
        size = struct_size(info->nh_res_table, nhs, num_nh_buckets);
        info->nh_res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO |
                                       __GFP_NOWARN);
        if (!info->nh_res_table)
                return -ENOMEM;

        info->nh_res_table->num_nh_buckets = num_nh_buckets;

        for (i = 0; i < num_nh_buckets; i++) {
                struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
                struct nh_grp_entry *nhge;
                struct nh_info *nhi;

                nhge = rtnl_dereference(bucket->nh_entry);
                nhi = rtnl_dereference(nhge->nh->nh_info);
                __nh_notifier_single_info_init(&info->nh_res_table->nhs[i],
                                               nhi);
        }

        return 0;
}

static int nh_notifier_grp_info_init(struct nh_notifier_info *info,
                                     const struct nexthop *nh)
{
        struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

        if (nhg->mpath)
                return nh_notifier_mp_info_init(info, nhg);
        else if (nhg->resilient)
                return nh_notifier_res_table_info_init(info, nhg);
        return -EINVAL;
}

static void nh_notifier_grp_info_fini(struct nh_notifier_info *info,
                                      const struct nexthop *nh)
{
        struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

        if (nhg->mpath)
                kfree(info->nh_grp);
        else if (nhg->resilient)
                vfree(info->nh_res_table);
}

static int nh_notifier_info_init(struct nh_notifier_info *info,
                                 const struct nexthop *nh)
{
        info->id = nh->id;

        if (nh->is_group)
                return nh_notifier_grp_info_init(info, nh);
        else
                return nh_notifier_single_info_init(info, nh);
}

static void nh_notifier_info_fini(struct nh_notifier_info *info,
                                  const struct nexthop *nh)
{
        if (nh->is_group)
                nh_notifier_grp_info_fini(info, nh);
        else
                nh_notifier_single_info_fini(info);
}

static int call_nexthop_notifiers(struct net *net,
                                  enum nexthop_event_type event_type,
                                  struct nexthop *nh,
                                  struct netlink_ext_ack *extack)
{
        struct nh_notifier_info info = {
                .net = net,
                .extack = extack,
        };
        int err;

        ASSERT_RTNL();

        if (nexthop_notifiers_is_empty(net))
                return 0;

        err = nh_notifier_info_init(&info, nh);
        if (err) {
                NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
                return err;
        }

        err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
                                           event_type, &info);
        nh_notifier_info_fini(&info, nh);

        return notifier_to_errno(err);
}

static int
nh_notifier_res_bucket_idle_timer_get(const struct nh_notifier_info *info,
                                      bool force, unsigned int *p_idle_timer_ms)
{
        struct nh_res_table *res_table;
        struct nh_group *nhg;
        struct nexthop *nh;
        int err = 0;

        /* When 'force' is false, nexthop bucket replacement is performed
         * because the bucket was deemed to be idle. In this case, capable
         * listeners can choose to perform an atomic replacement: The bucket is
         * only replaced if it is inactive. However, if the idle timer interval
         * is smaller than the interval in which a listener is querying
         * buckets' activity from the device, then atomic replacement should
         * not be tried. Pass the idle timer value to listeners, so that they
         * can determine which type of replacement to perform.
         */
        if (force) {
                *p_idle_timer_ms = 0;
                return 0;
        }

        rcu_read_lock();

        nh = nexthop_find_by_id(info->net, info->id);
        if (!nh) {
                err = -EINVAL;
                goto out;
        }

        nhg = rcu_dereference(nh->nh_grp);
        res_table = rcu_dereference(nhg->res_table);
        *p_idle_timer_ms = jiffies_to_msecs(res_table->idle_timer);

out:
        rcu_read_unlock();

        return err;
}

static int nh_notifier_res_bucket_info_init(struct nh_notifier_info *info,
                                            u16 bucket_index, bool force,
                                            struct nh_info *oldi,
                                            struct nh_info *newi)
{
        unsigned int idle_timer_ms;
        int err;

        err = nh_notifier_res_bucket_idle_timer_get(info, force,
                                                    &idle_timer_ms);
        if (err)
                return err;

        info->type = NH_NOTIFIER_INFO_TYPE_RES_BUCKET;
        info->nh_res_bucket = kzalloc(sizeof(*info->nh_res_bucket),
                                      GFP_KERNEL);
        if (!info->nh_res_bucket)
                return -ENOMEM;

        info->nh_res_bucket->bucket_index = bucket_index;
        info->nh_res_bucket->idle_timer_ms = idle_timer_ms;
        info->nh_res_bucket->force = force;
        __nh_notifier_single_info_init(&info->nh_res_bucket->old_nh, oldi);
        __nh_notifier_single_info_init(&info->nh_res_bucket->new_nh, newi);
        return 0;
}

static void nh_notifier_res_bucket_info_fini(struct nh_notifier_info *info)
{
        kfree(info->nh_res_bucket);
}

static int __call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
                                               u16 bucket_index, bool force,
                                               struct nh_info *oldi,
                                               struct nh_info *newi,
                                               struct netlink_ext_ack *extack)
{
        struct nh_notifier_info info = {
                .net = net,
                .extack = extack,
                .id = nhg_id,
        };
        int err;

        if (nexthop_notifiers_is_empty(net))
                return 0;

        err = nh_notifier_res_bucket_info_init(&info, bucket_index, force,
                                               oldi, newi);
        if (err)
                return err;

        err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
                                           NEXTHOP_EVENT_BUCKET_REPLACE, &info);
        nh_notifier_res_bucket_info_fini(&info);

        return notifier_to_errno(err);
}

/* There are three users of RES_TABLE, and NHs etc. referenced from there:
 *
 * 1) a collection of callbacks for NH maintenance. This operates under
 *    RTNL,
 * 2) the delayed work that gradually balances the resilient table,
 * 3) and nexthop_select_path(), operating under RCU.
 *
 * Both the delayed work and the RTNL block are writers, and need to
 * maintain mutual exclusion. Since there are only two and well-known
 * writers for each table, the RTNL code can make sure it has exclusive
 * access thus:
 *
 * - Have the DW operate without locking;
 * - synchronously cancel the DW;
 * - do the writing;
 * - if the write was not actually a delete, call upkeep, which schedules
 *   DW again if necessary.
 *
 * The functions that are always called from the RTNL context use
 * rtnl_dereference(). The functions that can also be called from the DW do
 * a raw dereference and rely on the above mutual exclusion scheme.
 */
#define nh_res_dereference(p) (rcu_dereference_raw(p))

static int call_nexthop_res_bucket_notifiers(struct net *net, u32 nhg_id,
                                             u16 bucket_index, bool force,
                                             struct nexthop *old_nh,
                                             struct nexthop *new_nh,
                                             struct netlink_ext_ack *extack)
{
        struct nh_info *oldi = nh_res_dereference(old_nh->nh_info);
        struct nh_info *newi = nh_res_dereference(new_nh->nh_info);

        return __call_nexthop_res_bucket_notifiers(net, nhg_id, bucket_index,
                                                   force, oldi, newi, extack);
}

static int call_nexthop_res_table_notifiers(struct net *net, struct nexthop *nh,
                                            struct netlink_ext_ack *extack)
{
        struct nh_notifier_info info = {
                .net = net,
                .extack = extack,
        };
        struct nh_group *nhg;
        int err;

        ASSERT_RTNL();

        if (nexthop_notifiers_is_empty(net))
                return 0;

        /* At this point, the nexthop buckets are still not populated. Only
         * emit a notification with the logical nexthops, so that a listener
         * could potentially veto it in case of unsupported configuration.
         */
        nhg = rtnl_dereference(nh->nh_grp);
        err = nh_notifier_mp_info_init(&info, nhg);
        if (err) {
                NL_SET_ERR_MSG(extack, "Failed to initialize nexthop notifier info");
                return err;
        }

        err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
                                           NEXTHOP_EVENT_RES_TABLE_PRE_REPLACE,
                                           &info);
        kfree(info.nh_grp);

        return notifier_to_errno(err);
}

static int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
                                 enum nexthop_event_type event_type,
                                 struct nexthop *nh,
                                 struct netlink_ext_ack *extack)
{
        struct nh_notifier_info info = {
                .net = net,
                .extack = extack,
        };
        int err;

        err = nh_notifier_info_init(&info, nh);
        if (err)
                return err;

        err = nb->notifier_call(nb, event_type, &info);
        nh_notifier_info_fini(&info, nh);

        return notifier_to_errno(err);
}

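/* Hash an ifindex into the device hash table: fold the value onto
 * itself twice so that all 32 bits contribute, then mask the result
 * down to NH_DEV_HASHBITS bits.
 */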
static unsigned int nh_dev_hashfn(unsigned int val)
{
        unsigned int mask = NH_DEV_HASHSIZE - 1;

        return (val ^
                (val >> NH_DEV_HASHBITS) ^
                (val >> (NH_DEV_HASHBITS * 2))) & mask;
}

static void nexthop_devhash_add(struct net *net, struct nh_info *nhi)
{
        struct net_device *dev = nhi->fib_nhc.nhc_dev;
        struct hlist_head *head;
        unsigned int hash;

        WARN_ON(!dev);

        hash = nh_dev_hashfn(dev->ifindex);
        head = &net->nexthop.devhash[hash];
        hlist_add_head(&nhi->dev_hash, head);
}

static void nexthop_free_group(struct nexthop *nh)
{
        struct nh_group *nhg;
        int i;

        nhg = rcu_dereference_raw(nh->nh_grp);
        for (i = 0; i < nhg->num_nh; ++i) {
                struct nh_grp_entry *nhge = &nhg->nh_entries[i];

                WARN_ON(!list_empty(&nhge->nh_list));
                nexthop_put(nhge->nh);
        }

        WARN_ON(nhg->spare == nhg);

        if (nhg->resilient)
                vfree(rcu_dereference_raw(nhg->res_table));

        kfree(nhg->spare);
        kfree(nhg);
}

static void nexthop_free_single(struct nexthop *nh)
{
        struct nh_info *nhi;

        nhi = rcu_dereference_raw(nh->nh_info);
        switch (nhi->family) {
        case AF_INET:
                fib_nh_release(nh->net, &nhi->fib_nh);
                break;
        case AF_INET6:
                ipv6_stub->fib6_nh_release(&nhi->fib6_nh);
                break;
        }
        kfree(nhi);
}

void nexthop_free_rcu(struct rcu_head *head)
{
        struct nexthop *nh = container_of(head, struct nexthop, rcu);

        if (nh->is_group)
                nexthop_free_group(nh);
        else
                nexthop_free_single(nh);

        kfree(nh);
}
EXPORT_SYMBOL_GPL(nexthop_free_rcu);

static struct nexthop *nexthop_alloc(void)
{
        struct nexthop *nh;

        nh = kzalloc(sizeof(struct nexthop), GFP_KERNEL);
        if (nh) {
                INIT_LIST_HEAD(&nh->fi_list);
                INIT_LIST_HEAD(&nh->f6i_list);
                INIT_LIST_HEAD(&nh->grp_list);
                INIT_LIST_HEAD(&nh->fdb_list);
        }
        return nh;
}

static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
        struct nh_group *nhg;

        nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
        if (nhg)
                nhg->num_nh = num_nh;

        return nhg;
}

static void nh_res_table_upkeep_dw(struct work_struct *work);

static struct nh_res_table *
nexthop_res_table_alloc(struct net *net, u32 nhg_id, struct nh_config *cfg)
{
        const u16 num_nh_buckets = cfg->nh_grp_res_num_buckets;
        struct nh_res_table *res_table;
        unsigned long size;

        size = struct_size(res_table, nh_buckets, num_nh_buckets);
        res_table = __vmalloc(size, GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
        if (!res_table)
                return NULL;

        res_table->net = net;
        res_table->nhg_id = nhg_id;
        INIT_DELAYED_WORK(&res_table->upkeep_dw, &nh_res_table_upkeep_dw);
        INIT_LIST_HEAD(&res_table->uw_nh_entries);
        res_table->idle_timer = cfg->nh_grp_res_idle_timer;
        res_table->unbalanced_timer = cfg->nh_grp_res_unbalanced_timer;
        res_table->num_nh_buckets = num_nh_buckets;
        return res_table;
}

static void nh_base_seq_inc(struct net *net)
{
        while (++net->nexthop.seq == 0)
                ;
}

/* no reference taken; rcu lock or rtnl must be held */
struct nexthop *nexthop_find_by_id(struct net *net, u32 id)
{
        struct rb_node **pp, *parent = NULL, *next;

        pp = &net->nexthop.rb_root.rb_node;
        while (1) {
                struct nexthop *nh;

                next = rcu_dereference_raw(*pp);
                if (!next)
                        break;
                parent = next;

                nh = rb_entry(parent, struct nexthop, rb_node);
                if (id < nh->id)
                        pp = &next->rb_left;
                else if (id > nh->id)
                        pp = &next->rb_right;
                else
                        return nh;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_find_by_id);

/* used for auto id allocation; called with rtnl held */
static u32 nh_find_unused_id(struct net *net)
{
        u32 id_start = net->nexthop.last_id_allocated;

        while (1) {
                net->nexthop.last_id_allocated++;
                if (net->nexthop.last_id_allocated == id_start)
                        break;

                if (!nexthop_find_by_id(net, net->nexthop.last_id_allocated))
                        return net->nexthop.last_id_allocated;
        }
        return 0;
}

static void nh_res_time_set_deadline(unsigned long next_time,
                                     unsigned long *deadline)
{
        if (time_before(next_time, *deadline))
                *deadline = next_time;
}

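/* Note that the weight carried in the NHA_GROUP attribute is zero-based
 * (weight - 1), like rtnh_hops in legacy multipath routes, while
 * nh_grp_entry::weight stores the one-based value.
 */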
static int nla_put_nh_group(struct sk_buff *skb, struct nh_group *nhg)
{
        struct nexthop_grp *p;
        size_t len = nhg->num_nh * sizeof(*p);
        struct nlattr *nla;
        u16 group_type = 0;
        int i;

        if (nhg->mpath)
                group_type = NEXTHOP_GRP_TYPE_MPATH;

        if (nla_put_u16(skb, NHA_GROUP_TYPE, group_type))
                goto nla_put_failure;

        nla = nla_reserve(skb, NHA_GROUP, len);
        if (!nla)
                goto nla_put_failure;

        p = nla_data(nla);
        for (i = 0; i < nhg->num_nh; ++i) {
                p->id = nhg->nh_entries[i].nh->id;
                p->weight = nhg->nh_entries[i].weight - 1;
                p += 1;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
                        int event, u32 portid, u32 seq, unsigned int nlflags)
{
        struct fib6_nh *fib6_nh;
        struct fib_nh *fib_nh;
        struct nlmsghdr *nlh;
        struct nh_info *nhi;
        struct nhmsg *nhm;

        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nhm), nlflags);
        if (!nlh)
                return -EMSGSIZE;

        nhm = nlmsg_data(nlh);
        nhm->nh_family = AF_UNSPEC;
        nhm->nh_flags = nh->nh_flags;
        nhm->nh_protocol = nh->protocol;
        nhm->nh_scope = 0;
        nhm->resvd = 0;

        if (nla_put_u32(skb, NHA_ID, nh->id))
                goto nla_put_failure;

        if (nh->is_group) {
                struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

                if (nhg->fdb_nh && nla_put_flag(skb, NHA_FDB))
                        goto nla_put_failure;
                if (nla_put_nh_group(skb, nhg))
                        goto nla_put_failure;
                goto out;
        }

        nhi = rtnl_dereference(nh->nh_info);
        nhm->nh_family = nhi->family;
        if (nhi->reject_nh) {
                if (nla_put_flag(skb, NHA_BLACKHOLE))
                        goto nla_put_failure;
                goto out;
        } else if (nhi->fdb_nh) {
                if (nla_put_flag(skb, NHA_FDB))
                        goto nla_put_failure;
        } else {
                const struct net_device *dev;

                dev = nhi->fib_nhc.nhc_dev;
                if (dev && nla_put_u32(skb, NHA_OIF, dev->ifindex))
                        goto nla_put_failure;
        }

        nhm->nh_scope = nhi->fib_nhc.nhc_scope;
        switch (nhi->family) {
        case AF_INET:
                fib_nh = &nhi->fib_nh;
                if (fib_nh->fib_nh_gw_family &&
                    nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
                        goto nla_put_failure;
                break;

        case AF_INET6:
                fib6_nh = &nhi->fib6_nh;
                if (fib6_nh->fib_nh_gw_family &&
                    nla_put_in6_addr(skb, NHA_GATEWAY, &fib6_nh->fib_nh_gw6))
                        goto nla_put_failure;
                break;
        }

        if (nhi->fib_nhc.nhc_lwtstate &&
            lwtunnel_fill_encap(skb, nhi->fib_nhc.nhc_lwtstate,
                                NHA_ENCAP, NHA_ENCAP_TYPE) < 0)
                goto nla_put_failure;

out:
        nlmsg_end(skb, nlh);
        return 0;

nla_put_failure:
        nlmsg_cancel(skb, nlh);
        return -EMSGSIZE;
}

static size_t nh_nlmsg_size_grp(struct nexthop *nh)
{
        struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
        size_t sz = sizeof(struct nexthop_grp) * nhg->num_nh;

        return nla_total_size(sz) +
               nla_total_size(2);  /* NHA_GROUP_TYPE */
}

static size_t nh_nlmsg_size_single(struct nexthop *nh)
{
        struct nh_info *nhi = rtnl_dereference(nh->nh_info);
        size_t sz;

        /* covers NHA_BLACKHOLE since NHA_OIF and BLACKHOLE
         * are mutually exclusive
         */
        sz = nla_total_size(4);  /* NHA_OIF */

        switch (nhi->family) {
        case AF_INET:
                if (nhi->fib_nh.fib_nh_gw_family)
                        sz += nla_total_size(4);  /* NHA_GATEWAY */
                break;

        case AF_INET6:
                /* NHA_GATEWAY */
                if (nhi->fib6_nh.fib_nh_gw_family)
                        sz += nla_total_size(sizeof(const struct in6_addr));
                break;
        }

        if (nhi->fib_nhc.nhc_lwtstate) {
                sz += lwtunnel_get_encap_size(nhi->fib_nhc.nhc_lwtstate);
                sz += nla_total_size(2);  /* NHA_ENCAP_TYPE */
        }

        return sz;
}

static size_t nh_nlmsg_size(struct nexthop *nh)
{
        size_t sz = NLMSG_ALIGN(sizeof(struct nhmsg));

        sz += nla_total_size(4); /* NHA_ID */

        if (nh->is_group)
                sz += nh_nlmsg_size_grp(nh);
        else
                sz += nh_nlmsg_size_single(nh);

        return sz;
}

static void nexthop_notify(int event, struct nexthop *nh, struct nl_info *info)
{
        unsigned int nlflags = info->nlh ? info->nlh->nlmsg_flags : 0;
        u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
        struct sk_buff *skb;
        int err = -ENOBUFS;

        skb = nlmsg_new(nh_nlmsg_size(nh), gfp_any());
        if (!skb)
                goto errout;

        err = nh_fill_node(skb, nh, event, info->portid, seq, nlflags);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in nh_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
                kfree_skb(skb);
                goto errout;
        }

        rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_NEXTHOP,
                    info->nlh, gfp_any());
        return;
errout:
        if (err < 0)
                rtnl_set_sk_err(info->nl_net, RTNLGRP_NEXTHOP, err);
}

static unsigned long nh_res_bucket_used_time(const struct nh_res_bucket *bucket)
{
        return (unsigned long)atomic_long_read(&bucket->used_time);
}

static unsigned long
nh_res_bucket_idle_point(const struct nh_res_table *res_table,
                         const struct nh_res_bucket *bucket,
                         unsigned long now)
{
        unsigned long time = nh_res_bucket_used_time(bucket);

        /* Bucket was not used since it was migrated. The idle time is now. */
        if (time == bucket->migrated_time)
                return now;

        return time + res_table->idle_timer;
}

static unsigned long
nh_res_table_unb_point(const struct nh_res_table *res_table)
{
        return res_table->unbalanced_since + res_table->unbalanced_timer;
}

static void nh_res_bucket_set_idle(const struct nh_res_table *res_table,
                                   struct nh_res_bucket *bucket)
{
        unsigned long now = jiffies;

        atomic_long_set(&bucket->used_time, (long)now);
        bucket->migrated_time = now;
}

static void nh_res_bucket_set_busy(struct nh_res_bucket *bucket)
{
        atomic_long_set(&bucket->used_time, (long)jiffies);
}

static bool valid_group_nh(struct nexthop *nh, unsigned int npaths,
                           bool *is_fdb, struct netlink_ext_ack *extack)
{
        if (nh->is_group) {
                struct nh_group *nhg = rtnl_dereference(nh->nh_grp);

                /* Nesting groups within groups is not supported. */
                if (nhg->mpath) {
                        NL_SET_ERR_MSG(extack,
                                       "Multipath group can not be a nexthop within a group");
                        return false;
                }
                if (nhg->resilient) {
                        NL_SET_ERR_MSG(extack,
                                       "Resilient group can not be a nexthop within a group");
                        return false;
                }
                *is_fdb = nhg->fdb_nh;
        } else {
                struct nh_info *nhi = rtnl_dereference(nh->nh_info);

                if (nhi->reject_nh && npaths > 1) {
                        NL_SET_ERR_MSG(extack,
                                       "Blackhole nexthop can not be used in a group with more than 1 path");
                        return false;
                }
                *is_fdb = nhi->fdb_nh;
        }

        return true;
}

static int nh_check_attr_fdb_group(struct nexthop *nh, u8 *nh_family,
                                   struct netlink_ext_ack *extack)
{
        struct nh_info *nhi;

        nhi = rtnl_dereference(nh->nh_info);

        if (!nhi->fdb_nh) {
                NL_SET_ERR_MSG(extack, "FDB nexthop group can only have fdb nexthops");
                return -EINVAL;
        }

        if (*nh_family == AF_UNSPEC) {
                *nh_family = nhi->family;
        } else if (*nh_family != nhi->family) {
                NL_SET_ERR_MSG(extack, "FDB nexthop group cannot have mixed family nexthops");
                return -EINVAL;
        }

        return 0;
}

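/* Validate NHA_GROUP: the attribute must hold a whole number of
 * nexthop_grp entries with zeroed reserved fields, in-range weights and
 * no duplicate ids; each referenced nexthop must exist and be usable in
 * a group, and no unrelated attributes may accompany a group.
 */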
static int nh_check_attr_group(struct net *net,
                               struct nlattr *tb[], size_t tb_size,
                               struct netlink_ext_ack *extack)
{
        unsigned int len = nla_len(tb[NHA_GROUP]);
        u8 nh_family = AF_UNSPEC;
        struct nexthop_grp *nhg;
        unsigned int i, j;
        u8 nhg_fdb = 0;

        if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
                NL_SET_ERR_MSG(extack,
                               "Invalid length for nexthop group attribute");
                return -EINVAL;
        }

        /* convert len to number of nexthop ids */
        len /= sizeof(*nhg);

        nhg = nla_data(tb[NHA_GROUP]);
        for (i = 0; i < len; ++i) {
                if (nhg[i].resvd1 || nhg[i].resvd2) {
                        NL_SET_ERR_MSG(extack, "Reserved fields in nexthop_grp must be 0");
                        return -EINVAL;
                }
                if (nhg[i].weight > 254) {
                        NL_SET_ERR_MSG(extack, "Invalid value for weight");
                        return -EINVAL;
                }
                for (j = i + 1; j < len; ++j) {
                        if (nhg[i].id == nhg[j].id) {
                                NL_SET_ERR_MSG(extack, "Nexthop id can not be used twice in a group");
                                return -EINVAL;
                        }
                }
        }

        if (tb[NHA_FDB])
                nhg_fdb = 1;
        nhg = nla_data(tb[NHA_GROUP]);
        for (i = 0; i < len; ++i) {
                struct nexthop *nh;
                bool is_fdb_nh;

                nh = nexthop_find_by_id(net, nhg[i].id);
                if (!nh) {
                        NL_SET_ERR_MSG(extack, "Invalid nexthop id");
                        return -EINVAL;
                }
                if (!valid_group_nh(nh, len, &is_fdb_nh, extack))
                        return -EINVAL;

                if (nhg_fdb && nh_check_attr_fdb_group(nh, &nh_family, extack))
                        return -EINVAL;

                if (!nhg_fdb && is_fdb_nh) {
                        NL_SET_ERR_MSG(extack, "Non FDB nexthop group cannot have fdb nexthops");
                        return -EINVAL;
                }
        }
        for (i = NHA_GROUP_TYPE + 1; i < tb_size; ++i) {
                if (!tb[i])
                        continue;
                if (i == NHA_FDB)
                        continue;
                NL_SET_ERR_MSG(extack,
                               "No other attributes can be set in nexthop groups");
                return -EINVAL;
        }

        return 0;
}

static bool ipv6_good_nh(const struct fib6_nh *nh)
{
        int state = NUD_REACHABLE;
        struct neighbour *n;

        rcu_read_lock_bh();

        n = __ipv6_neigh_lookup_noref_stub(nh->fib_nh_dev, &nh->fib_nh_gw6);
        if (n)
                state = n->nud_state;

        rcu_read_unlock_bh();

        return !!(state & NUD_VALID);
}

static bool ipv4_good_nh(const struct fib_nh *nh)
{
        int state = NUD_REACHABLE;
        struct neighbour *n;

        rcu_read_lock_bh();

        n = __ipv4_neigh_lookup_noref(nh->fib_nh_dev,
                                      (__force u32)nh->fib_nh_gw4);
        if (n)
                state = n->nud_state;

        rcu_read_unlock_bh();

        return !!(state & NUD_VALID);
}

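/* Hash-threshold selection: each group entry owns a contiguous slice of
 * the hash space, bounded above by mpath.upper_bound. Pick the first
 * entry whose slice covers the hash and whose neighbour is valid; if
 * none is valid, fall back to the first in-range entry.
 */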
static struct nexthop *nexthop_select_path_mp(struct nh_group *nhg, int hash)
{
        struct nexthop *rc = NULL;
        int i;

        for (i = 0; i < nhg->num_nh; ++i) {
                struct nh_grp_entry *nhge = &nhg->nh_entries[i];
                struct nh_info *nhi;

                if (hash > atomic_read(&nhge->mpath.upper_bound))
                        continue;

                nhi = rcu_dereference(nhge->nh->nh_info);
                if (nhi->fdb_nh)
                        return nhge->nh;

                /* nexthops always check if the neighbour is good and do
                 * not rely on a sysctl for this behavior
                 */
                switch (nhi->family) {
                case AF_INET:
                        if (ipv4_good_nh(&nhi->fib_nh))
                                return nhge->nh;
                        break;
                case AF_INET6:
                        if (ipv6_good_nh(&nhi->fib6_nh))
                                return nhge->nh;
                        break;
                }

                if (!rc)
                        rc = nhge->nh;
        }

        return rc;
}

static struct nexthop *nexthop_select_path_res(struct nh_group *nhg, int hash)
{
        struct nh_res_table *res_table = rcu_dereference(nhg->res_table);
        u16 bucket_index = hash % res_table->num_nh_buckets;
        struct nh_res_bucket *bucket;
        struct nh_grp_entry *nhge;

        /* nexthop_select_path() is expected to return a non-NULL value, so
         * skip protocol validation and just hand out whatever there is.
         */
        bucket = &res_table->nh_buckets[bucket_index];
        nh_res_bucket_set_busy(bucket);
        nhge = rcu_dereference(bucket->nh_entry);
        return nhge->nh;
}

struct nexthop *nexthop_select_path(struct nexthop *nh, int hash)
{
        struct nh_group *nhg;

        if (!nh->is_group)
                return nh;

        nhg = rcu_dereference(nh->nh_grp);
        if (nhg->mpath)
                return nexthop_select_path_mp(nhg, hash);
        else if (nhg->resilient)
                return nexthop_select_path_res(nhg, hash);

        /* Unreachable. */
        return NULL;
}
EXPORT_SYMBOL_GPL(nexthop_select_path);

int nexthop_for_each_fib6_nh(struct nexthop *nh,
                             int (*cb)(struct fib6_nh *nh, void *arg),
                             void *arg)
{
        struct nh_info *nhi;
        int err;

        if (nh->is_group) {
                struct nh_group *nhg;
                int i;

                nhg = rcu_dereference_rtnl(nh->nh_grp);
                for (i = 0; i < nhg->num_nh; i++) {
                        struct nh_grp_entry *nhge = &nhg->nh_entries[i];

                        nhi = rcu_dereference_rtnl(nhge->nh->nh_info);
                        err = cb(&nhi->fib6_nh, arg);
                        if (err)
                                return err;
                }
        } else {
                nhi = rcu_dereference_rtnl(nh->nh_info);
                err = cb(&nhi->fib6_nh, arg);
                if (err)
                        return err;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(nexthop_for_each_fib6_nh);

static int check_src_addr(const struct in6_addr *saddr,
                          struct netlink_ext_ack *extack)
{
        if (!ipv6_addr_any(saddr)) {
                NL_SET_ERR_MSG(extack, "IPv6 routes using source address can not use nexthop objects");
                return -EINVAL;
        }
        return 0;
}

int fib6_check_nexthop(struct nexthop *nh, struct fib6_config *cfg,
                       struct netlink_ext_ack *extack)
{
        struct nh_info *nhi;
        bool is_fdb_nh;

        /* fib6_src is unique to a fib6_info and limits the ability to cache
         * routes in fib6_nh within a nexthop that is potentially shared
         * across multiple fib entries. If the config wants to use source
         * routing it can not use nexthop objects. mlxsw also does not allow
         * fib6_src on routes.
         */
        if (cfg && check_src_addr(&cfg->fc_src, extack) < 0)
                return -EINVAL;

        if (nh->is_group) {
                struct nh_group *nhg;

                nhg = rtnl_dereference(nh->nh_grp);
                if (nhg->has_v4)
                        goto no_v4_nh;
                is_fdb_nh = nhg->fdb_nh;
        } else {
                nhi = rtnl_dereference(nh->nh_info);
                if (nhi->family == AF_INET)
                        goto no_v4_nh;
                is_fdb_nh = nhi->fdb_nh;
        }

        if (is_fdb_nh) {
                NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
                return -EINVAL;
        }

        return 0;
no_v4_nh:
        NL_SET_ERR_MSG(extack, "IPv6 routes can not use an IPv4 nexthop");
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(fib6_check_nexthop);

/* If the existing nexthop has ipv6 routes linked to it, we need to
 * verify that this new spec works with ipv6.
 */
static int fib6_check_nh_list(struct nexthop *old, struct nexthop *new,
                              struct netlink_ext_ack *extack)
{
        struct fib6_info *f6i;

        if (list_empty(&old->f6i_list))
                return 0;

        list_for_each_entry(f6i, &old->f6i_list, nh_list) {
                if (check_src_addr(&f6i->fib6_src.addr, extack) < 0)
                        return -EINVAL;
        }

        return fib6_check_nexthop(new, NULL, extack);
}

static int nexthop_check_scope(struct nh_info *nhi, u8 scope,
                               struct netlink_ext_ack *extack)
{
        if (scope == RT_SCOPE_HOST && nhi->fib_nhc.nhc_gw_family) {
                NL_SET_ERR_MSG(extack,
                               "Route with host scope can not have a gateway");
                return -EINVAL;
        }

        if (nhi->fib_nhc.nhc_flags & RTNH_F_ONLINK && scope >= RT_SCOPE_LINK) {
                NL_SET_ERR_MSG(extack, "Scope mismatch with nexthop");
                return -EINVAL;
        }

        return 0;
}

/* Invoked by fib add code to verify nexthop by id is ok with
 * config for prefix; parts of fib_check_nh not done when nexthop
 * object is used.
 */
int fib_check_nexthop(struct nexthop *nh, u8 scope,
                      struct netlink_ext_ack *extack)
{
        struct nh_info *nhi;
        int err = 0;

        if (nh->is_group) {
                struct nh_group *nhg;

                nhg = rtnl_dereference(nh->nh_grp);
                if (nhg->fdb_nh) {
                        NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
                        err = -EINVAL;
                        goto out;
                }

                if (scope == RT_SCOPE_HOST) {
                        NL_SET_ERR_MSG(extack, "Route with host scope can not have multiple nexthops");
                        err = -EINVAL;
                        goto out;
                }

                /* all nexthops in a group have the same scope */
                nhi = rtnl_dereference(nhg->nh_entries[0].nh->nh_info);
                err = nexthop_check_scope(nhi, scope, extack);
        } else {
                nhi = rtnl_dereference(nh->nh_info);
                if (nhi->fdb_nh) {
                        NL_SET_ERR_MSG(extack, "Route cannot point to a fdb nexthop");
                        err = -EINVAL;
                        goto out;
                }
                err = nexthop_check_scope(nhi, scope, extack);
        }

out:
        return err;
}

static int fib_check_nh_list(struct nexthop *old, struct nexthop *new,
                             struct netlink_ext_ack *extack)
{
        struct fib_info *fi;

        list_for_each_entry(fi, &old->fi_list, nh_list) {
                int err;

                err = fib_check_nexthop(new, fi->fib_scope, extack);
                if (err)
                        return err;
        }
        return 0;
}

static bool nh_res_nhge_is_balanced(const struct nh_grp_entry *nhge)
{
        return nhge->res.count_buckets == nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_ow(const struct nh_grp_entry *nhge)
{
        return nhge->res.count_buckets > nhge->res.wants_buckets;
}

static bool nh_res_nhge_is_uw(const struct nh_grp_entry *nhge)
{
        return nhge->res.count_buckets < nhge->res.wants_buckets;
}

static bool nh_res_table_is_balanced(const struct nh_res_table *res_table)
{
        return list_empty(&res_table->uw_nh_entries);
}

static void nh_res_bucket_unset_nh(struct nh_res_bucket *bucket)
{
        struct nh_grp_entry *nhge;

        if (bucket->occupied) {
                nhge = nh_res_dereference(bucket->nh_entry);
                nhge->res.count_buckets--;
                bucket->occupied = false;
        }
}

static void nh_res_bucket_set_nh(struct nh_res_bucket *bucket,
                                 struct nh_grp_entry *nhge)
{
        nh_res_bucket_unset_nh(bucket);

        bucket->occupied = true;
        rcu_assign_pointer(bucket->nh_entry, nhge);
        nhge->res.count_buckets++;
}

static bool nh_res_bucket_should_migrate(struct nh_res_table *res_table,
                                         struct nh_res_bucket *bucket,
                                         unsigned long *deadline, bool *force)
{
        unsigned long now = jiffies;
        struct nh_grp_entry *nhge;
        unsigned long idle_point;

        if (!bucket->occupied) {
                /* The bucket is not occupied, its NHGE pointer is either
                 * NULL or obsolete. We _have to_ migrate: set force.
                 */
                *force = true;
                return true;
        }

        nhge = nh_res_dereference(bucket->nh_entry);

        /* If the bucket is populated by an underweight or balanced
         * nexthop, do not migrate.
         */
        if (!nh_res_nhge_is_ow(nhge))
                return false;

        /* At this point we know that the bucket is populated with an
         * overweight nexthop. It needs to be migrated to a new nexthop if
         * the idle timer or the unbalanced timer expired.
         */

        idle_point = nh_res_bucket_idle_point(res_table, bucket, now);
        if (time_after_eq(now, idle_point)) {
                /* The bucket is idle. We _can_ migrate: unset force. */
                *force = false;
                return true;
        }

        /* Unbalanced timer of 0 means "never force". */
        if (res_table->unbalanced_timer) {
                unsigned long unb_point;

                unb_point = nh_res_table_unb_point(res_table);
                if (time_after(now, unb_point)) {
                        /* The bucket is not idle, but the unbalanced timer
                         * expired. We _can_ migrate, but set force anyway,
                         * so that drivers know to ignore activity reports
                         * from the HW.
                         */
                        *force = true;
                        return true;
                }

                nh_res_time_set_deadline(unb_point, deadline);
        }

        nh_res_time_set_deadline(idle_point, deadline);
        return false;
}

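/* Migrate one bucket to the first underweight nexthop. When "notify"
 * is set, give listeners a chance to veto the migration; a forced
 * migration cannot be vetoed, so on error only the hardware flags are
 * cleared and the migration proceeds.
 */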
static bool nh_res_bucket_migrate(struct nh_res_table *res_table,
                                  u16 bucket_index, bool notify, bool force)
{
        struct nh_res_bucket *bucket = &res_table->nh_buckets[bucket_index];
        struct nh_grp_entry *new_nhge;
        struct netlink_ext_ack extack;
        int err;

        new_nhge = list_first_entry_or_null(&res_table->uw_nh_entries,
                                            struct nh_grp_entry,
                                            res.uw_nh_entry);
        if (WARN_ON_ONCE(!new_nhge))
                /* If this function is called, "bucket" is either not
                 * occupied, or it belongs to a next hop that is
                 * overweight. In either case, there ought to be a
                 * corresponding underweight next hop.
                 */
                return false;

        if (notify) {
                struct nh_grp_entry *old_nhge;

                old_nhge = nh_res_dereference(bucket->nh_entry);
                err = call_nexthop_res_bucket_notifiers(res_table->net,
                                                        res_table->nhg_id,
                                                        bucket_index, force,
                                                        old_nhge->nh,
                                                        new_nhge->nh, &extack);
                if (err) {
                        pr_err_ratelimited("%s\n", extack._msg);
                        if (!force)
                                return false;
                        /* It is not possible to veto a forced replacement, so
                         * just clear the hardware flags from the nexthop
                         * bucket to indicate to user space that this bucket is
                         * not correctly populated in hardware.
                         */
                        bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
                }
        }

        nh_res_bucket_set_nh(bucket, new_nhge);
        nh_res_bucket_set_idle(res_table, bucket);

        if (nh_res_nhge_is_balanced(new_nhge))
                list_del(&new_nhge->res.uw_nh_entry);
        return true;
}

#define NH_RES_UPKEEP_DW_MINIMUM_INTERVAL (HZ / 2)

static void nh_res_table_upkeep(struct nh_res_table *res_table, bool notify)
{
        unsigned long now = jiffies;
        unsigned long deadline;
        u16 i;

        /* Deadline is the next time that upkeep should be run. It is the
         * earliest time at which one of the buckets might be migrated.
         * Start at the most pessimistic estimate: either unbalanced_timer
         * from now, or if there is none, idle_timer from now. For each
         * encountered time point, call nh_res_time_set_deadline() to
         * refine the estimate.
         */
        if (res_table->unbalanced_timer)
                deadline = now + res_table->unbalanced_timer;
        else
                deadline = now + res_table->idle_timer;

        for (i = 0; i < res_table->num_nh_buckets; i++) {
                struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
                bool force;

                if (nh_res_bucket_should_migrate(res_table, bucket,
                                                 &deadline, &force)) {
                        if (!nh_res_bucket_migrate(res_table, i, notify,
                                                   force)) {
                                unsigned long idle_point;

                                /* A driver can override the migration
                                 * decision if the HW reports that the
                                 * bucket is actually not idle. Therefore
                                 * mark the bucket as busy again and
                                 * update the deadline.
                                 */
                                nh_res_bucket_set_busy(bucket);
                                idle_point = nh_res_bucket_idle_point(res_table,
                                                                      bucket,
                                                                      now);
                                nh_res_time_set_deadline(idle_point, &deadline);
                        }
                }
        }

        /* If the group is still unbalanced, schedule the next upkeep to
         * either the deadline computed above, or the minimum deadline,
         * whichever comes later.
         */
        if (!nh_res_table_is_balanced(res_table)) {
                unsigned long now = jiffies;
                unsigned long min_deadline;

                min_deadline = now + NH_RES_UPKEEP_DW_MINIMUM_INTERVAL;
                if (time_before(deadline, min_deadline))
                        deadline = min_deadline;

                queue_delayed_work(system_power_efficient_wq,
                                   &res_table->upkeep_dw, deadline - now);
        }
}

static void nh_res_table_upkeep_dw(struct work_struct *work)
{
        struct delayed_work *dw = to_delayed_work(work);
        struct nh_res_table *res_table;

        res_table = container_of(dw, struct nh_res_table, upkeep_dw);
        nh_res_table_upkeep(res_table, true);
}

static void nh_res_table_cancel_upkeep(struct nh_res_table *res_table)
{
        cancel_delayed_work_sync(&res_table->upkeep_dw);
}

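/* Distribute wants_buckets across the group in proportion to the entry
 * weights (computed from cumulative upper bounds), and queue entries
 * that currently hold fewer buckets than they want on the underweight
 * list for the upkeep work to fix up.
 */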
1454 static void nh_res_group_rebalance(struct nh_group *nhg,
1455                                    struct nh_res_table *res_table)
1456 {
1457         int prev_upper_bound = 0;
1458         int total = 0;
1459         int w = 0;
1460         int i;
1461
1462         INIT_LIST_HEAD(&res_table->uw_nh_entries);
1463
1464         for (i = 0; i < nhg->num_nh; ++i)
1465                 total += nhg->nh_entries[i].weight;
1466
1467         for (i = 0; i < nhg->num_nh; ++i) {
1468                 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1469                 int upper_bound;
1470
1471                 w += nhge->weight;
1472                 upper_bound = DIV_ROUND_CLOSEST(res_table->num_nh_buckets * w,
1473                                                 total);
1474                 nhge->res.wants_buckets = upper_bound - prev_upper_bound;
1475                 prev_upper_bound = upper_bound;
1476
1477                 if (nh_res_nhge_is_uw(nhge)) {
1478                         if (list_empty(&res_table->uw_nh_entries))
1479                                 res_table->unbalanced_since = jiffies;
1480                         list_add(&nhge->res.uw_nh_entry,
1481                                  &res_table->uw_nh_entries);
1482                 }
1483         }
1484 }
1485
1486 /* Migrate buckets in res_table so that they reference NHGE's from NHG with
1487  * the right NH ID. Set those buckets that do not have a corresponding NHGE
1488  * entry in NHG as not occupied.
1489  */
1490 static void nh_res_table_migrate_buckets(struct nh_res_table *res_table,
1491                                          struct nh_group *nhg)
1492 {
1493         u16 i;
1494
1495         for (i = 0; i < res_table->num_nh_buckets; i++) {
1496                 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1497                 u32 id = rtnl_dereference(bucket->nh_entry)->nh->id;
1498                 bool found = false;
1499                 int j;
1500
1501                 for (j = 0; j < nhg->num_nh; j++) {
1502                         struct nh_grp_entry *nhge = &nhg->nh_entries[j];
1503
1504                         if (nhge->nh->id == id) {
1505                                 nh_res_bucket_set_nh(bucket, nhge);
1506                                 found = true;
1507                                 break;
1508                         }
1509                 }
1510
1511                 if (!found)
1512                         nh_res_bucket_unset_nh(bucket);
1513         }
1514 }
1515
1516 static void replace_nexthop_grp_res(struct nh_group *oldg,
1517                                     struct nh_group *newg)
1518 {
1519         /* For NH group replacement, the new NHG might only have a stub
1520          * hash table with 0 buckets, because the number of buckets was not
1521          * specified. For NH removal, oldg and newg both reference the same
1522          * res_table. So in any case, in the following, we want to work
1523          * with oldg->res_table.
1524          */
1525         struct nh_res_table *old_res_table = rtnl_dereference(oldg->res_table);
1526         unsigned long prev_unbalanced_since = old_res_table->unbalanced_since;
1527         bool prev_has_uw = !list_empty(&old_res_table->uw_nh_entries);
1528
1529         nh_res_table_cancel_upkeep(old_res_table);
1530         nh_res_table_migrate_buckets(old_res_table, newg);
1531         nh_res_group_rebalance(newg, old_res_table);
1532         if (prev_has_uw && !list_empty(&old_res_table->uw_nh_entries))
1533                 old_res_table->unbalanced_since = prev_unbalanced_since;
1534         nh_res_table_upkeep(old_res_table, true);
1535 }
1536
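/* Recompute hash-threshold upper bounds for a legacy multipath group.
 * Each entry's bound is its cumulative weight share scaled into the
 * 31-bit hash space; path selection picks the first entry whose bound
 * is >= the flow hash. E.g. with weights {1, 3} the bounds come out as
 * 0x1fffffff and 0x7fffffff, i.e. roughly a 25%/75% split of flows.
 */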
1537 static void nh_mp_group_rebalance(struct nh_group *nhg)
1538 {
1539         int total = 0;
1540         int w = 0;
1541         int i;
1542
1543         for (i = 0; i < nhg->num_nh; ++i)
1544                 total += nhg->nh_entries[i].weight;
1545
1546         for (i = 0; i < nhg->num_nh; ++i) {
1547                 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1548                 int upper_bound;
1549
1550                 w += nhge->weight;
1551                 upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31, total) - 1;
1552                 atomic_set(&nhge->mpath.upper_bound, upper_bound);
1553         }
1554 }
1555
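/* Remove one entry from a group by rebuilding the group into its
 * preallocated spare and publishing the spare with RCU. Readers thus
 * always see either the complete old array or the complete new one,
 * never a partially edited group. Removing the last remaining entry
 * takes down the parent group nexthop itself.
 */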
1556 static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
1557                                 struct nl_info *nlinfo)
1558 {
1559         struct nh_grp_entry *nhges, *new_nhges;
1560         struct nexthop *nhp = nhge->nh_parent;
1561         struct netlink_ext_ack extack;
1562         struct nexthop *nh = nhge->nh;
1563         struct nh_group *nhg, *newg;
1564         int i, j, err;
1565
1566         WARN_ON(!nh);
1567
1568         nhg = rtnl_dereference(nhp->nh_grp);
1569         newg = nhg->spare;
1570
1571         /* last entry, keep it visible and remove the parent */
1572         if (nhg->num_nh == 1) {
1573                 remove_nexthop(net, nhp, nlinfo);
1574                 return;
1575         }
1576
1577         newg->has_v4 = false;
1578         newg->is_multipath = nhg->is_multipath;
1579         newg->mpath = nhg->mpath;
1580         newg->resilient = nhg->resilient;
1581         newg->fdb_nh = nhg->fdb_nh;
1582         newg->num_nh = nhg->num_nh;
1583
1584         /* copy old entries to new except the one getting removed */
1585         nhges = nhg->nh_entries;
1586         new_nhges = newg->nh_entries;
1587         for (i = 0, j = 0; i < nhg->num_nh; ++i) {
1588                 struct nh_info *nhi;
1589
1590                 /* current nexthop getting removed */
1591                 if (nhg->nh_entries[i].nh == nh) {
1592                         newg->num_nh--;
1593                         continue;
1594                 }
1595
1596                 nhi = rtnl_dereference(nhges[i].nh->nh_info);
1597                 if (nhi->family == AF_INET)
1598                         newg->has_v4 = true;
1599
1600                 list_del(&nhges[i].nh_list);
1601                 new_nhges[j].nh_parent = nhges[i].nh_parent;
1602                 new_nhges[j].nh = nhges[i].nh;
1603                 new_nhges[j].weight = nhges[i].weight;
1604                 list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list);
1605                 j++;
1606         }
1607
1608         if (newg->mpath)
1609                 nh_mp_group_rebalance(newg);
1610         else if (newg->resilient)
1611                 replace_nexthop_grp_res(nhg, newg);
1612
1613         rcu_assign_pointer(nhp->nh_grp, newg);
1614
1615         list_del(&nhge->nh_list);
1616         nexthop_put(nhge->nh);
1617
1618         /* Removal of a NH from a resilient group is notified through
1619          * bucket notifications.
1620          */
1621         if (newg->mpath) {
1622                 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, nhp,
1623                                              &extack);
1624                 if (err)
1625                         pr_err("%s\n", extack._msg);
1626         }
1627
1628         if (nlinfo)
1629                 nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo);
1630 }
1631
1632 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh,
1633                                        struct nl_info *nlinfo)
1634 {
1635         struct nh_grp_entry *nhge, *tmp;
1636
1637         list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list)
1638                 remove_nh_grp_entry(net, nhge, nlinfo);
1639
1640         /* make sure all see the newly published array before releasing rtnl */
1641         synchronize_net();
1642 }
1643
1644 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo)
1645 {
1646         struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp);
1647         struct nh_res_table *res_table;
1648         int i, num_nh = nhg->num_nh;
1649
1650         for (i = 0; i < num_nh; ++i) {
1651                 struct nh_grp_entry *nhge = &nhg->nh_entries[i];
1652
1653                 if (WARN_ON(!nhge->nh))
1654                         continue;
1655
1656                 list_del_init(&nhge->nh_list);
1657         }
1658
1659         if (nhg->resilient) {
1660                 res_table = rtnl_dereference(nhg->res_table);
1661                 nh_res_table_cancel_upkeep(res_table);
1662         }
1663 }
1664
1665 /* not called for nexthop replace */
1666 static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
1667 {
1668         struct fib6_info *f6i, *tmp;
1669         bool do_flush = false;
1670         struct fib_info *fi;
1671
1672         list_for_each_entry(fi, &nh->fi_list, nh_list) {
1673                 fi->fib_flags |= RTNH_F_DEAD;
1674                 do_flush = true;
1675         }
1676         if (do_flush)
1677                 fib_flush(net);
1678
1679         /* ip6_del_rt removes the entry from this list hence the _safe */
1680         list_for_each_entry_safe(f6i, tmp, &nh->f6i_list, nh_list) {
1681                 /* __ip6_del_rt does a release, so do a hold here */
1682                 fib6_info_hold(f6i);
1683                 ipv6_stub->ip6_del_rt(net, f6i,
1684                                       !net->ipv4.sysctl_nexthop_compat_mode);
1685         }
1686 }
1687
1688 static void __remove_nexthop(struct net *net, struct nexthop *nh,
1689                              struct nl_info *nlinfo)
1690 {
1691         __remove_nexthop_fib(net, nh);
1692
1693         if (nh->is_group) {
1694                 remove_nexthop_group(nh, nlinfo);
1695         } else {
1696                 struct nh_info *nhi;
1697
1698                 nhi = rtnl_dereference(nh->nh_info);
1699                 if (nhi->fib_nhc.nhc_dev)
1700                         hlist_del(&nhi->dev_hash);
1701
1702                 remove_nexthop_from_groups(net, nh, nlinfo);
1703         }
1704 }
1705
1706 static void remove_nexthop(struct net *net, struct nexthop *nh,
1707                            struct nl_info *nlinfo)
1708 {
1709         call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh, NULL);
1710
1711         /* remove from the tree */
1712         rb_erase(&nh->rb_node, &net->nexthop.rb_root);
1713
1714         if (nlinfo)
1715                 nexthop_notify(RTM_DELNEXTHOP, nh, nlinfo);
1716
1717         __remove_nexthop(net, nh, nlinfo);
1718         nh_base_seq_inc(net);
1719
1720         nexthop_put(nh);
1721 }
1722
1723 /* if any FIB entries reference this nexthop, any dst entries
1724  * need to be regenerated
1725  */
1726 static void nh_rt_cache_flush(struct net *net, struct nexthop *nh)
1727 {
1728         struct fib6_info *f6i;
1729
1730         if (!list_empty(&nh->fi_list))
1731                 rt_cache_flush(net);
1732
1733         list_for_each_entry(f6i, &nh->f6i_list, nh_list)
1734                 ipv6_stub->fib6_update_sernum(net, f6i);
1735 }
1736
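/* Group replace works by swapping payloads rather than tree nodes: the
 * nexthop already in the rb-tree ('old') is given the new group, while
 * the transient 'new' nexthop inherits the old group and is torn down
 * by the caller. For resilient groups the existing bucket table
 * survives the replace; only its entry pointers are migrated to the
 * new group.
 */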
1737 static int replace_nexthop_grp(struct net *net, struct nexthop *old,
1738                                struct nexthop *new, const struct nh_config *cfg,
1739                                struct netlink_ext_ack *extack)
1740 {
1741         struct nh_res_table *tmp_table = NULL;
1742         struct nh_res_table *new_res_table;
1743         struct nh_res_table *old_res_table;
1744         struct nh_group *oldg, *newg;
1745         int i, err;
1746
1747         if (!new->is_group) {
1748                 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with a nexthop.");
1749                 return -EINVAL;
1750         }
1751
1752         oldg = rtnl_dereference(old->nh_grp);
1753         newg = rtnl_dereference(new->nh_grp);
1754
1755         if (newg->mpath != oldg->mpath) {
1756                 NL_SET_ERR_MSG(extack, "Can not replace a nexthop group with one of a different type.");
1757                 return -EINVAL;
1758         }
1759
1760         if (newg->mpath) {
1761                 err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new,
1762                                              extack);
1763                 if (err)
1764                         return err;
1765         } else if (newg->resilient) {
1766                 new_res_table = rtnl_dereference(newg->res_table);
1767                 old_res_table = rtnl_dereference(oldg->res_table);
1768
1769                 /* Accept if num_nh_buckets was not given, but if it was
1770                  * given, demand that the value be correct.
1771                  */
1772                 if (cfg->nh_grp_res_has_num_buckets &&
1773                     cfg->nh_grp_res_num_buckets !=
1774                     old_res_table->num_nh_buckets) {
1775                         NL_SET_ERR_MSG(extack, "Can not change number of buckets of a resilient nexthop group.");
1776                         return -EINVAL;
1777                 }
1778
1779                 /* Emit a pre-replace notification so that listeners can veto
1780                  * a potentially unsupported configuration. Otherwise,
1781                  * individual bucket replacement notifications would need to be
1782                  * vetoed, and that should only happen if the bucket is
1783                  * currently active.
1784                  */
1785                 err = call_nexthop_res_table_notifiers(net, new, extack);
1786                 if (err)
1787                         return err;
1788
1789                 if (cfg->nh_grp_res_has_idle_timer)
1790                         old_res_table->idle_timer = cfg->nh_grp_res_idle_timer;
1791                 if (cfg->nh_grp_res_has_unbalanced_timer)
1792                         old_res_table->unbalanced_timer =
1793                                 cfg->nh_grp_res_unbalanced_timer;
1794
1795                 replace_nexthop_grp_res(oldg, newg);
1796
1797                 tmp_table = new_res_table;
1798                 rcu_assign_pointer(newg->res_table, old_res_table);
1799                 rcu_assign_pointer(newg->spare->res_table, old_res_table);
1800         }
1801
1802         /* update parents - used by nexthop code for cleanup */
1803         for (i = 0; i < newg->num_nh; i++)
1804                 newg->nh_entries[i].nh_parent = old;
1805
1806         rcu_assign_pointer(old->nh_grp, newg);
1807
1808         if (newg->resilient) {
1809                 rcu_assign_pointer(oldg->res_table, tmp_table);
1810                 rcu_assign_pointer(oldg->spare->res_table, tmp_table);
1811         }
1812
1813         for (i = 0; i < oldg->num_nh; i++)
1814                 oldg->nh_entries[i].nh_parent = new;
1815
1816         rcu_assign_pointer(new->nh_grp, oldg);
1817
1818         return 0;
1819 }
1820
1821 static void nh_group_v4_update(struct nh_group *nhg)
1822 {
1823         struct nh_grp_entry *nhges;
1824         bool has_v4 = false;
1825         int i;
1826
1827         nhges = nhg->nh_entries;
1828         for (i = 0; i < nhg->num_nh; i++) {
1829                 struct nh_info *nhi;
1830
1831                 nhi = rtnl_dereference(nhges[i].nh->nh_info);
1832                 if (nhi->family == AF_INET)
1833                         has_v4 = true;
1834         }
1835         nhg->has_v4 = has_v4;
1836 }
1837
1838 static int replace_nexthop_single_notify_res(struct net *net,
1839                                              struct nh_res_table *res_table,
1840                                              struct nexthop *old,
1841                                              struct nh_info *oldi,
1842                                              struct nh_info *newi,
1843                                              struct netlink_ext_ack *extack)
1844 {
1845         u32 nhg_id = res_table->nhg_id;
1846         int err;
1847         u16 i;
1848
1849         for (i = 0; i < res_table->num_nh_buckets; i++) {
1850                 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1851                 struct nh_grp_entry *nhge;
1852
1853                 nhge = rtnl_dereference(bucket->nh_entry);
1854                 if (nhge->nh == old) {
1855                         err = __call_nexthop_res_bucket_notifiers(net, nhg_id,
1856                                                                   i, true,
1857                                                                   oldi, newi,
1858                                                                   extack);
1859                         if (err)
1860                                 goto err_notify;
1861                 }
1862         }
1863
1864         return 0;
1865
1866 err_notify:
1867         while (i-- > 0) {
1868                 struct nh_res_bucket *bucket = &res_table->nh_buckets[i];
1869                 struct nh_grp_entry *nhge;
1870
1871                 nhge = rtnl_dereference(bucket->nh_entry);
1872                 if (nhge->nh == old)
1873                         __call_nexthop_res_bucket_notifiers(net, nhg_id, i,
1874                                                             true, newi, oldi,
1875                                                             extack);
1876         }
1877         return err;
1878 }
1879
1880 static int replace_nexthop_single_notify(struct net *net,
1881                                          struct nexthop *group_nh,
1882                                          struct nexthop *old,
1883                                          struct nh_info *oldi,
1884                                          struct nh_info *newi,
1885                                          struct netlink_ext_ack *extack)
1886 {
1887         struct nh_group *nhg = rtnl_dereference(group_nh->nh_grp);
1888         struct nh_res_table *res_table;
1889
1890         if (nhg->mpath) {
1891                 return call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE,
1892                                               group_nh, extack);
1893         } else if (nhg->resilient) {
1894                 res_table = rtnl_dereference(nhg->res_table);
1895                 return replace_nexthop_single_notify_res(net, res_table,
1896                                                          old, oldi, newi,
1897                                                          extack);
1898         }
1899
1900         return -EINVAL;
1901 }
1902
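/* Single nexthop replace follows the same payload-swap scheme as group
 * replace: the nh_info structs of 'old' and 'new' trade places, so FIB
 * entries and groups referencing 'old' pick up the new parameters
 * without being rewritten. If any group notifier vetoes the change,
 * everything is swapped back and the already notified groups are
 * re-notified in reverse order.
 */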
1903 static int replace_nexthop_single(struct net *net, struct nexthop *old,
1904                                   struct nexthop *new,
1905                                   struct netlink_ext_ack *extack)
1906 {
1907         u8 old_protocol, old_nh_flags;
1908         struct nh_info *oldi, *newi;
1909         struct nh_grp_entry *nhge;
1910         int err;
1911
1912         if (new->is_group) {
1913                 NL_SET_ERR_MSG(extack, "Can not replace a nexthop with a nexthop group.");
1914                 return -EINVAL;
1915         }
1916
1917         err = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new, extack);
1918         if (err)
1919                 return err;
1920
1921         /* Hardware flags were set on 'old' as 'new' is not in the red-black
1922          * tree. Therefore, inherit the flags from 'old' to 'new'.
1923          */
1924         new->nh_flags |= old->nh_flags & (RTNH_F_OFFLOAD | RTNH_F_TRAP);
1925
1926         oldi = rtnl_dereference(old->nh_info);
1927         newi = rtnl_dereference(new->nh_info);
1928
1929         newi->nh_parent = old;
1930         oldi->nh_parent = new;
1931
1932         old_protocol = old->protocol;
1933         old_nh_flags = old->nh_flags;
1934
1935         old->protocol = new->protocol;
1936         old->nh_flags = new->nh_flags;
1937
1938         rcu_assign_pointer(old->nh_info, newi);
1939         rcu_assign_pointer(new->nh_info, oldi);
1940
1941         /* Send a replace notification for all the groups using the nexthop. */
1942         list_for_each_entry(nhge, &old->grp_list, nh_list) {
1943                 struct nexthop *nhp = nhge->nh_parent;
1944
1945                 err = replace_nexthop_single_notify(net, nhp, old, oldi, newi,
1946                                                     extack);
1947                 if (err)
1948                         goto err_notify;
1949         }
1950
1951         /* When replacing an IPv4 nexthop with an IPv6 nexthop, potentially
1952          * update IPv4 indication in all the groups using the nexthop.
1953          */
1954         if (oldi->family == AF_INET && newi->family == AF_INET6) {
1955                 list_for_each_entry(nhge, &old->grp_list, nh_list) {
1956                         struct nexthop *nhp = nhge->nh_parent;
1957                         struct nh_group *nhg;
1958
1959                         nhg = rtnl_dereference(nhp->nh_grp);
1960                         nh_group_v4_update(nhg);
1961                 }
1962         }
1963
1964         return 0;
1965
1966 err_notify:
1967         rcu_assign_pointer(new->nh_info, newi);
1968         rcu_assign_pointer(old->nh_info, oldi);
1969         old->nh_flags = old_nh_flags;
1970         old->protocol = old_protocol;
1971         oldi->nh_parent = old;
1972         newi->nh_parent = new;
1973         list_for_each_entry_continue_reverse(nhge, &old->grp_list, nh_list) {
1974                 struct nexthop *nhp = nhge->nh_parent;
1975
1976                 replace_nexthop_single_notify(net, nhp, old, newi, oldi, NULL);
1977         }
1978         call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, old, extack);
1979         return err;
1980 }
1981
1982 static void __nexthop_replace_notify(struct net *net, struct nexthop *nh,
1983                                      struct nl_info *info)
1984 {
1985         struct fib6_info *f6i;
1986
1987         if (!list_empty(&nh->fi_list)) {
1988                 struct fib_info *fi;
1989
1990                 /* expectation is a few fib_info per nexthop and then
1991                  * a lot of routes per fib_info. So mark the fib_info
1992                  * and then walk the fib tables once
1993                  */
1994                 list_for_each_entry(fi, &nh->fi_list, nh_list)
1995                         fi->nh_updated = true;
1996
1997                 fib_info_notify_update(net, info);
1998
1999                 list_for_each_entry(fi, &nh->fi_list, nh_list)
2000                         fi->nh_updated = false;
2001         }
2002
2003         list_for_each_entry(f6i, &nh->f6i_list, nh_list)
2004                 ipv6_stub->fib6_rt_update(net, f6i, info);
2005 }
2006
2007 /* send RTM_NEWROUTE with REPLACE flag set for all FIB entries
2008  * linked to this nexthop and for all groups that the nexthop
2009  * is a member of
2010  */
2011 static void nexthop_replace_notify(struct net *net, struct nexthop *nh,
2012                                    struct nl_info *info)
2013 {
2014         struct nh_grp_entry *nhge;
2015
2016         __nexthop_replace_notify(net, nh, info);
2017
2018         list_for_each_entry(nhge, &nh->grp_list, nh_list)
2019                 __nexthop_replace_notify(net, nhge->nh_parent, info);
2020 }
2021
2022 static int replace_nexthop(struct net *net, struct nexthop *old,
2023                            struct nexthop *new, const struct nh_config *cfg,
2024                            struct netlink_ext_ack *extack)
2025 {
2026         bool new_is_reject = false;
2027         struct nh_grp_entry *nhge;
2028         int err;
2029
2030         /* check that existing FIB entries are ok with the
2031          * new nexthop definition
2032          */
2033         err = fib_check_nh_list(old, new, extack);
2034         if (err)
2035                 return err;
2036
2037         err = fib6_check_nh_list(old, new, extack);
2038         if (err)
2039                 return err;
2040
2041         if (!new->is_group) {
2042                 struct nh_info *nhi = rtnl_dereference(new->nh_info);
2043
2044                 new_is_reject = nhi->reject_nh;
2045         }
2046
2047         list_for_each_entry(nhge, &old->grp_list, nh_list) {
2048                 /* if new nexthop is a blackhole, any groups using this
2049                  * nexthop cannot have more than 1 path
2050                  */
2051                 if (new_is_reject &&
2052                     nexthop_num_path(nhge->nh_parent) > 1) {
2053                         NL_SET_ERR_MSG(extack, "Blackhole nexthop can not be a member of a group with more than one path");
2054                         return -EINVAL;
2055                 }
2056
2057                 err = fib_check_nh_list(nhge->nh_parent, new, extack);
2058                 if (err)
2059                         return err;
2060
2061                 err = fib6_check_nh_list(nhge->nh_parent, new, extack);
2062                 if (err)
2063                         return err;
2064         }
2065
2066         if (old->is_group)
2067                 err = replace_nexthop_grp(net, old, new, cfg, extack);
2068         else
2069                 err = replace_nexthop_single(net, old, new, extack);
2070
2071         if (!err) {
2072                 nh_rt_cache_flush(net, old);
2073
2074                 __remove_nexthop(net, new, NULL);
2075                 nexthop_put(new);
2076         }
2077
2078         return err;
2079 }
2080
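/* Link a new nexthop into the per-netns rb-tree, which is keyed by the
 * nexthop ID. An existing ID is only touched when NLM_F_REPLACE was
 * given, in which case replace_nexthop() swaps the payloads in place.
 * A newly created resilient group also gets its bucket table balanced
 * and filled here, before the full REPLACE notification makes the
 * nexthop visible.
 */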
2081 /* called with rtnl_lock held */
2082 static int insert_nexthop(struct net *net, struct nexthop *new_nh,
2083                           struct nh_config *cfg, struct netlink_ext_ack *extack)
2084 {
2085         struct rb_node **pp, *parent = NULL, *next;
2086         struct rb_root *root = &net->nexthop.rb_root;
2087         bool replace = !!(cfg->nlflags & NLM_F_REPLACE);
2088         bool create = !!(cfg->nlflags & NLM_F_CREATE);
2089         u32 new_id = new_nh->id;
2090         int replace_notify = 0;
2091         int rc = -EEXIST;
2092
2093         pp = &root->rb_node;
2094         while (1) {
2095                 struct nexthop *nh;
2096
2097                 next = *pp;
2098                 if (!next)
2099                         break;
2100
2101                 parent = next;
2102
2103                 nh = rb_entry(parent, struct nexthop, rb_node);
2104                 if (new_id < nh->id) {
2105                         pp = &next->rb_left;
2106                 } else if (new_id > nh->id) {
2107                         pp = &next->rb_right;
2108                 } else if (replace) {
2109                         rc = replace_nexthop(net, nh, new_nh, cfg, extack);
2110                         if (!rc) {
2111                                 new_nh = nh; /* send notification with old nh */
2112                                 replace_notify = 1;
2113                         }
2114                         goto out;
2115                 } else {
2116                         /* id already exists and not a replace */
2117                         goto out;
2118                 }
2119         }
2120
2121         if (replace && !create) {
2122                 NL_SET_ERR_MSG(extack, "Replace specified without create and no entry exists");
2123                 rc = -ENOENT;
2124                 goto out;
2125         }
2126
2127         if (new_nh->is_group) {
2128                 struct nh_group *nhg = rtnl_dereference(new_nh->nh_grp);
2129                 struct nh_res_table *res_table;
2130
2131                 if (nhg->resilient) {
2132                         res_table = rtnl_dereference(nhg->res_table);
2133
2134                         /* Not passing the number of buckets is OK when
2135                          * replacing, but not when creating a new group.
2136                          */
2137                         if (!cfg->nh_grp_res_has_num_buckets) {
2138                                 NL_SET_ERR_MSG(extack, "Number of buckets not specified for nexthop group insertion");
2139                                 rc = -EINVAL;
2140                                 goto out;
2141                         }
2142
2143                         nh_res_group_rebalance(nhg, res_table);
2144
2145                         /* Do not send bucket notifications; we send a full
2146                          * notification below.
2147                          */
2148                         nh_res_table_upkeep(res_table, false);
2149                 }
2150         }
2151
2152         rb_link_node_rcu(&new_nh->rb_node, parent, pp);
2153         rb_insert_color(&new_nh->rb_node, root);
2154
2155         /* The initial insertion is a full notification for mpath as well
2156          * as resilient groups.
2157          */
2158         rc = call_nexthop_notifiers(net, NEXTHOP_EVENT_REPLACE, new_nh, extack);
2159         if (rc)
2160                 rb_erase(&new_nh->rb_node, &net->nexthop.rb_root);
2161
2162 out:
2163         if (!rc) {
2164                 nh_base_seq_inc(net);
2165                 nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo);
2166                 if (replace_notify && net->ipv4.sysctl_nexthop_compat_mode)
2167                         nexthop_replace_notify(net, new_nh, &cfg->nlinfo);
2168         }
2169
2170         return rc;
2171 }
2172
2173 /* rtnl */
2174 /* remove all nexthops tied to a device being deleted */
2175 static void nexthop_flush_dev(struct net_device *dev, unsigned long event)
2176 {
2177         unsigned int hash = nh_dev_hashfn(dev->ifindex);
2178         struct net *net = dev_net(dev);
2179         struct hlist_head *head = &net->nexthop.devhash[hash];
2180         struct hlist_node *n;
2181         struct nh_info *nhi;
2182
2183         hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2184                 if (nhi->fib_nhc.nhc_dev != dev)
2185                         continue;
2186
2187                 if (nhi->reject_nh &&
2188                     (event == NETDEV_DOWN || event == NETDEV_CHANGE))
2189                         continue;
2190
2191                 remove_nexthop(net, nhi->nh_parent, NULL);
2192         }
2193 }
2194
2195 /* rtnl; called when net namespace is deleted */
2196 static void flush_all_nexthops(struct net *net)
2197 {
2198         struct rb_root *root = &net->nexthop.rb_root;
2199         struct rb_node *node;
2200         struct nexthop *nh;
2201
2202         while ((node = rb_first(root))) {
2203                 nh = rb_entry(node, struct nexthop, rb_node);
2204                 remove_nexthop(net, nh, NULL);
2205                 cond_resched();
2206         }
2207 }
2208
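/* Create a group nexthop from the NHA_GROUP attribute, which carries an
 * array of struct nexthop_grp. The netlink encoding stores weight - 1
 * in a u8 (so weights 1..256 fit), hence the '+ 1' when the entries are
 * copied below. A same-sized spare group is allocated up front so that
 * later entry removal can rebuild the group without allocating memory.
 */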
2209 static struct nexthop *nexthop_create_group(struct net *net,
2210                                             struct nh_config *cfg)
2211 {
2212         struct nlattr *grps_attr = cfg->nh_grp;
2213         struct nexthop_grp *entry = nla_data(grps_attr);
2214         u16 num_nh = nla_len(grps_attr) / sizeof(*entry);
2215         struct nh_group *nhg;
2216         struct nexthop *nh;
2217         int err;
2218         int i;
2219
2220         if (WARN_ON(!num_nh))
2221                 return ERR_PTR(-EINVAL);
2222
2223         nh = nexthop_alloc();
2224         if (!nh)
2225                 return ERR_PTR(-ENOMEM);
2226
2227         nh->is_group = 1;
2228
2229         nhg = nexthop_grp_alloc(num_nh);
2230         if (!nhg) {
2231                 kfree(nh);
2232                 return ERR_PTR(-ENOMEM);
2233         }
2234
2235         /* spare group used for removals */
2236         nhg->spare = nexthop_grp_alloc(num_nh);
2237         if (!nhg->spare) {
2238                 kfree(nhg);
2239                 kfree(nh);
2240                 return ERR_PTR(-ENOMEM);
2241         }
2242         nhg->spare->spare = nhg;
2243
2244         for (i = 0; i < nhg->num_nh; ++i) {
2245                 struct nexthop *nhe;
2246                 struct nh_info *nhi;
2247
2248                 nhe = nexthop_find_by_id(net, entry[i].id);
2249                 if (!nexthop_get(nhe)) {
2250                         err = -ENOENT;
2251                         goto out_no_nh;
2252                 }
2253
2254                 nhi = rtnl_dereference(nhe->nh_info);
2255                 if (nhi->family == AF_INET)
2256                         nhg->has_v4 = true;
2257
2258                 nhg->nh_entries[i].nh = nhe;
2259                 nhg->nh_entries[i].weight = entry[i].weight + 1;
2260                 list_add(&nhg->nh_entries[i].nh_list, &nhe->grp_list);
2261                 nhg->nh_entries[i].nh_parent = nh;
2262         }
2263
2264         if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_MPATH) {
2265                 nhg->mpath = 1;
2266                 nhg->is_multipath = true;
2267         } else if (cfg->nh_grp_type == NEXTHOP_GRP_TYPE_RES) {
2268                 struct nh_res_table *res_table;
2269
2270                 /* Bounce resilient groups for now. */
2271                 err = -EINVAL;
2272                 goto out_no_nh;
2273
2274                 res_table = nexthop_res_table_alloc(net, cfg->nh_id, cfg);
2275                 if (!res_table) {
2276                         err = -ENOMEM;
2277                         goto out_no_nh;
2278                 }
2279
2280                 rcu_assign_pointer(nhg->spare->res_table, res_table);
2281                 rcu_assign_pointer(nhg->res_table, res_table);
2282                 nhg->resilient = true;
2283                 nhg->is_multipath = true;
2284         }
2285
2286         WARN_ON_ONCE(nhg->mpath + nhg->resilient != 1);
2287
2288         if (nhg->mpath)
2289                 nh_mp_group_rebalance(nhg);
2290
2291         if (cfg->nh_fdb)
2292                 nhg->fdb_nh = 1;
2293
2294         rcu_assign_pointer(nh->nh_grp, nhg);
2295
2296         return nh;
2297
2298 out_no_nh:
2299         for (i--; i >= 0; --i) {
2300                 list_del(&nhg->nh_entries[i].nh_list);
2301                 nexthop_put(nhg->nh_entries[i].nh);
2302         }
2303
2304         kfree(nhg->spare);
2305         kfree(nhg);
2306         kfree(nh);
2307
2308         return ERR_PTR(err);
2309 }
2310
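/* Initialize and validate an IPv4 nexthop by reusing the fib_nh
 * machinery, so a standalone nexthop passes exactly the same checks as
 * one configured as part of a route. FDB nexthops skip the device
 * validation in fib_check_nh() since they are not used for forwarding.
 */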
2311 static int nh_create_ipv4(struct net *net, struct nexthop *nh,
2312                           struct nh_info *nhi, struct nh_config *cfg,
2313                           struct netlink_ext_ack *extack)
2314 {
2315         struct fib_nh *fib_nh = &nhi->fib_nh;
2316         struct fib_config fib_cfg = {
2317                 .fc_oif   = cfg->nh_ifindex,
2318                 .fc_gw4   = cfg->gw.ipv4,
2319                 .fc_gw_family = cfg->gw.ipv4 ? AF_INET : 0,
2320                 .fc_flags = cfg->nh_flags,
2321                 .fc_encap = cfg->nh_encap,
2322                 .fc_encap_type = cfg->nh_encap_type,
2323         };
2324         u32 tb_id = (cfg->dev ? l3mdev_fib_table(cfg->dev) : RT_TABLE_MAIN);
2325         int err;
2326
2327         err = fib_nh_init(net, fib_nh, &fib_cfg, 1, extack);
2328         if (err) {
2329                 fib_nh_release(net, fib_nh);
2330                 goto out;
2331         }
2332
2333         if (nhi->fdb_nh)
2334                 goto out;
2335
2336         /* sets nh_dev if successful */
2337         err = fib_check_nh(net, fib_nh, tb_id, 0, extack);
2338         if (!err) {
2339                 nh->nh_flags = fib_nh->fib_nh_flags;
2340                 fib_info_update_nhc_saddr(net, &fib_nh->nh_common,
2341                                           fib_nh->fib_nh_scope);
2342         } else {
2343                 fib_nh_release(net, fib_nh);
2344         }
2345 out:
2346         return err;
2347 }
2348
2349 static int nh_create_ipv6(struct net *net,  struct nexthop *nh,
2350                           struct nh_info *nhi, struct nh_config *cfg,
2351                           struct netlink_ext_ack *extack)
2352 {
2353         struct fib6_nh *fib6_nh = &nhi->fib6_nh;
2354         struct fib6_config fib6_cfg = {
2355                 .fc_table = l3mdev_fib_table(cfg->dev),
2356                 .fc_ifindex = cfg->nh_ifindex,
2357                 .fc_gateway = cfg->gw.ipv6,
2358                 .fc_flags = cfg->nh_flags,
2359                 .fc_encap = cfg->nh_encap,
2360                 .fc_encap_type = cfg->nh_encap_type,
2361                 .fc_is_fdb = cfg->nh_fdb,
2362         };
2363         int err;
2364
2365         if (!ipv6_addr_any(&cfg->gw.ipv6))
2366                 fib6_cfg.fc_flags |= RTF_GATEWAY;
2367
2368         /* sets nh_dev if successful */
2369         err = ipv6_stub->fib6_nh_init(net, fib6_nh, &fib6_cfg, GFP_KERNEL,
2370                                       extack);
2371         if (err)
2372                 ipv6_stub->fib6_nh_release(fib6_nh);
2373         else
2374                 nh->nh_flags = fib6_nh->fib_nh_flags;
2375
2376         return err;
2377 }
2378
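/* Create a single (non-group) nexthop. A blackhole nexthop is bound to
 * the loopback device, which gives it a valid device reference that can
 * never be unregistered while still rejecting all matched traffic.
 */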
2379 static struct nexthop *nexthop_create(struct net *net, struct nh_config *cfg,
2380                                       struct netlink_ext_ack *extack)
2381 {
2382         struct nh_info *nhi;
2383         struct nexthop *nh;
2384         int err = 0;
2385
2386         nh = nexthop_alloc();
2387         if (!nh)
2388                 return ERR_PTR(-ENOMEM);
2389
2390         nhi = kzalloc(sizeof(*nhi), GFP_KERNEL);
2391         if (!nhi) {
2392                 kfree(nh);
2393                 return ERR_PTR(-ENOMEM);
2394         }
2395
2396         nh->nh_flags = cfg->nh_flags;
2397         nh->net = net;
2398
2399         nhi->nh_parent = nh;
2400         nhi->family = cfg->nh_family;
2401         nhi->fib_nhc.nhc_scope = RT_SCOPE_LINK;
2402
2403         if (cfg->nh_fdb)
2404                 nhi->fdb_nh = 1;
2405
2406         if (cfg->nh_blackhole) {
2407                 nhi->reject_nh = 1;
2408                 cfg->nh_ifindex = net->loopback_dev->ifindex;
2409         }
2410
2411         switch (cfg->nh_family) {
2412         case AF_INET:
2413                 err = nh_create_ipv4(net, nh, nhi, cfg, extack);
2414                 break;
2415         case AF_INET6:
2416                 err = nh_create_ipv6(net, nh, nhi, cfg, extack);
2417                 break;
2418         }
2419
2420         if (err) {
2421                 kfree(nhi);
2422                 kfree(nh);
2423                 return ERR_PTR(err);
2424         }
2425
2426         /* add the entry to the device based hash */
2427         if (!nhi->fdb_nh)
2428                 nexthop_devhash_add(net, nhi);
2429
2430         rcu_assign_pointer(nh->nh_info, nhi);
2431
2432         return nh;
2433 }
2434
2435 /* called with rtnl lock held */
2436 static struct nexthop *nexthop_add(struct net *net, struct nh_config *cfg,
2437                                    struct netlink_ext_ack *extack)
2438 {
2439         struct nexthop *nh;
2440         int err;
2441
2442         if (cfg->nlflags & NLM_F_REPLACE && !cfg->nh_id) {
2443                 NL_SET_ERR_MSG(extack, "Replace requires nexthop id");
2444                 return ERR_PTR(-EINVAL);
2445         }
2446
2447         if (!cfg->nh_id) {
2448                 cfg->nh_id = nh_find_unused_id(net);
2449                 if (!cfg->nh_id) {
2450                         NL_SET_ERR_MSG(extack, "No unused id");
2451                         return ERR_PTR(-EINVAL);
2452                 }
2453         }
2454
2455         if (cfg->nh_grp)
2456                 nh = nexthop_create_group(net, cfg);
2457         else
2458                 nh = nexthop_create(net, cfg, extack);
2459
2460         if (IS_ERR(nh))
2461                 return nh;
2462
2463         refcount_set(&nh->refcnt, 1);
2464         nh->id = cfg->nh_id;
2465         nh->protocol = cfg->nh_protocol;
2466         nh->net = net;
2467
2468         err = insert_nexthop(net, nh, cfg, extack);
2469         if (err) {
2470                 __remove_nexthop(net, nh, NULL);
2471                 nexthop_put(nh);
2472                 nh = ERR_PTR(err);
2473         }
2474
2475         return nh;
2476 }
2477
2478 static int rtm_to_nh_config(struct net *net, struct sk_buff *skb,
2479                             struct nlmsghdr *nlh, struct nh_config *cfg,
2480                             struct netlink_ext_ack *extack)
2481 {
2482         struct nhmsg *nhm = nlmsg_data(nlh);
2483         struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_new)];
2484         int err;
2485
2486         err = nlmsg_parse(nlh, sizeof(*nhm), tb,
2487                           ARRAY_SIZE(rtm_nh_policy_new) - 1,
2488                           rtm_nh_policy_new, extack);
2489         if (err < 0)
2490                 return err;
2491
2492         err = -EINVAL;
2493         if (nhm->resvd || nhm->nh_scope) {
2494                 NL_SET_ERR_MSG(extack, "Invalid values in ancillary header");
2495                 goto out;
2496         }
2497         if (nhm->nh_flags & ~NEXTHOP_VALID_USER_FLAGS) {
2498                 NL_SET_ERR_MSG(extack, "Invalid nexthop flags in ancillary header");
2499                 goto out;
2500         }
2501
2502         switch (nhm->nh_family) {
2503         case AF_INET:
2504         case AF_INET6:
2505                 break;
2506         case AF_UNSPEC:
2507                 if (tb[NHA_GROUP])
2508                         break;
2509                 fallthrough;
2510         default:
2511                 NL_SET_ERR_MSG(extack, "Invalid address family");
2512                 goto out;
2513         }
2514
2515         memset(cfg, 0, sizeof(*cfg));
2516         cfg->nlflags = nlh->nlmsg_flags;
2517         cfg->nlinfo.portid = NETLINK_CB(skb).portid;
2518         cfg->nlinfo.nlh = nlh;
2519         cfg->nlinfo.nl_net = net;
2520
2521         cfg->nh_family = nhm->nh_family;
2522         cfg->nh_protocol = nhm->nh_protocol;
2523         cfg->nh_flags = nhm->nh_flags;
2524
2525         if (tb[NHA_ID])
2526                 cfg->nh_id = nla_get_u32(tb[NHA_ID]);
2527
2528         if (tb[NHA_FDB]) {
2529                 if (tb[NHA_OIF] || tb[NHA_BLACKHOLE] ||
2530                     tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE]) {
2531                         NL_SET_ERR_MSG(extack, "Fdb attribute can not be used with encap, oif or blackhole");
2532                         goto out;
2533                 }
2534                 if (nhm->nh_flags) {
2535                         NL_SET_ERR_MSG(extack, "Unsupported nexthop flags in ancillary header");
2536                         goto out;
2537                 }
2538                 cfg->nh_fdb = nla_get_flag(tb[NHA_FDB]);
2539         }
2540
2541         if (tb[NHA_GROUP]) {
2542                 if (nhm->nh_family != AF_UNSPEC) {
2543                         NL_SET_ERR_MSG(extack, "Invalid family for group");
2544                         goto out;
2545                 }
2546                 cfg->nh_grp = tb[NHA_GROUP];
2547
2548                 cfg->nh_grp_type = NEXTHOP_GRP_TYPE_MPATH;
2549                 if (tb[NHA_GROUP_TYPE])
2550                         cfg->nh_grp_type = nla_get_u16(tb[NHA_GROUP_TYPE]);
2551
2552                 if (cfg->nh_grp_type > NEXTHOP_GRP_TYPE_MAX) {
2553                         NL_SET_ERR_MSG(extack, "Invalid group type");
2554                         goto out;
2555                 }
2556                 err = nh_check_attr_group(net, tb, ARRAY_SIZE(tb), extack);
2557
2558                 /* no other attributes should be set */
2559                 goto out;
2560         }
2561
2562         if (tb[NHA_BLACKHOLE]) {
2563                 if (tb[NHA_GATEWAY] || tb[NHA_OIF] ||
2564                     tb[NHA_ENCAP]   || tb[NHA_ENCAP_TYPE] || tb[NHA_FDB]) {
2565                         NL_SET_ERR_MSG(extack, "Blackhole attribute can not be used with gateway, oif, encap or fdb");
2566                         goto out;
2567                 }
2568
2569                 cfg->nh_blackhole = 1;
2570                 err = 0;
2571                 goto out;
2572         }
2573
2574         if (!cfg->nh_fdb && !tb[NHA_OIF]) {
2575                 NL_SET_ERR_MSG(extack, "Device attribute required for non-blackhole and non-fdb nexthops");
2576                 goto out;
2577         }
2578
2579         if (!cfg->nh_fdb && tb[NHA_OIF]) {
2580                 cfg->nh_ifindex = nla_get_u32(tb[NHA_OIF]);
2581                 if (cfg->nh_ifindex)
2582                         cfg->dev = __dev_get_by_index(net, cfg->nh_ifindex);
2583
2584                 if (!cfg->dev) {
2585                         NL_SET_ERR_MSG(extack, "Invalid device index");
2586                         goto out;
2587                 } else if (!(cfg->dev->flags & IFF_UP)) {
2588                         NL_SET_ERR_MSG(extack, "Nexthop device is not up");
2589                         err = -ENETDOWN;
2590                         goto out;
2591                 } else if (!netif_carrier_ok(cfg->dev)) {
2592                         NL_SET_ERR_MSG(extack, "Carrier for nexthop device is down");
2593                         err = -ENETDOWN;
2594                         goto out;
2595                 }
2596         }
2597
2598         err = -EINVAL;
2599         if (tb[NHA_GATEWAY]) {
2600                 struct nlattr *gwa = tb[NHA_GATEWAY];
2601
2602                 switch (cfg->nh_family) {
2603                 case AF_INET:
2604                         if (nla_len(gwa) != sizeof(u32)) {
2605                                 NL_SET_ERR_MSG(extack, "Invalid gateway");
2606                                 goto out;
2607                         }
2608                         cfg->gw.ipv4 = nla_get_be32(gwa);
2609                         break;
2610                 case AF_INET6:
2611                         if (nla_len(gwa) != sizeof(struct in6_addr)) {
2612                                 NL_SET_ERR_MSG(extack, "Invalid gateway");
2613                                 goto out;
2614                         }
2615                         cfg->gw.ipv6 = nla_get_in6_addr(gwa);
2616                         break;
2617                 default:
2618                         NL_SET_ERR_MSG(extack,
2619                                        "Unknown address family for gateway");
2620                         goto out;
2621                 }
2622         } else {
2623                 /* device only nexthop (no gateway) */
2624                 if (cfg->nh_flags & RTNH_F_ONLINK) {
2625                         NL_SET_ERR_MSG(extack,
2626                                        "ONLINK flag can not be set for nexthop without a gateway");
2627                         goto out;
2628                 }
2629         }
2630
2631         if (tb[NHA_ENCAP]) {
2632                 cfg->nh_encap = tb[NHA_ENCAP];
2633
2634                 if (!tb[NHA_ENCAP_TYPE]) {
2635                         NL_SET_ERR_MSG(extack, "LWT encapsulation type is missing");
2636                         goto out;
2637                 }
2638
2639                 cfg->nh_encap_type = nla_get_u16(tb[NHA_ENCAP_TYPE]);
2640                 err = lwtunnel_valid_encap_type(cfg->nh_encap_type, extack);
2641                 if (err < 0)
2642                         goto out;
2643
2644         } else if (tb[NHA_ENCAP_TYPE]) {
2645                 NL_SET_ERR_MSG(extack, "LWT encapsulation attribute is missing");
2646                 goto out;
2647         }
2648
2650         err = 0;
2651 out:
2652         return err;
2653 }
2654
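/* Illustrative iproute2 commands that end up here (interface and
 * address values are examples only):
 *
 *   ip nexthop add id 1 via 192.0.2.1 dev eth0
 *   ip nexthop add id 2 via 2001:db8::1 dev eth0
 *   ip nexthop add id 10 group 1/2     - multipath group of nh 1 and 2
 *   ip nexthop add id 11 blackhole
 */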
2655 /* rtnl */
2656 static int rtm_new_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
2657                            struct netlink_ext_ack *extack)
2658 {
2659         struct net *net = sock_net(skb->sk);
2660         struct nh_config cfg;
2661         struct nexthop *nh;
2662         int err;
2663
2664         err = rtm_to_nh_config(net, skb, nlh, &cfg, extack);
2665         if (!err) {
2666                 nh = nexthop_add(net, &cfg, extack);
2667                 if (IS_ERR(nh))
2668                         err = PTR_ERR(nh);
2669         }
2670
2671         return err;
2672 }
2673
2674 static int __nh_valid_get_del_req(const struct nlmsghdr *nlh,
2675                                   struct nlattr **tb, u32 *id,
2676                                   struct netlink_ext_ack *extack)
2677 {
2678         struct nhmsg *nhm = nlmsg_data(nlh);
2679
2680         if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
2681                 NL_SET_ERR_MSG(extack, "Invalid values in header");
2682                 return -EINVAL;
2683         }
2684
2685         if (!tb[NHA_ID]) {
2686                 NL_SET_ERR_MSG(extack, "Nexthop id is missing");
2687                 return -EINVAL;
2688         }
2689
2690         *id = nla_get_u32(tb[NHA_ID]);
2691         if (!(*id)) {
2692                 NL_SET_ERR_MSG(extack, "Invalid nexthop id");
2693                 return -EINVAL;
2694         }
2695
2696         return 0;
2697 }
2698
2699 static int nh_valid_get_del_req(const struct nlmsghdr *nlh, u32 *id,
2700                                 struct netlink_ext_ack *extack)
2701 {
2702         struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_get)];
2703         int err;
2704
2705         err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
2706                           ARRAY_SIZE(rtm_nh_policy_get) - 1,
2707                           rtm_nh_policy_get, extack);
2708         if (err < 0)
2709                 return err;
2710
2711         return __nh_valid_get_del_req(nlh, tb, id, extack);
2712 }
2713
2714 /* rtnl */
2715 static int rtm_del_nexthop(struct sk_buff *skb, struct nlmsghdr *nlh,
2716                            struct netlink_ext_ack *extack)
2717 {
2718         struct net *net = sock_net(skb->sk);
2719         struct nl_info nlinfo = {
2720                 .nlh = nlh,
2721                 .nl_net = net,
2722                 .portid = NETLINK_CB(skb).portid,
2723         };
2724         struct nexthop *nh;
2725         int err;
2726         u32 id;
2727
2728         err = nh_valid_get_del_req(nlh, &id, extack);
2729         if (err)
2730                 return err;
2731
2732         nh = nexthop_find_by_id(net, id);
2733         if (!nh)
2734                 return -ENOENT;
2735
2736         remove_nexthop(net, nh, &nlinfo);
2737
2738         return 0;
2739 }
2740
2741 /* rtnl */
2742 static int rtm_get_nexthop(struct sk_buff *in_skb, struct nlmsghdr *nlh,
2743                            struct netlink_ext_ack *extack)
2744 {
2745         struct net *net = sock_net(in_skb->sk);
2746         struct sk_buff *skb = NULL;
2747         struct nexthop *nh;
2748         int err;
2749         u32 id;
2750
2751         err = nh_valid_get_del_req(nlh, &id, extack);
2752         if (err)
2753                 return err;
2754
2755         err = -ENOBUFS;
2756         skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2757         if (!skb)
2758                 goto out;
2759
2760         err = -ENOENT;
2761         nh = nexthop_find_by_id(net, id);
2762         if (!nh)
2763                 goto errout_free;
2764
2765         err = nh_fill_node(skb, nh, RTM_NEWNEXTHOP, NETLINK_CB(in_skb).portid,
2766                            nlh->nlmsg_seq, 0);
2767         if (err < 0) {
2768                 WARN_ON(err == -EMSGSIZE);
2769                 goto errout_free;
2770         }
2771
2772         err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2773 out:
2774         return err;
2775 errout_free:
2776         kfree_skb(skb);
2777         goto out;
2778 }
2779
2780 struct nh_dump_filter {
2781         int dev_idx;
2782         int master_idx;
2783         bool group_filter;
2784         bool fdb_filter;
2785 };
2786
2787 static bool nh_dump_filtered(struct nexthop *nh,
2788                              struct nh_dump_filter *filter, u8 family)
2789 {
2790         const struct net_device *dev;
2791         const struct nh_info *nhi;
2792
2793         if (filter->group_filter && !nh->is_group)
2794                 return true;
2795
2796         if (!filter->dev_idx && !filter->master_idx && !family)
2797                 return false;
2798
2799         if (nh->is_group)
2800                 return true;
2801
2802         nhi = rtnl_dereference(nh->nh_info);
2803         if (family && nhi->family != family)
2804                 return true;
2805
2806         dev = nhi->fib_nhc.nhc_dev;
2807         if (filter->dev_idx && (!dev || dev->ifindex != filter->dev_idx))
2808                 return true;
2809
2810         if (filter->master_idx) {
2811                 struct net_device *master;
2812
2813                 if (!dev)
2814                         return true;
2815
2816                 master = netdev_master_upper_dev_get((struct net_device *)dev);
2817                 if (!master || master->ifindex != filter->master_idx)
2818                         return true;
2819         }
2820
2821         return false;
2822 }
2823
2824 static int __nh_valid_dump_req(const struct nlmsghdr *nlh, struct nlattr **tb,
2825                                struct nh_dump_filter *filter,
2826                                struct netlink_ext_ack *extack)
2827 {
2828         struct nhmsg *nhm;
2829         u32 idx;
2830
2831         if (tb[NHA_OIF]) {
2832                 idx = nla_get_u32(tb[NHA_OIF]);
2833                 if (idx > INT_MAX) {
2834                         NL_SET_ERR_MSG(extack, "Invalid device index");
2835                         return -EINVAL;
2836                 }
2837                 filter->dev_idx = idx;
2838         }
2839         if (tb[NHA_MASTER]) {
2840                 idx = nla_get_u32(tb[NHA_MASTER]);
2841                 if (idx > INT_MAX) {
2842                         NL_SET_ERR_MSG(extack, "Invalid master device index");
2843                         return -EINVAL;
2844                 }
2845                 filter->master_idx = idx;
2846         }
2847         filter->group_filter = nla_get_flag(tb[NHA_GROUPS]);
2848         filter->fdb_filter = nla_get_flag(tb[NHA_FDB]);
2849
2850         nhm = nlmsg_data(nlh);
2851         if (nhm->nh_protocol || nhm->resvd || nhm->nh_scope || nhm->nh_flags) {
2852                 NL_SET_ERR_MSG(extack, "Invalid values in header for nexthop dump request");
2853                 return -EINVAL;
2854         }
2855
2856         return 0;
2857 }
2858
2859 static int nh_valid_dump_req(const struct nlmsghdr *nlh,
2860                              struct nh_dump_filter *filter,
2861                              struct netlink_callback *cb)
2862 {
2863         struct nlattr *tb[ARRAY_SIZE(rtm_nh_policy_dump)];
2864         int err;
2865
2866         err = nlmsg_parse(nlh, sizeof(struct nhmsg), tb,
2867                           ARRAY_SIZE(rtm_nh_policy_dump) - 1,
2868                           rtm_nh_policy_dump, cb->extack);
2869         if (err < 0)
2870                 return err;
2871
2872         return __nh_valid_dump_req(nlh, tb, filter, cb->extack);
2873 }
2874
2875 struct rtm_dump_nh_ctx {
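/* The dump resume point is kept directly in the netlink callback
 * scratch area: rtm_dump_nh_ctx records the tree index at which the
 * previous dump chunk stopped. The BUILD_BUG_ON below guards against
 * the context outgrowing cb->ctx.
 */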
2876         u32 idx;
2877 };
2878
2879 static struct rtm_dump_nh_ctx *
2880 rtm_dump_nh_ctx(struct netlink_callback *cb)
2881 {
2882         struct rtm_dump_nh_ctx *ctx = (void *)cb->ctx;
2883
2884         BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
2885         return ctx;
2886 }
2887
2888 static int rtm_dump_walk_nexthops(struct sk_buff *skb,
2889                                   struct netlink_callback *cb,
2890                                   struct rb_root *root,
2891                                   struct rtm_dump_nh_ctx *ctx,
2892                                   int (*nh_cb)(struct sk_buff *skb,
2893                                                struct netlink_callback *cb,
2894                                                struct nexthop *nh, void *data),
2895                                   void *data)
2896 {
2897         struct rb_node *node;
2898         int idx = 0, s_idx;
2899         int err;
2900
2901         s_idx = ctx->idx;
2902         for (node = rb_first(root); node; node = rb_next(node)) {
2903                 struct nexthop *nh;
2904
2905                 if (idx < s_idx)
2906                         goto cont;
2907
2908                 nh = rb_entry(node, struct nexthop, rb_node);
2909                 ctx->idx = idx;
2910                 err = nh_cb(skb, cb, nh, data);
2911                 if (err)
2912                         return err;
2913 cont:
2914                 idx++;
2915         }
2916
2917         ctx->idx = idx;
2918         return 0;
2919 }
2920
2921 static int rtm_dump_nexthop_cb(struct sk_buff *skb, struct netlink_callback *cb,
2922                                struct nexthop *nh, void *data)
2923 {
2924         struct nhmsg *nhm = nlmsg_data(cb->nlh);
2925         struct nh_dump_filter *filter = data;
2926
2927         if (nh_dump_filtered(nh, filter, nhm->nh_family))
2928                 return 0;
2929
2930         return nh_fill_node(skb, nh, RTM_NEWNEXTHOP,
2931                             NETLINK_CB(cb->skb).portid,
2932                             cb->nlh->nlmsg_seq, NLM_F_MULTI);
2933 }
2934
2935 /* rtnl */
2936 static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb)
2937 {
2938         struct rtm_dump_nh_ctx *ctx = rtm_dump_nh_ctx(cb);
2939         struct net *net = sock_net(skb->sk);
2940         struct rb_root *root = &net->nexthop.rb_root;
2941         struct nh_dump_filter filter = {};
2942         int err;
2943
2944         err = nh_valid_dump_req(cb->nlh, &filter, cb);
2945         if (err < 0)
2946                 return err;
2947
2948         err = rtm_dump_walk_nexthops(skb, cb, root, ctx,
2949                                      &rtm_dump_nexthop_cb, &filter);
2950         if (err < 0) {
2951                 if (likely(skb->len))
2952                         goto out;
2953                 goto out_err;
2954         }
2955
2956 out:
2957         err = skb->len;
2958 out_err:
2959         cb->seq = net->nexthop.seq;
2960         nl_dump_check_consistent(cb, nlmsg_hdr(skb));
2961         return err;
2962 }
2963
2964 static void nexthop_sync_mtu(struct net_device *dev, u32 orig_mtu)
2965 {
2966         unsigned int hash = nh_dev_hashfn(dev->ifindex);
2967         struct net *net = dev_net(dev);
2968         struct hlist_head *head = &net->nexthop.devhash[hash];
2969         struct hlist_node *n;
2970         struct nh_info *nhi;
2971
2972         hlist_for_each_entry_safe(nhi, n, head, dev_hash) {
2973                 if (nhi->fib_nhc.nhc_dev == dev) {
2974                         if (nhi->family == AF_INET)
2975                                 fib_nhc_update_mtu(&nhi->fib_nhc, dev->mtu,
2976                                                    orig_mtu);
2977                 }
2978         }
2979 }
2980
2981 /* rtnl */
2982 static int nh_netdev_event(struct notifier_block *this,
2983                            unsigned long event, void *ptr)
2984 {
2985         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2986         struct netdev_notifier_info_ext *info_ext;
2987
2988         switch (event) {
2989         case NETDEV_DOWN:
2990         case NETDEV_UNREGISTER:
2991                 nexthop_flush_dev(dev, event);
2992                 break;
2993         case NETDEV_CHANGE:
2994                 if (!(dev_get_flags(dev) & (IFF_RUNNING | IFF_LOWER_UP)))
2995                         nexthop_flush_dev(dev, event);
2996                 break;
2997         case NETDEV_CHANGEMTU:
2998                 info_ext = ptr;
2999                 nexthop_sync_mtu(dev, info_ext->ext.mtu);
3000                 rt_cache_flush(dev_net(dev));
3001                 break;
3002         }
3003         return NOTIFY_DONE;
3004 }
3005
3006 static struct notifier_block nh_netdev_notifier = {
3007         .notifier_call = nh_netdev_event,
3008 };
3009
3010 static int nexthops_dump(struct net *net, struct notifier_block *nb,
3011                          struct netlink_ext_ack *extack)
3012 {
3013         struct rb_root *root = &net->nexthop.rb_root;
3014         struct rb_node *node;
3015         int err = 0;
3016
3017         for (node = rb_first(root); node; node = rb_next(node)) {
3018                 struct nexthop *nh;
3019
3020                 nh = rb_entry(node, struct nexthop, rb_node);
3021                 err = call_nexthop_notifier(nb, net, NEXTHOP_EVENT_REPLACE, nh,
3022                                             extack);
3023                 if (err)
3024                         break;
3025         }
3026
3027         return err;
3028 }
3029
int register_nexthop_notifier(struct net *net, struct notifier_block *nb,
			      struct netlink_ext_ack *extack)
{
	int err;

	rtnl_lock();
	err = nexthops_dump(net, nb, extack);
	if (err)
		goto unlock;
	err = blocking_notifier_chain_register(&net->nexthop.notifier_chain,
					       nb);
unlock:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(register_nexthop_notifier);

int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
						  nb);
}
EXPORT_SYMBOL(unregister_nexthop_notifier);

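/* Driver-facing: report the hardware state of nexthop @id, e.g.
 * nexthop_set_hw_flags(net, id, true, false) for an offloaded nexthop.
 * Both flags are cleared first, so callers pass the full current state
 * each time: RTNH_F_OFFLOAD means programmed in hardware, RTNH_F_TRAP
 * means matching packets are punted to the CPU.
 */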
void nexthop_set_hw_flags(struct net *net, u32 id, bool offload, bool trap)
{
	struct nexthop *nexthop;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop)
		goto out;

	nexthop->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		nexthop->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		nexthop->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_set_hw_flags);

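/* As nexthop_set_hw_flags(), but for a single bucket of a resilient group.
 * IDs that do not name a resilient group and out-of-range bucket indices
 * are silently ignored.
 */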
void nexthop_bucket_set_hw_flags(struct net *net, u32 id, u16 bucket_index,
				 bool offload, bool trap)
{
	struct nh_res_table *res_table;
	struct nh_res_bucket *bucket;
	struct nexthop *nexthop;
	struct nh_group *nhg;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* res_table is RCU-protected; take the snapshot once and bounds-check
	 * the bucket index against that same snapshot before using it.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (bucket_index >= res_table->num_nh_buckets)
		goto out;

	bucket = &res_table->nh_buckets[bucket_index];
	bucket->nh_flags &= ~(RTNH_F_OFFLOAD | RTNH_F_TRAP);
	if (offload)
		bucket->nh_flags |= RTNH_F_OFFLOAD;
	if (trap)
		bucket->nh_flags |= RTNH_F_TRAP;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_bucket_set_hw_flags);

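/* Drivers periodically report hardware activity as a bitmap with one bit
 * per bucket. Buckets flagged here are marked busy, which (together with
 * the group's idle timer) keeps recently used buckets from being
 * reassigned to another nexthop while traffic may still be flowing.
 */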
void nexthop_res_grp_activity_update(struct net *net, u32 id, u16 num_buckets,
				     unsigned long *activity)
{
	struct nh_res_table *res_table;
	struct nexthop *nexthop;
	struct nh_group *nhg;
	u16 i;

	rcu_read_lock();

	nexthop = nexthop_find_by_id(net, id);
	if (!nexthop || !nexthop->is_group)
		goto out;

	nhg = rcu_dereference(nexthop->nh_grp);
	if (!nhg->resilient)
		goto out;

	/* Instead of silently ignoring some buckets, demand that the sizes
	 * be the same.
	 */
	res_table = rcu_dereference(nhg->res_table);
	if (num_buckets != res_table->num_nh_buckets)
		goto out;

	for (i = 0; i < num_buckets; i++) {
		if (test_bit(i, activity))
			nh_res_bucket_set_busy(&res_table->nh_buckets[i]);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL(nexthop_res_grp_activity_update);

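/* Pernet teardown: flush every nexthop in the namespace under RTNL, then
 * free the per-device hash table allocated by nexthop_net_init().
 */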
static void __net_exit nexthop_net_exit(struct net *net)
{
	rtnl_lock();
	flush_all_nexthops(net);
	rtnl_unlock();
	kfree(net->nexthop.devhash);
}

static int __net_init nexthop_net_init(struct net *net)
{
	size_t sz = sizeof(struct hlist_head) * NH_DEV_HASHSIZE;

	net->nexthop.rb_root = RB_ROOT;
	net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
	if (!net->nexthop.devhash)
		return -ENOMEM;
	BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);

	return 0;
}

static struct pernet_operations nexthop_net_ops = {
	.init = nexthop_net_init,
	.exit = nexthop_net_exit,
};

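/* Boot-time setup: per-netns state, the netdev notifier, and the
 * rtnetlink handlers. PF_INET and PF_INET6 are registered alongside
 * PF_UNSPEC so family-qualified new/get requests are accepted as well.
 */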
static int __init nexthop_init(void)
{
	register_pernet_subsys(&nexthop_net_ops);

	register_netdevice_notifier(&nh_netdev_notifier);

	rtnl_register(PF_UNSPEC, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_DELNEXTHOP, rtm_del_nexthop, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_GETNEXTHOP, rtm_get_nexthop,
		      rtm_dump_nexthop, 0);

	rtnl_register(PF_INET, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	rtnl_register(PF_INET6, RTM_NEWNEXTHOP, rtm_new_nexthop, NULL, 0);
	rtnl_register(PF_INET6, RTM_GETNEXTHOP, NULL, rtm_dump_nexthop, 0);

	return 0;
}
subsys_initcall(nexthop_init);