// SPDX-License-Identifier: GPL-2.0
/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Forwarding Information Base (Routing Tables)
 *
 * Author:      Steve Whitehouse <SteveW@ACM.org>
 *              Mostly copied from the IPv4 routing code
 *
 */
#include <linux/string.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/sockios.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/route.h> /* RTF_xxx */
#include <net/neighbour.h>
#include <net/netlink.h>
#include <net/tcp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_route.h>
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
#include <net/dn_dev.h>
struct dn_zone
{
	struct dn_zone		*dz_next;
	struct dn_fib_node	**dz_hash;
	int			dz_nent;
	int			dz_divisor;
	u32			dz_hashmask;
#define DZ_HASHMASK(dz)	((dz)->dz_hashmask)
	int			dz_order;
	__le16			dz_mask;
#define DZ_MASK(dz)	((dz)->dz_mask)
};

struct dn_hash
{
	struct dn_zone	*dh_zones[17];
	struct dn_zone	*dh_zone_list;
};

#define dz_key_0(key)		((key).datum = 0)
#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\
	for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)

#define endfor_nexthops(fi) }

#define DN_MAX_DIVISOR 1024
#define DN_S_ZOMBIE 1
#define DN_S_ACCESSED 2

#define DN_FIB_SCAN(f, fp) \
for( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next)

#define DN_FIB_SCAN_KEY(f, fp, key) \
for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next)
#define RT_TABLE_MIN 1
#define DN_FIB_TABLE_HASHSZ 256
static struct hlist_head dn_fib_table_hash[DN_FIB_TABLE_HASHSZ];
static DEFINE_RWLOCK(dn_fib_tables_lock);

static struct kmem_cache *dn_hash_kmem __read_mostly;
static int dn_fib_hash_zombies;
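
/*
 * Each zone holds routes of one prefix length.  A route key is the
 * destination address masked down to the zone's prefix; dn_hash()
 * folds that key into an index within the zone's hash table.
 */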
static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
{
	u16 h = le16_to_cpu(key.datum)>>(16 - dz->dz_order);
	h ^= (h >> 10);
	h ^= (h >> 6);
	h &= DZ_HASHMASK(dz);
	return *(dn_fib_idx_t *)&h;
}

static inline dn_fib_key_t dz_key(__le16 dst, struct dn_zone *dz)
{
	dn_fib_key_t k;
	k.datum = dst & DZ_MASK(dz);
	return k;
}

static inline struct dn_fib_node **dn_chain_p(dn_fib_key_t key, struct dn_zone *dz)
{
	return &dz->dz_hash[dn_hash(key, dz).datum];
}

static inline struct dn_fib_node *dz_chain(dn_fib_key_t key, struct dn_zone *dz)
{
	return dz->dz_hash[dn_hash(key, dz).datum];
}

static inline int dn_key_eq(dn_fib_key_t a, dn_fib_key_t b)
{
	return a.datum == b.datum;
}

static inline int dn_key_leq(dn_fib_key_t a, dn_fib_key_t b)
{
	return a.datum <= b.datum;
}
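
/*
 * Re-insert every node from the old hash table into the newly sized
 * one, keeping each chain sorted by key so the scan macros above can
 * stop early.
 */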
static inline void dn_rebuild_zone(struct dn_zone *dz,
				   struct dn_fib_node **old_ht,
				   int old_divisor)
{
	struct dn_fib_node *f, **fp, *next;
	int i;

	for(i = 0; i < old_divisor; i++) {
		for(f = old_ht[i]; f; f = next) {
			next = f->fn_next;
			for(fp = dn_chain_p(f->fn_key, dz);
			    *fp && dn_key_leq((*fp)->fn_key, f->fn_key);
			    fp = &(*fp)->fn_next)
				/* NOTHING */;
			f->fn_next = *fp;
			*fp = f;
		}
	}
}
static void dn_rehash_zone(struct dn_zone *dz)
{
	struct dn_fib_node **ht, **old_ht;
	int old_divisor, new_divisor;
	u32 new_hashmask;

	old_divisor = dz->dz_divisor;

	switch (old_divisor) {
	case 16:
		new_divisor = 256;
		new_hashmask = 0xFF;
		break;
	default:
		printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! %d\n",
		       old_divisor);
		fallthrough;
	case 256:
		new_divisor = 1024;
		new_hashmask = 0x3FF;
		break;
	}

	ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL);
	if (ht == NULL)
		return;

	write_lock_bh(&dn_fib_tables_lock);
	old_ht = dz->dz_hash;
	dz->dz_hash = ht;
	dz->dz_hashmask = new_hashmask;
	dz->dz_divisor = new_divisor;
	dn_rebuild_zone(dz, old_ht, old_divisor);
	write_unlock_bh(&dn_fib_tables_lock);
	kfree(old_ht);
}
static void dn_free_node(struct dn_fib_node *f)
{
	dn_fib_release_info(DN_FIB_INFO(f));
	kmem_cache_free(dn_hash_kmem, f);
}
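
/*
 * Allocate the zone for prefix length z (hash table included) and link
 * it into the table's zone list, which is kept ordered from longest to
 * shortest prefix so lookups see the most specific zones first.
 */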
static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
{
	int i;
	struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL);
	if (!dz)
		return NULL;

	if (z) {
		dz->dz_divisor = 16;
		dz->dz_hashmask = 0x0F;
	} else {
		dz->dz_divisor = 1;
		dz->dz_hashmask = 0;
	}

	dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL);
	if (!dz->dz_hash) {
		kfree(dz);
		return NULL;
	}

	dz->dz_order = z;
	dz->dz_mask = dnet_make_mask(z);

	for(i = z + 1; i <= 16; i++)
		if (table->dh_zones[i])
			break;

	write_lock_bh(&dn_fib_tables_lock);
	if (i > 16) {
		dz->dz_next = table->dh_zone_list;
		table->dh_zone_list = dz;
	} else {
		dz->dz_next = table->dh_zones[i]->dz_next;
		table->dh_zones[i]->dz_next = dz;
	}
	table->dh_zones[z] = dz;
	write_unlock_bh(&dn_fib_tables_lock);
	return dz;
}
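
/*
 * Check whether the nexthop description in a netlink request matches
 * the nexthops of an existing dn_fib_info; returns 0 on a match.
 */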
static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct nlattr *attrs[], struct dn_fib_info *fi)
{
	struct rtnexthop *nhp;
	int nhlen;

	if (attrs[RTA_PRIORITY] &&
	    nla_get_u32(attrs[RTA_PRIORITY]) != fi->fib_priority)
		return 1;

	if (attrs[RTA_OIF] || attrs[RTA_GATEWAY]) {
		if ((!attrs[RTA_OIF] || nla_get_u32(attrs[RTA_OIF]) == fi->fib_nh->nh_oif) &&
		    (!attrs[RTA_GATEWAY] || nla_get_le16(attrs[RTA_GATEWAY]) != fi->fib_nh->nh_gw))
			return 0;
		return 1;
	}

	if (!attrs[RTA_MULTIPATH])
		return 0;

	nhp = nla_data(attrs[RTA_MULTIPATH]);
	nhlen = nla_len(attrs[RTA_MULTIPATH]);

	for_nexthops(fi) {
		int attrlen = nhlen - sizeof(struct rtnexthop);
		__le16 gw;

		if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
			return -EINVAL;
		if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif)
			return 1;
		if (attrlen) {
			struct nlattr *gw_attr;

			gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
			gw = gw_attr ? nla_get_le16(gw_attr) : 0;

			if (gw && gw != nh->nh_gw)
				return 1;
		}
		nhp = RTNH_NEXT(nhp);
	} endfor_nexthops(fi);

	return 0;
}
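
/*
 * Upper bound on the netlink message size needed to describe one
 * route, used to size the skb allocated in dn_rtmsg_fib() below.
 */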
static inline size_t dn_fib_nlmsg_size(struct dn_fib_info *fi)
{
	size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
			 + nla_total_size(4) /* RTA_TABLE */
			 + nla_total_size(2) /* RTA_DST */
			 + nla_total_size(4) /* RTA_PRIORITY */
			 + nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */

	/* space for nested metrics */
	payload += nla_total_size((RTAX_MAX * nla_total_size(4)));

	if (fi->fib_nhs) {
		/* Also handles the special case fib_nhs == 1 */

		/* each nexthop is packed in an attribute */
		size_t nhsize = nla_total_size(sizeof(struct rtnexthop));

		/* may contain a gateway attribute */
		nhsize += nla_total_size(4);

		/* all nexthops are packed in a nested attribute */
		payload += nla_total_size(fi->fib_nhs * nhsize);
	}

	return payload;
}
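
/*
 * Fill one RTM_NEWROUTE/RTM_DELROUTE message describing a route.
 * Returns 0 on success or -EMSGSIZE if the skb ran out of room.
 */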
static int dn_fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
			u32 tb_id, u8 type, u8 scope, void *dst, int dst_len,
			struct dn_fib_info *fi, unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_DECnet;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = 0;
	rtm->rtm_table = tb_id;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = scope;
	rtm->rtm_type = type;
	rtm->rtm_protocol = fi->fib_protocol;

	if (nla_put_u32(skb, RTA_TABLE, tb_id) < 0)
		goto errout;

	if (rtm->rtm_dst_len &&
	    nla_put(skb, RTA_DST, 2, dst) < 0)
		goto errout;

	if (fi->fib_priority &&
	    nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority) < 0)
		goto errout;

	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto errout;

	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw &&
		    nla_put_le16(skb, RTA_GATEWAY, fi->fib_nh->nh_gw) < 0)
			goto errout;

		if (fi->fib_nh->nh_oif &&
		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif) < 0)
			goto errout;
	}

	if (fi->fib_nhs > 1) {
		struct rtnexthop *nhp;
		struct nlattr *mp_head;

		if (!(mp_head = nla_nest_start(skb, RTA_MULTIPATH)))
			goto errout;

		for_nexthops(fi) {
			if (!(nhp = nla_reserve_nohdr(skb, sizeof(*nhp))))
				goto errout;

			nhp->rtnh_flags = nh->nh_flags & 0xFF;
			nhp->rtnh_hops = nh->nh_weight - 1;
			nhp->rtnh_ifindex = nh->nh_oif;

			if (nh->nh_gw &&
			    nla_put_le16(skb, RTA_GATEWAY, nh->nh_gw) < 0)
				goto errout;

			nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp;
		} endfor_nexthops(fi);

		nla_nest_end(skb, mp_head);
	}

	nlmsg_end(skb, nlh);
	return 0;

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
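
/*
 * Notify listeners on RTNLGRP_DECnet_ROUTE about a route that has just
 * been added or removed.
 */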
static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, u32 tb_id,
			struct nlmsghdr *nlh, struct netlink_skb_parms *req)
{
	struct sk_buff *skb;
	u32 portid = req ? req->portid : 0;
	int err = -ENOBUFS;

	skb = nlmsg_new(dn_fib_nlmsg_size(DN_FIB_INFO(f)), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dn_fib_dump_info(skb, portid, nlh->nlmsg_seq, event, tb_id,
			       f->fn_type, f->fn_scope, &f->fn_key, z,
			       DN_FIB_INFO(f), 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in dn_fib_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, &init_net, portid, RTNLGRP_DECnet_ROUTE, nlh, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_DECnet_ROUTE, err);
}
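
/*
 * The dump helpers below walk one hash chain, one zone and one table
 * respectively; cb->args[2..4] record the resume position so a partial
 * netlink dump can continue where it left off.
 */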
static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb,
				struct netlink_callback *cb,
				struct dn_fib_table *tb,
				struct dn_zone *dz,
				struct dn_fib_node *f)
{
	int i, s_i;

	s_i = cb->args[4];
	for(i = 0; f; i++, f = f->fn_next) {
		if (i < s_i)
			continue;
		if (f->fn_state & DN_S_ZOMBIE)
			continue;
		if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RTM_NEWROUTE,
				tb->n,
				(f->fn_state & DN_S_ZOMBIE) ? 0 : f->fn_type,
				f->fn_scope, &f->fn_key, dz->dz_order,
				f->fn_info, NLM_F_MULTI) < 0) {
			cb->args[4] = i;
			return -1;
		}
	}
	cb->args[4] = i;
	return skb->len;
}
static __inline__ int dn_hash_dump_zone(struct sk_buff *skb,
				struct netlink_callback *cb,
				struct dn_fib_table *tb,
				struct dn_zone *dz)
{
	int h, s_h;

	s_h = cb->args[3];
	for(h = 0; h < dz->dz_divisor; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			memset(&cb->args[4], 0, sizeof(cb->args) - 4*sizeof(cb->args[0]));
		if (dz->dz_hash == NULL || dz->dz_hash[h] == NULL)
			continue;
		if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) {
			cb->args[3] = h;
			return -1;
		}
	}
	cb->args[3] = h;
	return skb->len;
}
static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb,
				struct netlink_callback *cb)
{
	int m, s_m;
	struct dn_zone *dz;
	struct dn_hash *table = (struct dn_hash *)tb->data;

	s_m = cb->args[2];
	read_lock(&dn_fib_tables_lock);
	for(dz = table->dh_zone_list, m = 0; dz; dz = dz->dz_next, m++) {
		if (m < s_m)
			continue;
		if (m > s_m)
			memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(cb->args[0]));

		if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) {
			cb->args[2] = m;
			read_unlock(&dn_fib_tables_lock);
			return -1;
		}
	}
	read_unlock(&dn_fib_tables_lock);
	cb->args[2] = m;

	return skb->len;
}
int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	unsigned int h, s_h;
	unsigned int e = 0, s_e;
	struct dn_fib_table *tb;
	int dumped = 0;

	if (!net_eq(net, &init_net))
		return 0;

	if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
	    ((struct rtmsg *)nlmsg_data(cb->nlh))->rtm_flags&RTM_F_CLONED)
		return dn_cache_dump(skb, cb);

	s_h = cb->args[0];
	s_e = cb->args[1];

	for (h = s_h; h < DN_FIB_TABLE_HASHSZ; h++, s_h = 0) {
		e = 0;
		hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist) {
			if (e < s_e)
				goto next;
			if (dumped)
				memset(&cb->args[2], 0, sizeof(cb->args) -
						 2 * sizeof(cb->args[0]));
			if (tb->dump(tb, skb, cb) < 0)
				goto out;
			dumped = 1;
next:
			e++;
		}
	}
out:
	cb->args[1] = e;
	cb->args[0] = h;

	return skb->len;
}
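
/*
 * Insert (or replace) a route in this table.  Entries within a hash
 * chain are kept sorted by key and, within a key, by priority; the
 * NLM_F_EXCL/NLM_F_REPLACE/NLM_F_APPEND/NLM_F_CREATE flags from the
 * netlink request control how duplicates are handled.
 */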
static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[],
			       struct nlmsghdr *n, struct netlink_skb_parms *req)
{
	struct dn_hash *table = (struct dn_hash *)tb->data;
	struct dn_fib_node *new_f, *f, **fp, **del_fp;
	struct dn_zone *dz;
	struct dn_fib_info *fi;
	int z = r->rtm_dst_len;
	int type = r->rtm_type;
	dn_fib_key_t key;
	int err;

	if (z > 16)
		return -EINVAL;

	dz = table->dh_zones[z];
	if (!dz && !(dz = dn_new_zone(table, z)))
		return -ENOBUFS;

	dz_key_0(key);
	if (attrs[RTA_DST]) {
		__le16 dst = nla_get_le16(attrs[RTA_DST]);
		if (dst & ~DZ_MASK(dz))
			return -EINVAL;
		key = dz_key(dst, dz);
	}

	if ((fi = dn_fib_create_info(r, attrs, n, &err)) == NULL)
		return err;

	if (dz->dz_nent > (dz->dz_divisor << 2) &&
	    dz->dz_divisor > DN_MAX_DIVISOR &&
	    (z==16 || (1<<z) > dz->dz_divisor))
		dn_rehash_zone(dz);

	fp = dn_chain_p(key, dz);

	DN_FIB_SCAN(f, fp) {
		if (dn_key_leq(key, f->fn_key))
			break;
	}

	del_fp = NULL;

	if (f && (f->fn_state & DN_S_ZOMBIE) &&
	    dn_key_eq(f->fn_key, key)) {
		del_fp = fp;
		fp = &f->fn_next;
		f = *fp;
		goto create;
	}

	DN_FIB_SCAN_KEY(f, fp, key) {
		if (fi->fib_priority <= DN_FIB_INFO(f)->fib_priority)
			break;
	}

	if (f && dn_key_eq(f->fn_key, key) &&
	    fi->fib_priority == DN_FIB_INFO(f)->fib_priority) {
		struct dn_fib_node **ins_fp;

		err = -EEXIST;
		if (n->nlmsg_flags & NLM_F_EXCL)
			goto out;

		if (n->nlmsg_flags & NLM_F_REPLACE) {
			del_fp = fp;
			fp = &f->fn_next;
			f = *fp;
			goto replace;
		}

		ins_fp = fp;
		err = -EEXIST;

		DN_FIB_SCAN_KEY(f, fp, key) {
			if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority)
				break;
			if (f->fn_type == type &&
			    f->fn_scope == r->rtm_scope &&
			    DN_FIB_INFO(f) == fi)
				goto out;
		}
		if (!(n->nlmsg_flags & NLM_F_APPEND)) {
			fp = ins_fp;
			f = *fp;
		}
	}

create:
	err = -ENOENT;
	if (!(n->nlmsg_flags & NLM_F_CREATE))
		goto out;

replace:
	err = -ENOBUFS;
	new_f = kmem_cache_zalloc(dn_hash_kmem, GFP_KERNEL);
	if (new_f == NULL)
		goto out;

	new_f->fn_key = key;
	new_f->fn_type = type;
	new_f->fn_scope = r->rtm_scope;
	DN_FIB_INFO(new_f) = fi;

	new_f->fn_next = f;
	write_lock_bh(&dn_fib_tables_lock);
	*fp = new_f;
	write_unlock_bh(&dn_fib_tables_lock);
	dz->dz_nent++;

	if (del_fp) {
		f = *del_fp;
		/* Unlink the replaced node. */
		write_lock_bh(&dn_fib_tables_lock);
		*del_fp = f->fn_next;
		write_unlock_bh(&dn_fib_tables_lock);

		if (!(f->fn_state & DN_S_ZOMBIE))
			dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);
		if (f->fn_state & DN_S_ACCESSED)
			dn_rt_cache_flush(-1);
		dn_free_node(f);
		dz->dz_nent--;
	} else {
		dn_rt_cache_flush(-1);
	}

	dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req);

	return 0;
out:
	dn_fib_release_info(fi);
	return err;
}
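
/*
 * Remove the route matching the netlink request.  If the entry is the
 * only one for its key it is marked DN_S_ZOMBIE rather than unlinked
 * immediately; zombies are reaped via dn_fib_flush() once more than
 * 128 of them have accumulated.
 */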
static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[],
			       struct nlmsghdr *n, struct netlink_skb_parms *req)
{
	struct dn_hash *table = (struct dn_hash*)tb->data;
	struct dn_fib_node **fp, **del_fp, *f;
	int z = r->rtm_dst_len;
	struct dn_zone *dz;
	dn_fib_key_t key;
	int matched;

	if (z > 16)
		return -EINVAL;

	if ((dz = table->dh_zones[z]) == NULL)
		return -ESRCH;

	dz_key_0(key);
	if (attrs[RTA_DST]) {
		__le16 dst = nla_get_le16(attrs[RTA_DST]);
		if (dst & ~DZ_MASK(dz))
			return -EINVAL;
		key = dz_key(dst, dz);
	}

	fp = dn_chain_p(key, dz);

	DN_FIB_SCAN(f, fp) {
		if (dn_key_eq(f->fn_key, key))
			break;
		if (dn_key_leq(key, f->fn_key))
			return -ESRCH;
	}

	matched = 0;
	del_fp = NULL;
	DN_FIB_SCAN_KEY(f, fp, key) {
		struct dn_fib_info *fi = DN_FIB_INFO(f);

		if (f->fn_state & DN_S_ZOMBIE)
			return -ESRCH;

		matched++;

		if (del_fp == NULL &&
		    (!r->rtm_type || f->fn_type == r->rtm_type) &&
		    (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) &&
		    (!r->rtm_protocol ||
		     fi->fib_protocol == r->rtm_protocol) &&
		    dn_fib_nh_match(r, n, attrs, fi) == 0)
			del_fp = fp;
	}

	if (del_fp) {
		f = *del_fp;
		dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req);

		if (matched != 1) {
			write_lock_bh(&dn_fib_tables_lock);
			*del_fp = f->fn_next;
			write_unlock_bh(&dn_fib_tables_lock);

			if (f->fn_state & DN_S_ACCESSED)
				dn_rt_cache_flush(-1);
			dn_free_node(f);
			dz->dz_nent--;
		} else {
			f->fn_state |= DN_S_ZOMBIE;
			if (f->fn_state & DN_S_ACCESSED) {
				f->fn_state &= ~DN_S_ACCESSED;
				dn_rt_cache_flush(-1);
			}
			if (++dn_fib_hash_zombies > 128)
				dn_fib_flush();
		}

		return 0;
	}

	return -ESRCH;
}
static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table)
{
	int found = 0;
	struct dn_fib_node *f;

	while((f = *fp) != NULL) {
		struct dn_fib_info *fi = DN_FIB_INFO(f);

		if (fi && ((f->fn_state & DN_S_ZOMBIE) || (fi->fib_flags & RTNH_F_DEAD))) {
			write_lock_bh(&dn_fib_tables_lock);
			*fp = f->fn_next;
			write_unlock_bh(&dn_fib_tables_lock);

			dn_free_node(f);
			found++;
			continue;
		}
		fp = &f->fn_next;
	}

	return found;
}
static int dn_fib_table_flush(struct dn_fib_table *tb)
{
	struct dn_hash *table = (struct dn_hash *)tb->data;
	struct dn_zone *dz;
	int found = 0;

	dn_fib_hash_zombies = 0;
	for(dz = table->dh_zone_list; dz; dz = dz->dz_next) {
		int i;
		int tmp = 0;
		for(i = dz->dz_divisor-1; i >= 0; i--)
			tmp += dn_flush_list(&dz->dz_hash[i], dz->dz_order, table);
		dz->dz_nent -= tmp;
		found += tmp;
	}

	return found;
}
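
/*
 * Longest-prefix-match lookup: zones are walked from the longest
 * prefix to the shortest, and within a zone the chain is scanned for
 * an exact key match that dn_fib_semantic_match() accepts.
 */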
static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowidn *flp, struct dn_fib_res *res)
{
	int err;
	struct dn_zone *dz;
	struct dn_hash *t = (struct dn_hash *)tb->data;

	read_lock(&dn_fib_tables_lock);
	for(dz = t->dh_zone_list; dz; dz = dz->dz_next) {
		struct dn_fib_node *f;
		dn_fib_key_t k = dz_key(flp->daddr, dz);

		for(f = dz_chain(k, dz); f; f = f->fn_next) {
			if (!dn_key_eq(k, f->fn_key)) {
				if (dn_key_leq(k, f->fn_key))
					break;
				else
					continue;
			}

			f->fn_state |= DN_S_ACCESSED;

			if (f->fn_state&DN_S_ZOMBIE)
				continue;

			if (f->fn_scope < flp->flowidn_scope)
				continue;

			err = dn_fib_semantic_match(f->fn_type, DN_FIB_INFO(f), flp, res);
			if (err == 0) {
				res->type = f->fn_type;
				res->scope = f->fn_scope;
				res->prefixlen = dz->dz_order;
				goto out;
			}
			if (err < 0)
				goto out;
		}
	}
	err = 1;
out:
	read_unlock(&dn_fib_tables_lock);
	return err;
}
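
/*
 * Look up a routing table by number, optionally creating it on demand.
 * Creation is only valid from process context, hence the
 * in_interrupt() check below.
 */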
struct dn_fib_table *dn_fib_get_table(u32 n, int create)
{
	struct dn_fib_table *t;
	unsigned int h;

	if (n < RT_TABLE_MIN)
		return NULL;

	if (n > RT_TABLE_MAX)
		return NULL;

	h = n & (DN_FIB_TABLE_HASHSZ - 1);
	rcu_read_lock();
	hlist_for_each_entry_rcu(t, &dn_fib_table_hash[h], hlist) {
		if (t->n == n) {
			rcu_read_unlock();
			return t;
		}
	}
	rcu_read_unlock();

	if (!create)
		return NULL;

	if (in_interrupt()) {
		net_dbg_ratelimited("DECnet: BUG! Attempt to create routing table from interrupt\n");
		return NULL;
	}

	t = kzalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash),
		    GFP_KERNEL);
	if (t == NULL)
		return NULL;

	t->n = n;
	t->insert = dn_fib_table_insert;
	t->delete = dn_fib_table_delete;
	t->lookup = dn_fib_table_lookup;
	t->flush = dn_fib_table_flush;
	t->dump = dn_fib_table_dump;
	hlist_add_head_rcu(&t->hlist, &dn_fib_table_hash[h]);

	return t;
}
struct dn_fib_table *dn_fib_empty_table(void)
{
	u32 id;

	for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++)
		if (dn_fib_get_table(id, 0) == NULL)
			return dn_fib_get_table(id, 1);
	return NULL;
}
void dn_fib_flush(void)
{
	int flushed = 0;
	struct dn_fib_table *tb;
	unsigned int h;

	for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
		hlist_for_each_entry(tb, &dn_fib_table_hash[h], hlist)
			flushed += tb->flush(tb);
	}

	if (flushed)
		dn_rt_cache_flush(-1);
}
void __init dn_fib_table_init(void)
{
	dn_hash_kmem = kmem_cache_create("dn_fib_info_cache",
					 sizeof(struct dn_fib_info),
					 0, SLAB_HWCACHE_ALIGN,
					 NULL);
}
void __exit dn_fib_table_cleanup(void)
{
	struct dn_fib_table *t;
	struct hlist_node *next;
	unsigned int h;

	write_lock(&dn_fib_tables_lock);
	for (h = 0; h < DN_FIB_TABLE_HASHSZ; h++) {
		hlist_for_each_entry_safe(t, next, &dn_fib_table_hash[h],
					  hlist) {
			hlist_del(&t->hlist);
			kfree(t);
		}
	}
	write_unlock(&dn_fib_tables_lock);
}