// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
 */
8 #include <linux/errno.h>
9 #include <linux/types.h>
10 #include <linux/socket.h>
12 #include <linux/kernel.h>
13 #include <linux/timer.h>
14 #include <linux/string.h>
15 #include <linux/sockios.h>
16 #include <linux/net.h>
17 #include <linux/slab.h>
19 #include <linux/inet.h>
20 #include <linux/netdevice.h>
22 #include <linux/if_arp.h>
23 #include <linux/skbuff.h>
25 #include <linux/uaccess.h>
26 #include <linux/fcntl.h>
27 #include <linux/termios.h> /* For TIOCINQ/OUTQ */
29 #include <linux/interrupt.h>
30 #include <linux/notifier.h>
31 #include <linux/init.h>
32 #include <linux/spinlock.h>
33 #include <net/netrom.h>
34 #include <linux/seq_file.h>
35 #include <linux/export.h>
/*
 * NOTE(review): throughout this file the original source line numbers have
 * been fused onto the start of each line and many lines are missing
 * (the embedded numbering jumps); code below is kept byte-identical.
 */
/* Next identity number handed to a newly created neighbour entry. */
37 static unsigned int nr_neigh_no = 1;
/* Global list of known NET/ROM nodes, guarded by nr_node_list_lock. */
39 static HLIST_HEAD(nr_node_list);
40 static DEFINE_SPINLOCK(nr_node_list_lock);
/* Global list of known neighbours, guarded by nr_neigh_list_lock. */
41 static HLIST_HEAD(nr_neigh_list);
42 static DEFINE_SPINLOCK(nr_neigh_list_lock);
/*
 * Look up a node entry by callsign under nr_node_list_lock and take a
 * reference on the match (nr_node_hold); caller must drop it with
 * nr_node_put().  NOTE(review): the extraction is missing lines here
 * (numbering jumps 52->56) -- presumably the "found = nr_node; break;"
 * body and the trailing "return found;" -- confirm against the full file.
 */
44 static struct nr_node *nr_node_get(ax25_address *callsign)
46 struct nr_node *found = NULL;
47 struct nr_node *nr_node;
49 spin_lock_bh(&nr_node_list_lock);
50 nr_node_for_each(nr_node, &nr_node_list)
51 if (ax25cmp(callsign, &nr_node->callsign) == 0) {
52 nr_node_hold(nr_node);
56 spin_unlock_bh(&nr_node_list_lock);
/*
 * Look up a neighbour entry by callsign *and* device under
 * nr_neigh_list_lock, taking a reference on the match (nr_neigh_hold);
 * caller must drop it with nr_neigh_put().  NOTE(review): body lines are
 * missing from this extraction (numbering jumps 70->74).
 */
60 static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
61 struct net_device *dev)
63 struct nr_neigh *found = NULL;
64 struct nr_neigh *nr_neigh;
66 spin_lock_bh(&nr_neigh_list_lock);
67 nr_neigh_for_each(nr_neigh, &nr_neigh_list)
68 if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
69 nr_neigh->dev == dev) {
70 nr_neigh_hold(nr_neigh);
74 spin_unlock_bh(&nr_neigh_list_lock);
/* Forward declaration: nr_remove_neigh() is defined further down. */
78 static void nr_remove_neigh(struct nr_neigh *);
80 /* re-sort the routes in quality order. */
/*
 * Swap routes[x] and routes[y] when y has the better quality, keeping
 * nr_node->which tracking the same route across the swap.
 * NOTE(review): the two "which" reassignment statements that should
 * follow lines 84 and 86 are missing from this extraction.
 */
81 static void re_sort_routes(struct nr_node *nr_node, int x, int y)
83 if (nr_node->routes[y].quality > nr_node->routes[x].quality) {
84 if (nr_node->which == x)
86 else if (nr_node->which == y)
89 swap(nr_node->routes[x], nr_node->routes[y]);
94 * Add a new route to a node, and in the process add the node and the
95 * neighbour if it is new.
/*
 * Returns 0 on success, negative errno on failure (allocation failure,
 * or route refers to one of our own devices).  Uses GFP_ATOMIC because
 * it may run from packet-receive context.  NOTE(review): many lines are
 * missing from this extraction (return statements, closing braces, the
 * kmemdup size argument, etc.) -- comments below describe only what the
 * visible lines show.
 */
97 static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
98 ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
99 int quality, int obs_count)
101 struct nr_node *nr_node;
102 struct nr_neigh *nr_neigh;
104 struct net_device *odev;
/* Refuse routes whose destination callsign is one of our own devices. */
106 if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
/* Both lookups return held references (or NULL). */
111 nr_node = nr_node_get(nr);
113 nr_neigh = nr_neigh_get_dev(ax25, dev);
116 * The L2 link to a neighbour has failed in the past
117 * and now a frame comes from this neighbour. We assume
118 * it was a temporary trouble with the link and reset the
119 * routes now (and not wait for a node broadcast).
121 if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
122 struct nr_node *nr_nodet;
124 spin_lock_bh(&nr_node_list_lock);
125 nr_node_for_each(nr_nodet, &nr_node_list) {
126 nr_node_lock(nr_nodet);
127 for (i = 0; i < nr_nodet->count; i++)
128 if (nr_nodet->routes[i].neighbour == nr_neigh)
129 if (i < nr_nodet->which)
131 nr_node_unlock(nr_nodet);
133 spin_unlock_bh(&nr_node_list_lock);
/* A frame arrived from this neighbour, so the link works again. */
136 if (nr_neigh != NULL)
137 nr_neigh->failed = 0;
/* quality == 0 with both entries already known: nothing to update. */
139 if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
140 nr_neigh_put(nr_neigh);
141 nr_node_put(nr_node);
/* Unknown neighbour: allocate and initialise a fresh entry. */
145 if (nr_neigh == NULL) {
146 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
148 nr_node_put(nr_node);
152 nr_neigh->callsign = *ax25;
153 nr_neigh->digipeat = NULL;
154 nr_neigh->ax25 = NULL;
156 nr_neigh->quality = sysctl_netrom_default_path_quality;
157 nr_neigh->locked = 0;
159 nr_neigh->number = nr_neigh_no++;
160 nr_neigh->failed = 0;
161 refcount_set(&nr_neigh->refcount, 1);
/* Copy the digipeater path, if any, for later transmissions. */
163 if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
164 nr_neigh->digipeat = kmemdup(ax25_digi,
167 if (nr_neigh->digipeat == NULL) {
170 nr_node_put(nr_node);
/* Publish the new neighbour; the list holds its own reference. */
175 spin_lock_bh(&nr_neigh_list_lock);
176 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
177 nr_neigh_hold(nr_neigh);
178 spin_unlock_bh(&nr_neigh_list_lock);
/* Direct neighbour report: adopt its advertised quality unless locked. */
181 if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
182 nr_neigh->quality = quality;
/* Unknown node: allocate it with this neighbour as its only route. */
184 if (nr_node == NULL) {
185 if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
187 nr_neigh_put(nr_neigh);
191 nr_node->callsign = *nr;
192 strcpy(nr_node->mnemonic, mnemonic);
196 refcount_set(&nr_node->refcount, 1);
197 spin_lock_init(&nr_node->node_lock);
199 nr_node->routes[0].quality = quality;
200 nr_node->routes[0].obs_count = obs_count;
201 nr_node->routes[0].neighbour = nr_neigh;
/* The route slot holds its own neighbour reference. */
203 nr_neigh_hold(nr_neigh);
206 spin_lock_bh(&nr_node_list_lock);
207 hlist_add_head(&nr_node->node_node, &nr_node_list);
208 /* refcount initialized at 1 */
209 spin_unlock_bh(&nr_node_list_lock);
211 nr_neigh_put(nr_neigh);
/* Existing node: update it under its per-node lock. */
214 nr_node_lock(nr_node);
217 strcpy(nr_node->mnemonic, mnemonic);
/* Refresh an existing route via this neighbour, if there is one. */
219 for (found = 0, i = 0; i < nr_node->count; i++) {
220 if (nr_node->routes[i].neighbour == nr_neigh) {
221 nr_node->routes[i].quality = quality;
222 nr_node->routes[i].obs_count = obs_count;
229 /* We have space at the bottom, slot it in */
230 if (nr_node->count < 3) {
231 nr_node->routes[2] = nr_node->routes[1];
232 nr_node->routes[1] = nr_node->routes[0];
234 nr_node->routes[0].quality = quality;
235 nr_node->routes[0].obs_count = obs_count;
236 nr_node->routes[0].neighbour = nr_neigh;
240 nr_neigh_hold(nr_neigh);
243 /* It must be better than the worst */
244 if (quality > nr_node->routes[2].quality) {
/* Evict the worst route; its neighbour loses one user. */
245 nr_node->routes[2].neighbour->count--;
246 nr_neigh_put(nr_node->routes[2].neighbour);
/*
 * NOTE(review): the evicted neighbour is dereferenced after
 * nr_neigh_put() above -- safe only if another reference (e.g. the
 * global list's) is still held; verify against the full source.
 */
248 if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
249 nr_remove_neigh(nr_node->routes[2].neighbour);
251 nr_node->routes[2].quality = quality;
252 nr_node->routes[2].obs_count = obs_count;
253 nr_node->routes[2].neighbour = nr_neigh;
255 nr_neigh_hold(nr_neigh);
261 /* Now re-sort the routes in quality order */
262 switch (nr_node->count) {
264 re_sort_routes(nr_node, 0, 1);
265 re_sort_routes(nr_node, 1, 2);
268 re_sort_routes(nr_node, 0, 1);
/* Re-locate the active route index after the sort moved entries. */
274 for (i = 0; i < nr_node->count; i++) {
275 if (nr_node->routes[i].neighbour == nr_neigh) {
276 if (i < nr_node->which)
282 nr_neigh_put(nr_neigh);
283 nr_node_unlock(nr_node);
284 nr_node_put(nr_node);
/*
 * Unlink a node from nr_node_list and drop the list's reference.
 * Caller must hold nr_node_list_lock.
 */
288 static inline void __nr_remove_node(struct nr_node *nr_node)
290 hlist_del_init(&nr_node->node_node);
291 nr_node_put(nr_node);
/* Variant for callers that already hold nr_node_list_lock. */
294 #define nr_remove_node_locked(__node) \
295 __nr_remove_node(__node)
/* Remove a node from the global list, taking nr_node_list_lock itself. */
297 static void nr_remove_node(struct nr_node *nr_node)
299 spin_lock_bh(&nr_node_list_lock);
300 __nr_remove_node(nr_node);
301 spin_unlock_bh(&nr_node_list_lock);
/*
 * Unlink a neighbour from nr_neigh_list and drop the list's reference.
 * Caller must hold nr_neigh_list_lock.
 */
304 static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
306 hlist_del_init(&nr_neigh->neigh_node);
307 nr_neigh_put(nr_neigh);
/* Variant for callers that already hold nr_neigh_list_lock. */
310 #define nr_remove_neigh_locked(__neigh) \
311 __nr_remove_neigh(__neigh)
/* Remove a neighbour from the global list, taking nr_neigh_list_lock. */
313 static void nr_remove_neigh(struct nr_neigh *nr_neigh)
315 spin_lock_bh(&nr_neigh_list_lock);
316 __nr_remove_neigh(nr_neigh);
317 spin_unlock_bh(&nr_neigh_list_lock);
321 * "Delete" a node. Strictly speaking remove a route to a node. The node
322 * is only deleted if no routes are left to it.
/*
 * Returns 0 on success, negative errno otherwise.  NOTE(review): several
 * lines are missing from this extraction (return statements, the count
 * decrement, closing braces) -- the numbering jumps 344->346, 350->354,
 * 359->362, 367->369.
 */
324 static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
326 struct nr_node *nr_node;
327 struct nr_neigh *nr_neigh;
/* Both lookups return held references; drop them on every exit path. */
330 nr_node = nr_node_get(callsign);
335 nr_neigh = nr_neigh_get_dev(neighbour, dev);
337 if (nr_neigh == NULL) {
338 nr_node_put(nr_node);
342 nr_node_lock(nr_node);
/* Find the route through this neighbour and drop it. */
343 for (i = 0; i < nr_node->count; i++) {
344 if (nr_node->routes[i].neighbour == nr_neigh) {
346 nr_neigh_put(nr_neigh);
/* Neighbour no longer used by any route and not locked: remove it. */
348 if (nr_neigh->count == 0 && !nr_neigh->locked)
349 nr_remove_neigh(nr_neigh);
350 nr_neigh_put(nr_neigh);
/* Last route gone: the node itself disappears. */
354 if (nr_node->count == 0) {
355 nr_remove_node(nr_node);
/* Otherwise compact the remaining route slots downwards. */
359 nr_node->routes[0] = nr_node->routes[1];
362 nr_node->routes[1] = nr_node->routes[2];
367 nr_node_put(nr_node);
369 nr_node_unlock(nr_node);
/* No route via that neighbour was found. */
374 nr_neigh_put(nr_neigh);
375 nr_node_unlock(nr_node);
376 nr_node_put(nr_node);
382 * Lock a neighbour with a quality.
/*
 * If the neighbour already exists, just fix its quality and mark it
 * locked (exempt from automatic quality updates/removal); otherwise
 * allocate a new locked entry and publish it on nr_neigh_list.
 * Returns 0 on success, negative errno on allocation failure.
 * NOTE(review): return statements and some error-path lines are missing
 * from this extraction.
 */
384 static int __must_check nr_add_neigh(ax25_address *callsign,
385 ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
387 struct nr_neigh *nr_neigh;
389 nr_neigh = nr_neigh_get_dev(callsign, dev);
391 nr_neigh->quality = quality;
392 nr_neigh->locked = 1;
393 nr_neigh_put(nr_neigh);
/* GFP_ATOMIC: may be reached from non-sleeping context. */
397 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
400 nr_neigh->callsign = *callsign;
401 nr_neigh->digipeat = NULL;
402 nr_neigh->ax25 = NULL;
404 nr_neigh->quality = quality;
405 nr_neigh->locked = 1;
407 nr_neigh->number = nr_neigh_no++;
408 nr_neigh->failed = 0;
409 refcount_set(&nr_neigh->refcount, 1);
/* Copy the digipeater path for use when transmitting to this neighbour. */
411 if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
412 nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
414 if (nr_neigh->digipeat == NULL) {
420 spin_lock_bh(&nr_neigh_list_lock);
421 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
422 /* refcount is initialized at 1 */
423 spin_unlock_bh(&nr_neigh_list_lock);
429 * "Delete" a neighbour. The neighbour is only removed if the number
430 * of nodes that may use it is zero.
/*
 * Unlock the neighbour (and set the given quality); actually remove it
 * only when no node routes still reference it.  Returns 0 on success,
 * -EINVAL if the callsign/device pair is unknown.
 */
432 static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
434 struct nr_neigh *nr_neigh;
436 nr_neigh = nr_neigh_get_dev(callsign, dev);
438 if (nr_neigh == NULL) return -EINVAL;
440 nr_neigh->quality = quality;
441 nr_neigh->locked = 0;
443 if (nr_neigh->count == 0)
444 nr_remove_neigh(nr_neigh);
/* Drop the reference taken by nr_neigh_get_dev(). */
445 nr_neigh_put(nr_neigh);
451 * Decrement the obsolescence count by one. If a route is reduced to a
452 * count of zero, remove it. Also remove any unlocked neighbours with
453 * zero nodes routing via it.
/*
 * Walks the whole node list under nr_node_list_lock using the _safe
 * iterator since nodes may be removed while walking.  NOTE(review):
 * numerous lines are missing from this extraction (default case,
 * count decrements, closing braces); comments describe visible lines only.
 */
455 static int nr_dec_obs(void)
457 struct nr_neigh *nr_neigh;
459 struct hlist_node *nodet;
462 spin_lock_bh(&nr_node_list_lock);
463 nr_node_for_each_safe(s, nodet, &nr_node_list) {
465 for (i = 0; i < s->count; i++) {
466 switch (s->routes[i].obs_count) {
467 case 0: /* A locked entry */
470 case 1: /* From 1 -> 0 */
/* Route expires: release its neighbour reference. */
471 nr_neigh = s->routes[i].neighbour;
474 nr_neigh_put(nr_neigh);
476 if (nr_neigh->count == 0 && !nr_neigh->locked)
477 nr_remove_neigh(nr_neigh);
/* Compact remaining route slots after removing entry i. */
483 s->routes[0] = s->routes[1];
486 s->routes[1] = s->routes[2];
/* Ordinary route: just age it by one. */
494 s->routes[i].obs_count--;
/* Node left with no routes at all: drop it (lock already held). */
501 nr_remove_node_locked(s);
504 spin_unlock_bh(&nr_node_list_lock);
510 * A device has been removed. Remove its routes and neighbours.
/*
 * For every neighbour bound to @dev, strip all routes through it from
 * every node (removing nodes that end up with zero routes), then remove
 * the neighbour itself.  Takes nr_neigh_list_lock, then nr_node_list_lock
 * nested inside it.  NOTE(review): the device-match test and several
 * compaction lines are missing from this extraction.
 */
512 void nr_rt_device_down(struct net_device *dev)
515 struct hlist_node *nodet, *node2t;
519 spin_lock_bh(&nr_neigh_list_lock);
520 nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
522 spin_lock_bh(&nr_node_list_lock);
523 nr_node_for_each_safe(t, node2t, &nr_node_list) {
525 for (i = 0; i < t->count; i++) {
526 if (t->routes[i].neighbour == s) {
/* Shift later routes down over the removed slot. */
531 t->routes[0] = t->routes[1];
534 t->routes[1] = t->routes[2];
/* Node lost its last route: remove while holding the node lock. */
543 nr_remove_node_locked(t);
546 spin_unlock_bh(&nr_node_list_lock);
548 nr_remove_neigh_locked(s);
551 spin_unlock_bh(&nr_neigh_list_lock);
555 * Check that the device given is a valid AX.25 interface that is "up".
556 * Or a valid ethernet interface with an AX.25 callsign binding.
/*
 * Looks the device up by name in init_net; dev_get_by_name() returns a
 * held reference, so the caller must dev_put() it.  NOTE(review): the
 * success return and the dev_put()/NULL failure path are missing from
 * this extraction.
 */
558 static struct net_device *nr_ax25_dev_get(char *devname)
560 struct net_device *dev;
562 if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
565 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
573 * Find the first active NET/ROM device, usually "nr0".
/*
 * Scans init_net under RCU for an up ARPHRD_NETROM device, preferring
 * the lowest name by the first three characters.  NOTE(review): the
 * rcu_read_lock/unlock, dev_hold and return lines are missing from this
 * extraction.
 */
575 struct net_device *nr_dev_first(void)
577 struct net_device *dev, *first = NULL;
580 for_each_netdev_rcu(&init_net, dev) {
581 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
582 if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
593 * Find the NET/ROM device for the given callsign.
/*
 * Scans init_net under RCU for an up ARPHRD_NETROM device whose hardware
 * address equals @addr.  NOTE(review): the rcu_read_lock/unlock,
 * dev_hold and return lines are missing from this extraction.
 */
595 struct net_device *nr_dev_get(ax25_address *addr)
597 struct net_device *dev;
600 for_each_netdev_rcu(&init_net, dev) {
601 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
602 ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
/*
 * Fill @digi from an array of @ndigis digipeater callsigns and return it
 * (presumably NULL when ndigis is 0 -- that early-return line is missing
 * from this extraction).  lastrepeat = -1 marks "nothing repeated yet".
 */
613 static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
614 ax25_address *digipeaters)
621 for (i = 0; i < ndigis; i++) {
622 digi->calls[i] = digipeaters[i];
623 digi->repeated[i] = 0;
626 digi->ndigi = ndigis;
627 digi->lastrepeat = -1;
633 * Handle the ioctls that control the routing functions.
/*
 * Dispatches SIOCADDRT/SIOCDELRT-style requests: copies the user's
 * nr_route_struct, validates it, resolves the device, then calls the
 * matching add/del helper for node vs. neighbour entries.  NOTE(review):
 * the case labels, error returns and dev_put() calls are missing from
 * this extraction; comments describe only the visible lines.
 */
635 int nr_rt_ioctl(unsigned int cmd, void __user *arg)
637 struct nr_route_struct nr_route;
638 struct net_device *dev;
/* Validate everything from userspace before acting on it. */
644 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
646 if (nr_route.ndigis > AX25_MAX_DIGIS)
648 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
650 switch (nr_route.type) {
/* Mnemonic must fit: reject if not NUL-terminated within 7 bytes. */
652 if (strnlen(nr_route.mnemonic, 7) == 7) {
657 ret = nr_add_node(&nr_route.callsign,
660 nr_call_to_digi(&digi, nr_route.ndigis,
661 nr_route.digipeaters),
662 dev, nr_route.quality,
666 ret = nr_add_neigh(&nr_route.callsign,
667 nr_call_to_digi(&digi, nr_route.ndigis,
668 nr_route.digipeaters),
669 dev, nr_route.quality);
/* Delete path: re-copy the request and resolve the device again. */
678 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
680 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
682 switch (nr_route.type) {
684 ret = nr_del_node(&nr_route.callsign,
685 &nr_route.neighbour, dev);
688 ret = nr_del_neigh(&nr_route.callsign,
689 dev, nr_route.quality);
708 * A level 2 link has timed out, therefore it appears to be a poor link,
709 * then don't use that neighbour until it is reset.
/*
 * Find the neighbour owning @ax25, clear its ax25 binding, bump the
 * failure counter, and once it reaches sysctl_netrom_link_fails_count
 * steer affected nodes away from routes through it.  NOTE(review): the
 * hold/found assignment in the first loop and the "which++" style
 * adjustment in the second are among lines missing from this extraction.
 */
711 void nr_link_failed(ax25_cb *ax25, int reason)
713 struct nr_neigh *s, *nr_neigh = NULL;
714 struct nr_node *nr_node = NULL;
716 spin_lock_bh(&nr_neigh_list_lock);
717 nr_neigh_for_each(s, &nr_neigh_list) {
718 if (s->ax25 == ax25) {
724 spin_unlock_bh(&nr_neigh_list_lock);
726 if (nr_neigh == NULL)
/* The L2 connection is gone; forget the ax25 control block. */
729 nr_neigh->ax25 = NULL;
/* Not enough consecutive failures yet: leave routing unchanged. */
732 if (++nr_neigh->failed < sysctl_netrom_link_fails_count) {
733 nr_neigh_put(nr_neigh);
736 spin_lock_bh(&nr_node_list_lock);
737 nr_node_for_each(nr_node, &nr_node_list) {
738 nr_node_lock(nr_node);
739 if (nr_node->which < nr_node->count &&
740 nr_node->routes[nr_node->which].neighbour == nr_neigh)
742 nr_node_unlock(nr_node);
744 spin_unlock_bh(&nr_node_list_lock);
745 nr_neigh_put(nr_neigh);
749 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
750 * indicates an internally generated frame.
/*
 * Returns nonzero when the frame was handled/queued.  NOTE(review):
 * multiple lines (kfree_skb paths, TTL decrement, ax25s release, some
 * returns) are missing from this extraction; comments describe only
 * what the visible lines show.
 */
752 int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
754 ax25_address *nr_src, *nr_dest;
755 struct nr_neigh *nr_neigh;
756 struct nr_node *nr_node;
757 struct net_device *dev;
761 struct sk_buff *skbn;
/* NET/ROM header: source callsign at offset 0, destination at 7. */
764 nr_src = (ax25_address *)(skb->data + 0);
765 nr_dest = (ax25_address *)(skb->data + 7);
/* Learn/refresh a route back to the sender of a received frame. */
768 ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
769 ax25->ax25_dev->dev, 0,
770 sysctl_netrom_obsolescence_count_initialiser);
775 if ((dev = nr_dev_get(nr_dest)) != NULL) { /* Its for me */
776 if (ax25 == NULL) /* Its from me */
777 ret = nr_loopback_queue(skb);
779 ret = nr_rx_frame(skb, dev);
/* Transit frames are only forwarded when routing control allows it. */
784 if (!sysctl_netrom_routing_control && ax25 != NULL)
787 /* Its Time-To-Live has expired */
788 if (skb->data[14] == 1) {
792 nr_node = nr_node_get(nr_dest);
795 nr_node_lock(nr_node);
/* No usable route (active index past the route count). */
797 if (nr_node->which >= nr_node->count) {
798 nr_node_unlock(nr_node);
799 nr_node_put(nr_node);
803 nr_neigh = nr_node->routes[nr_node->which].neighbour;
805 if ((dev = nr_dev_first()) == NULL) {
806 nr_node_unlock(nr_node);
807 nr_node_put(nr_node);
811 /* We are going to change the netrom headers so we should get our
812 own skb, we also did not know until now how much header space
813 we had to reserve... - RXQ */
814 if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
815 nr_node_unlock(nr_node);
816 nr_node_put(nr_node);
/* Prepend the AX.25 PID byte identifying a NET/ROM payload. */
824 dptr = skb_push(skb, 1);
825 *dptr = AX25_P_NETROM;
/* Reuse or replace the cached L2 connection to the next hop. */
827 ax25s = nr_neigh->ax25;
828 nr_neigh->ax25 = ax25_send_frame(skb, 256,
829 (ax25_address *)dev->dev_addr,
831 nr_neigh->digipeat, nr_neigh->dev);
/* Success iff an L2 connection now exists for the neighbour. */
836 ret = (nr_neigh->ax25 != NULL);
837 nr_node_unlock(nr_node);
838 nr_node_put(nr_node);
843 #ifdef CONFIG_PROC_FS
/*
 * seq_file start: take nr_node_list_lock for the whole traversal
 * (released in nr_node_stop) and position at *pos, with a header token
 * at position 0.
 */
845 static void *nr_node_start(struct seq_file *seq, loff_t *pos)
846 __acquires(&nr_node_list_lock)
848 spin_lock_bh(&nr_node_list_lock);
849 return seq_hlist_start_head(&nr_node_list, *pos);
/* seq_file next: advance to the following node list entry. */
852 static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
854 return seq_hlist_next(v, &nr_node_list, pos);
/* seq_file stop: release the lock taken in nr_node_start. */
857 static void nr_node_stop(struct seq_file *seq, void *v)
858 __releases(&nr_node_list_lock)
860 spin_unlock_bh(&nr_node_list_lock);
/*
 * Emit one /proc row per node: callsign, mnemonic (or "*" when empty),
 * then quality/obsolescence/neighbour-number for each route.  Called
 * with nr_node_list_lock held by the seq framework.  NOTE(review):
 * local declarations and the trailing newline/return are missing from
 * this extraction.
 */
863 static int nr_node_show(struct seq_file *seq, void *v)
868 if (v == SEQ_START_TOKEN)
870 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
872 struct nr_node *nr_node = hlist_entry(v, struct nr_node,
875 nr_node_lock(nr_node);
876 seq_printf(seq, "%-9s %-7s %d %d",
877 ax2asc(buf, &nr_node->callsign),
878 (nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
882 for (i = 0; i < nr_node->count; i++) {
883 seq_printf(seq, " %3d %d %05d",
884 nr_node->routes[i].quality,
885 nr_node->routes[i].obs_count,
886 nr_node->routes[i].neighbour->number);
888 nr_node_unlock(nr_node);
/* seq_file operations for the /proc nodes listing. */
895 const struct seq_operations nr_node_seqops = {
896 .start = nr_node_start,
897 .next = nr_node_next,
898 .stop = nr_node_stop,
899 .show = nr_node_show,
/*
 * seq_file start for the neighbour listing: hold nr_neigh_list_lock
 * across the traversal (released in nr_neigh_stop).
 */
902 static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
903 __acquires(&nr_neigh_list_lock)
905 spin_lock_bh(&nr_neigh_list_lock);
906 return seq_hlist_start_head(&nr_neigh_list, *pos);
/* seq_file next: advance to the following neighbour list entry. */
909 static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
911 return seq_hlist_next(v, &nr_neigh_list, pos);
/* seq_file stop: release the lock taken in nr_neigh_start. */
914 static void nr_neigh_stop(struct seq_file *seq, void *v)
915 __releases(&nr_neigh_list_lock)
917 spin_unlock_bh(&nr_neigh_list_lock);
/*
 * Emit one /proc row per neighbour: number, callsign, device name (or
 * "???"), then the digipeater path if present.  Called with
 * nr_neigh_list_lock held by the seq framework.  NOTE(review): local
 * declarations, several printf arguments and the trailing newline/return
 * are missing from this extraction.
 */
920 static int nr_neigh_show(struct seq_file *seq, void *v)
925 if (v == SEQ_START_TOKEN)
926 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
928 struct nr_neigh *nr_neigh;
930 nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
931 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
933 ax2asc(buf, &nr_neigh->callsign),
934 nr_neigh->dev ? nr_neigh->dev->name : "???",
940 if (nr_neigh->digipeat != NULL) {
941 for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
942 seq_printf(seq, " %s",
943 ax2asc(buf, &nr_neigh->digipeat->calls[i]));
/* seq_file operations for the /proc neighbours listing. */
951 const struct seq_operations nr_neigh_seqops = {
952 .start = nr_neigh_start,
953 .next = nr_neigh_next,
954 .stop = nr_neigh_stop,
955 .show = nr_neigh_show,
960 * Free all memory associated with the nodes and routes lists.
/*
 * Module-teardown cleanup: with both list locks held (neigh lock taken
 * first), unlink every node and every neighbour.  NOTE(review): the
 * per-entry lock/unlock lines inside each loop are missing from this
 * extraction.
 */
962 void nr_rt_free(void)
964 struct nr_neigh *s = NULL;
965 struct nr_node *t = NULL;
966 struct hlist_node *nodet;
968 spin_lock_bh(&nr_neigh_list_lock);
969 spin_lock_bh(&nr_node_list_lock);
970 nr_node_for_each_safe(t, nodet, &nr_node_list) {
972 nr_remove_node_locked(t);
975 nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
980 nr_remove_neigh_locked(s);
982 spin_unlock_bh(&nr_node_list_lock);
983 spin_unlock_bh(&nr_neigh_list_lock);