1 // SPDX-License-Identifier: GPL-2.0-or-later
/*
4 * Copyright Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
5 * Copyright Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk)
6 * Copyright Tomi Manninen OH2BNS (oh2bns@sral.fi)
*/
8 #include <linux/errno.h>
9 #include <linux/types.h>
10 #include <linux/socket.h>
12 #include <linux/kernel.h>
13 #include <linux/timer.h>
14 #include <linux/string.h>
15 #include <linux/sockios.h>
16 #include <linux/net.h>
17 #include <linux/slab.h>
19 #include <linux/inet.h>
20 #include <linux/netdevice.h>
22 #include <linux/if_arp.h>
23 #include <linux/skbuff.h>
25 #include <linux/uaccess.h>
26 #include <linux/fcntl.h>
27 #include <linux/termios.h> /* For TIOCINQ/OUTQ */
29 #include <linux/interrupt.h>
30 #include <linux/notifier.h>
31 #include <linux/init.h>
32 #include <linux/spinlock.h>
33 #include <net/netrom.h>
34 #include <linux/seq_file.h>
35 #include <linux/export.h>
/* Next neighbour number handed out to newly created neighbours. */
37 static unsigned int nr_neigh_no = 1;
/* Global list of known NET/ROM nodes; guarded by nr_node_list_lock. */
39 static HLIST_HEAD(nr_node_list);
40 static DEFINE_SPINLOCK(nr_node_list_lock);
/* Global list of known NET/ROM neighbours; guarded by nr_neigh_list_lock. */
41 static HLIST_HEAD(nr_neigh_list);
42 static DEFINE_SPINLOCK(nr_neigh_list_lock);
/*
 * Look up a node on nr_node_list by callsign under nr_node_list_lock,
 * taking an extra reference (nr_node_hold) on the match.  The caller
 * presumably drops the reference with nr_node_put() when done.
 * NOTE(review): several interior lines (loop body close, return) are
 * elided in this excerpt.
 */
44 static struct nr_node *nr_node_get(ax25_address *callsign)
46 struct nr_node *found = NULL;
47 struct nr_node *nr_node;
49 spin_lock_bh(&nr_node_list_lock);
50 nr_node_for_each(nr_node, &nr_node_list)
51 if (ax25cmp(callsign, &nr_node->callsign) == 0) {
52 nr_node_hold(nr_node);
56 spin_unlock_bh(&nr_node_list_lock);
/*
 * Look up a neighbour on nr_neigh_list by (callsign, device) pair under
 * nr_neigh_list_lock, taking an extra reference (nr_neigh_hold) on the
 * match.  Caller presumably balances with nr_neigh_put().
 * NOTE(review): interior lines (break/return) are elided in this excerpt.
 */
60 static struct nr_neigh *nr_neigh_get_dev(ax25_address *callsign,
61 struct net_device *dev)
63 struct nr_neigh *found = NULL;
64 struct nr_neigh *nr_neigh;
66 spin_lock_bh(&nr_neigh_list_lock);
67 nr_neigh_for_each(nr_neigh, &nr_neigh_list)
68 if (ax25cmp(callsign, &nr_neigh->callsign) == 0 &&
69 nr_neigh->dev == dev) {
70 nr_neigh_hold(nr_neigh);
74 spin_unlock_bh(&nr_neigh_list_lock);
/* Forward declaration: nr_remove_neigh() is used before it is defined below. */
78 static void nr_remove_neigh(struct nr_neigh *);
80 /* re-sort the routes in quality order. */
/*
 * Swap routes[x] and routes[y] when y has the better quality, adjusting
 * nr_node->which so it keeps tracking the same route entry.
 * NOTE(review): the 'which' reassignments and closing braces are elided
 * in this excerpt.  Caller presumably holds the node lock.
 */
81 static void re_sort_routes(struct nr_node *nr_node, int x, int y)
83 if (nr_node->routes[y].quality > nr_node->routes[x].quality) {
84 if (nr_node->which == x)
86 else if (nr_node->which == y)
89 swap(nr_node->routes[x], nr_node->routes[y]);
94 * Add a new route to a node, and in the process add the node and the
95 * neighbour if it is new.
/*
 * nr_add_node - record that node @nr (mnemonic @mnemonic) is reachable
 * via neighbour @ax25 (optional digipeater path @ax25_digi) on @dev,
 * with the given @quality and @obs_count.  Creates node/neighbour
 * entries on demand and keeps the node's (up to three) routes sorted
 * best-first.  Appears to return 0/-errno; the return statements are
 * elided in this excerpt, as are many braces and assignments — the
 * comments below describe only the visible code.
 */
97 static int __must_check nr_add_node(ax25_address *nr, const char *mnemonic,
98 ax25_address *ax25, ax25_digi *ax25_digi, struct net_device *dev,
99 int quality, int obs_count)
101 struct nr_node *nr_node;
102 struct nr_neigh *nr_neigh;
104 struct net_device *odev;
106 if ((odev=nr_dev_get(nr)) != NULL) { /* Can't add routes to ourself */
111 nr_node = nr_node_get(nr);
113 nr_neigh = nr_neigh_get_dev(ax25, dev);
116 * The L2 link to a neighbour has failed in the past
117 * and now a frame comes from this neighbour. We assume
118 * it was a temporary trouble with the link and reset the
119 * routes now (and not wait for a node broadcast).
121 if (nr_neigh != NULL && nr_neigh->failed != 0 && quality == 0) {
122 struct nr_node *nr_nodet;
124 spin_lock_bh(&nr_node_list_lock);
125 nr_node_for_each(nr_nodet, &nr_node_list) {
126 nr_node_lock(nr_nodet);
127 for (i = 0; i < nr_nodet->count; i++)
128 if (nr_nodet->routes[i].neighbour == nr_neigh)
129 if (i < nr_nodet->which)
131 nr_node_unlock(nr_nodet);
133 spin_unlock_bh(&nr_node_list_lock);
/* A frame arrived from this neighbour, so clear its failure count. */
136 if (nr_neigh != NULL)
137 nr_neigh->failed = 0;
139 if (quality == 0 && nr_neigh != NULL && nr_node != NULL) {
140 nr_neigh_put(nr_neigh);
141 nr_node_put(nr_node);
/* Neighbour not known yet: allocate and initialise a new entry. */
145 if (nr_neigh == NULL) {
146 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL) {
148 nr_node_put(nr_node);
152 nr_neigh->callsign = *ax25;
153 nr_neigh->digipeat = NULL;
154 nr_neigh->ax25 = NULL;
156 nr_neigh->quality = READ_ONCE(sysctl_netrom_default_path_quality);
157 nr_neigh->locked = 0;
159 nr_neigh->number = nr_neigh_no++;
160 nr_neigh->failed = 0;
161 refcount_set(&nr_neigh->refcount, 1);
163 if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
164 nr_neigh->digipeat = kmemdup(ax25_digi,
167 if (nr_neigh->digipeat == NULL) {
170 nr_node_put(nr_node);
/* Publish the new neighbour; the list itself keeps a reference. */
175 spin_lock_bh(&nr_neigh_list_lock);
176 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
177 nr_neigh_hold(nr_neigh);
178 spin_unlock_bh(&nr_neigh_list_lock);
/* Node and neighbour callsigns match: the route IS the neighbour. */
181 if (quality != 0 && ax25cmp(nr, ax25) == 0 && !nr_neigh->locked)
182 nr_neigh->quality = quality;
/* Node not known yet: allocate it with this route as its only entry. */
184 if (nr_node == NULL) {
185 if ((nr_node = kmalloc(sizeof(*nr_node), GFP_ATOMIC)) == NULL) {
187 nr_neigh_put(nr_neigh);
191 nr_node->callsign = *nr;
192 strcpy(nr_node->mnemonic, mnemonic);
196 refcount_set(&nr_node->refcount, 1);
197 spin_lock_init(&nr_node->node_lock);
199 nr_node->routes[0].quality = quality;
200 nr_node->routes[0].obs_count = obs_count;
201 nr_node->routes[0].neighbour = nr_neigh;
203 nr_neigh_hold(nr_neigh);
206 spin_lock_bh(&nr_node_list_lock);
207 hlist_add_head(&nr_node->node_node, &nr_node_list);
208 /* refcount initialized at 1 */
209 spin_unlock_bh(&nr_node_list_lock);
211 nr_neigh_put(nr_neigh);
/* Node already exists: update it under its per-node lock. */
214 nr_node_lock(nr_node);
217 strcpy(nr_node->mnemonic, mnemonic);
/* If this neighbour already provides a route, just refresh it. */
219 for (found = 0, i = 0; i < nr_node->count; i++) {
220 if (nr_node->routes[i].neighbour == nr_neigh) {
221 nr_node->routes[i].quality = quality;
222 nr_node->routes[i].obs_count = obs_count;
229 /* We have space at the bottom, slot it in */
230 if (nr_node->count < 3) {
231 nr_node->routes[2] = nr_node->routes[1];
232 nr_node->routes[1] = nr_node->routes[0];
234 nr_node->routes[0].quality = quality;
235 nr_node->routes[0].obs_count = obs_count;
236 nr_node->routes[0].neighbour = nr_neigh;
240 nr_neigh_hold(nr_neigh);
243 /* It must be better than the worst */
244 if (quality > nr_node->routes[2].quality) {
/* Evict the worst route; the list reference keeps the neighbour
 * alive across the put, so the count check below is still valid. */
245 nr_node->routes[2].neighbour->count--;
246 nr_neigh_put(nr_node->routes[2].neighbour);
248 if (nr_node->routes[2].neighbour->count == 0 && !nr_node->routes[2].neighbour->locked)
249 nr_remove_neigh(nr_node->routes[2].neighbour);
251 nr_node->routes[2].quality = quality;
252 nr_node->routes[2].obs_count = obs_count;
253 nr_node->routes[2].neighbour = nr_neigh;
255 nr_neigh_hold(nr_neigh);
261 /* Now re-sort the routes in quality order */
262 switch (nr_node->count) {
264 re_sort_routes(nr_node, 0, 1);
265 re_sort_routes(nr_node, 1, 2);
268 re_sort_routes(nr_node, 0, 1);
/* Keep nr_node->which pointing at the same route after the re-sort. */
273 for (i = 0; i < nr_node->count; i++) {
274 if (nr_node->routes[i].neighbour == nr_neigh) {
275 if (i < nr_node->which)
281 nr_neigh_put(nr_neigh);
282 nr_node_unlock(nr_node);
283 nr_node_put(nr_node);
/*
 * Unlink @nr_node from nr_node_list and drop the list's reference.
 * Caller must hold nr_node_list_lock (asserted via lockdep).
 */
287 static void nr_remove_node_locked(struct nr_node *nr_node)
289 lockdep_assert_held(&nr_node_list_lock);
291 hlist_del_init(&nr_node->node_node);
292 nr_node_put(nr_node);
/*
 * Unlink @nr_neigh from nr_neigh_list and drop the list's reference.
 * Caller must already hold nr_neigh_list_lock.
 */
295 static inline void __nr_remove_neigh(struct nr_neigh *nr_neigh)
297 hlist_del_init(&nr_neigh->neigh_node);
298 nr_neigh_put(nr_neigh);
/* Alias used where the neighbour list lock is already held. */
301 #define nr_remove_neigh_locked(__neigh) \
302 __nr_remove_neigh(__neigh)
/* Locking wrapper: remove @nr_neigh from the list under nr_neigh_list_lock. */
304 static void nr_remove_neigh(struct nr_neigh *nr_neigh)
306 spin_lock_bh(&nr_neigh_list_lock);
307 __nr_remove_neigh(nr_neigh);
308 spin_unlock_bh(&nr_neigh_list_lock);
312 * "Delete" a node. Strictly speaking remove a route to a node. The node
313 * is only deleted if no routes are left to it.
/*
 * nr_del_node - remove the route to @callsign that goes via @neighbour
 * on @dev.  If that was the node's last route the node itself is removed;
 * the neighbour is removed too once unreferenced and unlocked.
 * NOTE(review): return statements and several braces are elided in this
 * excerpt; comments describe only the visible code.
 */
315 static int nr_del_node(ax25_address *callsign, ax25_address *neighbour, struct net_device *dev)
317 struct nr_node *nr_node;
318 struct nr_neigh *nr_neigh;
321 nr_node = nr_node_get(callsign);
326 nr_neigh = nr_neigh_get_dev(neighbour, dev);
328 if (nr_neigh == NULL) {
329 nr_node_put(nr_node);
/* Both locks: list lock first, then the per-node lock. */
333 spin_lock_bh(&nr_node_list_lock);
334 nr_node_lock(nr_node);
335 for (i = 0; i < nr_node->count; i++) {
336 if (nr_node->routes[i].neighbour == nr_neigh) {
338 nr_neigh_put(nr_neigh);
/* Drop the neighbour entirely once nothing routes through it. */
340 if (nr_neigh->count == 0 && !nr_neigh->locked)
341 nr_remove_neigh(nr_neigh);
342 nr_neigh_put(nr_neigh);
346 if (nr_node->count == 0) {
347 nr_remove_node_locked(nr_node);
/* Compact the remaining routes over the deleted slot. */
351 nr_node->routes[0] = nr_node->routes[1];
354 nr_node->routes[1] = nr_node->routes[2];
358 nr_node_put(nr_node);
360 nr_node_unlock(nr_node);
361 spin_unlock_bh(&nr_node_list_lock);
/* No matching route found: release references and locks. */
366 nr_neigh_put(nr_neigh);
367 nr_node_unlock(nr_node);
368 spin_unlock_bh(&nr_node_list_lock);
369 nr_node_put(nr_node);
375 * Lock a neighbour with a quality.
/*
 * nr_add_neigh - pin @callsign on @dev as a "locked" neighbour with a
 * fixed @quality, creating the entry (with optional digipeater path)
 * if it does not already exist.  A locked neighbour's quality is not
 * overwritten by learned routes.  Appears to return 0/-errno; returns
 * are elided in this excerpt.
 */
377 static int __must_check nr_add_neigh(ax25_address *callsign,
378 ax25_digi *ax25_digi, struct net_device *dev, unsigned int quality)
380 struct nr_neigh *nr_neigh;
/* Already known: just update quality and mark locked. */
382 nr_neigh = nr_neigh_get_dev(callsign, dev);
384 nr_neigh->quality = quality;
385 nr_neigh->locked = 1;
386 nr_neigh_put(nr_neigh);
390 if ((nr_neigh = kmalloc(sizeof(*nr_neigh), GFP_ATOMIC)) == NULL)
393 nr_neigh->callsign = *callsign;
394 nr_neigh->digipeat = NULL;
395 nr_neigh->ax25 = NULL;
397 nr_neigh->quality = quality;
398 nr_neigh->locked = 1;
400 nr_neigh->number = nr_neigh_no++;
401 nr_neigh->failed = 0;
402 refcount_set(&nr_neigh->refcount, 1);
404 if (ax25_digi != NULL && ax25_digi->ndigi > 0) {
405 nr_neigh->digipeat = kmemdup(ax25_digi, sizeof(*ax25_digi),
407 if (nr_neigh->digipeat == NULL) {
413 spin_lock_bh(&nr_neigh_list_lock);
414 hlist_add_head(&nr_neigh->neigh_node, &nr_neigh_list);
415 /* refcount is initialized at 1 */
416 spin_unlock_bh(&nr_neigh_list_lock);
422 * "Delete" a neighbour. The neighbour is only removed if the number
423 * of nodes that may use it is zero.
/*
 * nr_del_neigh - unlock neighbour @callsign on @dev (restoring @quality)
 * and remove it from the list if no node currently routes through it.
 * Returns -EINVAL when the neighbour is unknown.
 */
425 static int nr_del_neigh(ax25_address *callsign, struct net_device *dev, unsigned int quality)
427 struct nr_neigh *nr_neigh;
429 nr_neigh = nr_neigh_get_dev(callsign, dev);
431 if (nr_neigh == NULL) return -EINVAL;
433 nr_neigh->quality = quality;
434 nr_neigh->locked = 0;
436 if (nr_neigh->count == 0)
437 nr_remove_neigh(nr_neigh);
438 nr_neigh_put(nr_neigh);
444 * Decrement the obsolescence count by one. If a route is reduced to a
445 * count of zero, remove it. Also remove any unlocked neighbours with
446 * zero nodes routing via it.
/*
 * nr_dec_obs - age all learned routes by one obsolescence tick, walking
 * nr_node_list with the safe iterator because nodes may be removed
 * mid-walk.  NOTE(review): many interior lines (case bodies, braces)
 * are elided in this excerpt.
 */
448 static int nr_dec_obs(void)
450 struct nr_neigh *nr_neigh;
452 struct hlist_node *nodet;
455 spin_lock_bh(&nr_node_list_lock);
456 nr_node_for_each_safe(s, nodet, &nr_node_list) {
458 for (i = 0; i < s->count; i++) {
459 switch (s->routes[i].obs_count) {
460 case 0: /* A locked entry */
463 case 1: /* From 1 -> 0 */
/* Route expires: drop the neighbour reference, and drop the
 * neighbour itself once unreferenced and unlocked. */
464 nr_neigh = s->routes[i].neighbour;
467 nr_neigh_put(nr_neigh);
469 if (nr_neigh->count == 0 && !nr_neigh->locked)
470 nr_remove_neigh(nr_neigh);
/* Compact remaining routes over the expired slot. */
476 s->routes[0] = s->routes[1];
479 s->routes[1] = s->routes[2];
/* Default case: still fresh — just age the route by one. */
486 s->routes[i].obs_count--;
/* Node left with no routes at all: remove it. */
493 nr_remove_node_locked(s);
496 spin_unlock_bh(&nr_node_list_lock);
502 * A device has been removed. Remove its routes and neighbours.
/*
 * nr_rt_device_down - purge every neighbour attached to @dev and every
 * node route that went through such a neighbour.  Takes the neighbour
 * list lock, then the node list lock, using safe iterators since both
 * lists are pruned in place.  NOTE(review): the per-route matching and
 * removal details are partly elided in this excerpt.
 */
504 void nr_rt_device_down(struct net_device *dev)
507 struct hlist_node *nodet, *node2t;
511 spin_lock_bh(&nr_neigh_list_lock);
512 nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
514 spin_lock_bh(&nr_node_list_lock);
515 nr_node_for_each_safe(t, node2t, &nr_node_list) {
517 for (i = 0; i < t->count; i++) {
518 if (t->routes[i].neighbour == s) {
/* Compact routes over the removed entry. */
523 t->routes[0] = t->routes[1];
526 t->routes[1] = t->routes[2];
/* Node lost its last route: remove it. */
534 nr_remove_node_locked(t);
537 spin_unlock_bh(&nr_node_list_lock);
/* Neighbour list lock already held, so use the _locked variant. */
539 nr_remove_neigh_locked(s);
542 spin_unlock_bh(&nr_neigh_list_lock);
546 * Check that the device given is a valid AX.25 interface that is "up".
547 * Or a valid ethernet interface with an AX.25 callsign binding.
/*
 * Resolve @devname to an up AX.25 net_device.  dev_get_by_name() takes a
 * device reference; the non-matching path (presumably dev_put + NULL) is
 * elided in this excerpt — caller must eventually drop the reference.
 */
549 static struct net_device *nr_ax25_dev_get(char *devname)
551 struct net_device *dev;
553 if ((dev = dev_get_by_name(&init_net, devname)) == NULL)
556 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_AX25)
564 * Find the first active NET/ROM device, usually "nr0".
/*
 * Walk the netdev list under RCU and pick the up NET/ROM device whose
 * name sorts first (strncmp on the first 3 chars).  The RCU lock/unlock
 * and the dev_hold/return lines are elided in this excerpt.
 */
566 struct net_device *nr_dev_first(void)
568 struct net_device *dev, *first = NULL;
571 for_each_netdev_rcu(&init_net, dev) {
572 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM)
573 if (first == NULL || strncmp(dev->name, first->name, 3) < 0)
584 * Find the NET/ROM device for the given callsign.
/*
 * Walk the netdev list under RCU and return the up NET/ROM device whose
 * hardware address equals @addr.  The RCU lock/unlock and the
 * dev_hold/return lines are elided in this excerpt; callers that
 * receive a device presumably own a reference.
 */
586 struct net_device *nr_dev_get(ax25_address *addr)
588 struct net_device *dev;
591 for_each_netdev_rcu(&init_net, dev) {
592 if ((dev->flags & IFF_UP) && dev->type == ARPHRD_NETROM &&
593 ax25cmp(addr, (ax25_address *)dev->dev_addr) == 0) {
/*
 * Fill @digi from an array of @ndigis digipeater callsigns and return it
 * (return/early-exit lines elided in this excerpt).  repeated[] is
 * cleared and lastrepeat set to -1, i.e. nothing repeated yet.
 */
604 static ax25_digi *nr_call_to_digi(ax25_digi *digi, int ndigis,
605 ax25_address *digipeaters)
612 for (i = 0; i < ndigis; i++) {
613 digi->calls[i] = digipeaters[i];
614 digi->repeated[i] = 0;
617 digi->ndigi = ndigis;
618 digi->lastrepeat = -1;
624 * Handle the ioctls that control the routing functions.
/*
 * nr_rt_ioctl - userspace route management entry point.  Copies a
 * struct nr_route_struct from @arg, validates it, resolves the device,
 * then dispatches on nr_route.type to add or delete a node route or a
 * locked neighbour.  NOTE(review): the cmd switch labels (presumably
 * SIOCADDRT/SIOCDELRT), dev_put calls and returns are elided in this
 * excerpt.
 */
626 int nr_rt_ioctl(unsigned int cmd, void __user *arg)
628 struct nr_route_struct nr_route;
629 struct net_device *dev;
635 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
/* Reject over-long digipeater lists from userspace. */
637 if (nr_route.ndigis > AX25_MAX_DIGIS)
639 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
641 switch (nr_route.type) {
/* Mnemonic must be NUL-terminated within its 7-byte field. */
643 if (strnlen(nr_route.mnemonic, 7) == 7) {
648 ret = nr_add_node(&nr_route.callsign,
651 nr_call_to_digi(&digi, nr_route.ndigis,
652 nr_route.digipeaters),
653 dev, nr_route.quality,
657 ret = nr_add_neigh(&nr_route.callsign,
658 nr_call_to_digi(&digi, nr_route.ndigis,
659 nr_route.digipeaters),
660 dev, nr_route.quality);
/* Delete path: re-copy the request and dispatch on type again. */
669 if (copy_from_user(&nr_route, arg, sizeof(struct nr_route_struct)))
671 if ((dev = nr_ax25_dev_get(nr_route.device)) == NULL)
673 switch (nr_route.type) {
675 ret = nr_del_node(&nr_route.callsign,
676 &nr_route.neighbour, dev);
679 ret = nr_del_neigh(&nr_route.callsign,
680 dev, nr_route.quality);
699 * A level 2 link has timed out, therefore it appears to be a poor link,
700 * then don't use that neighbour until it is reset.
/*
 * nr_link_failed - called when the AX.25 link @ax25 to a neighbour goes
 * down.  Finds the neighbour owning that ax25_cb, detaches it, and once
 * the failure count reaches sysctl_netrom_link_fails_count, steps every
 * node currently routed via this neighbour to its next route (the
 * 'which' increment is elided in this excerpt).
 */
702 void nr_link_failed(ax25_cb *ax25, int reason)
704 struct nr_neigh *s, *nr_neigh = NULL;
705 struct nr_node *nr_node = NULL;
707 spin_lock_bh(&nr_neigh_list_lock);
708 nr_neigh_for_each(s, &nr_neigh_list) {
709 if (s->ax25 == ax25) {
715 spin_unlock_bh(&nr_neigh_list_lock);
717 if (nr_neigh == NULL)
/* Forget the dead L2 connection. */
720 nr_neigh->ax25 = NULL;
/* Not failed often enough yet: keep using this neighbour. */
723 if (++nr_neigh->failed < READ_ONCE(sysctl_netrom_link_fails_count)) {
724 nr_neigh_put(nr_neigh);
727 spin_lock_bh(&nr_node_list_lock);
728 nr_node_for_each(nr_node, &nr_node_list) {
729 nr_node_lock(nr_node);
730 if (nr_node->which < nr_node->count &&
731 nr_node->routes[nr_node->which].neighbour == nr_neigh)
733 nr_node_unlock(nr_node);
735 spin_unlock_bh(&nr_node_list_lock);
736 nr_neigh_put(nr_neigh);
740 * Route a frame to an appropriate AX.25 connection. A NULL ax25_cb
741 * indicates an internally generated frame.
/*
 * nr_route_frame - forward a NET/ROM frame.  Learns a zero-quality
 * route back to the source, delivers locally when the destination is
 * one of our own devices, otherwise picks the destination node's
 * current best route and transmits via that neighbour's AX.25 link.
 * NOTE(review): TTL decrement, skbn bookkeeping, error paths and
 * returns are elided in this excerpt; comments cover only the visible
 * code.
 */
743 int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
745 ax25_address *nr_src, *nr_dest;
746 struct nr_neigh *nr_neigh;
747 struct nr_node *nr_node;
748 struct net_device *dev;
752 struct sk_buff *skbn;
/* NET/ROM header: source callsign at offset 0, destination at 7. */
755 nr_src = (ax25_address *)(skb->data + 0);
756 nr_dest = (ax25_address *)(skb->data + 7);
/* Frame came in over AX.25: learn a route back to its source. */
759 ret = nr_add_node(nr_src, "", &ax25->dest_addr, ax25->digipeat,
760 ax25->ax25_dev->dev, 0,
761 READ_ONCE(sysctl_netrom_obsolescence_count_initialiser));
766 if ((dev = nr_dev_get(nr_dest)) != NULL) { /* Its for me */
767 if (ax25 == NULL) /* Its from me */
768 ret = nr_loopback_queue(skb);
770 ret = nr_rx_frame(skb, dev);
/* Routing of foreign frames disabled by sysctl: don't forward. */
775 if (!READ_ONCE(sysctl_netrom_routing_control) && ax25 != NULL)
778 /* Its Time-To-Live has expired */
779 if (skb->data[14] == 1) {
783 nr_node = nr_node_get(nr_dest);
786 nr_node_lock(nr_node);
/* No usable route to the destination. */
788 if (nr_node->which >= nr_node->count) {
789 nr_node_unlock(nr_node);
790 nr_node_put(nr_node);
794 nr_neigh = nr_node->routes[nr_node->which].neighbour;
796 if ((dev = nr_dev_first()) == NULL) {
797 nr_node_unlock(nr_node);
798 nr_node_put(nr_node);
802 /* We are going to change the netrom headers so we should get our
803 own skb, we also did not know until now how much header space
804 we had to reserve... - RXQ */
805 if ((skbn=skb_copy_expand(skb, dev->hard_header_len, 0, GFP_ATOMIC)) == NULL) {
806 nr_node_unlock(nr_node);
807 nr_node_put(nr_node);
/* Prepend the AX.25 PID byte before handing off to AX.25. */
815 dptr = skb_push(skb, 1);
816 *dptr = AX25_P_NETROM;
/* Reuse the neighbour's existing AX.25 connection if there is one. */
818 ax25s = nr_neigh->ax25;
819 nr_neigh->ax25 = ax25_send_frame(skb, 256,
820 (ax25_address *)dev->dev_addr,
822 nr_neigh->digipeat, nr_neigh->dev);
/* Success iff AX.25 accepted/established the connection. */
827 ret = (nr_neigh->ax25 != NULL);
828 nr_node_unlock(nr_node);
829 nr_node_put(nr_node);
834 #ifdef CONFIG_PROC_FS
/* seq_file iteration over nr_node_list; the list lock is held from
 * ->start to ->stop, as annotated for sparse below. */
836 static void *nr_node_start(struct seq_file *seq, loff_t *pos)
837 __acquires(&nr_node_list_lock)
839 spin_lock_bh(&nr_node_list_lock);
840 return seq_hlist_start_head(&nr_node_list, *pos);
843 static void *nr_node_next(struct seq_file *seq, void *v, loff_t *pos)
845 return seq_hlist_next(v, &nr_node_list, pos);
848 static void nr_node_stop(struct seq_file *seq, void *v)
849 __releases(&nr_node_list_lock)
851 spin_unlock_bh(&nr_node_list_lock);
/*
 * Print one /proc line per node: callsign, mnemonic ("*" when empty),
 * current route index and route count, then quality/obsolescence/
 * neighbour-number for each route.  SEQ_START_TOKEN emits the header.
 */
854 static int nr_node_show(struct seq_file *seq, void *v)
859 if (v == SEQ_START_TOKEN)
861 "callsign mnemonic w n qual obs neigh qual obs neigh qual obs neigh\n");
863 struct nr_node *nr_node = hlist_entry(v, struct nr_node,
/* Hold the per-node lock while reading its route table. */
866 nr_node_lock(nr_node);
867 seq_printf(seq, "%-9s %-7s %d %d",
868 ax2asc(buf, &nr_node->callsign),
869 (nr_node->mnemonic[0] == '\0') ? "*" : nr_node->mnemonic,
873 for (i = 0; i < nr_node->count; i++) {
874 seq_printf(seq, " %3d %d %05d",
875 nr_node->routes[i].quality,
876 nr_node->routes[i].obs_count,
877 nr_node->routes[i].neighbour->number);
879 nr_node_unlock(nr_node);
/* seq_file operations backing the NET/ROM nodes /proc file. */
886 const struct seq_operations nr_node_seqops = {
887 .start = nr_node_start,
888 .next = nr_node_next,
889 .stop = nr_node_stop,
890 .show = nr_node_show,
/* seq_file iteration over nr_neigh_list; the list lock is held from
 * ->start to ->stop, as annotated for sparse below. */
893 static void *nr_neigh_start(struct seq_file *seq, loff_t *pos)
894 __acquires(&nr_neigh_list_lock)
896 spin_lock_bh(&nr_neigh_list_lock);
897 return seq_hlist_start_head(&nr_neigh_list, *pos);
900 static void *nr_neigh_next(struct seq_file *seq, void *v, loff_t *pos)
902 return seq_hlist_next(v, &nr_neigh_list, pos);
905 static void nr_neigh_stop(struct seq_file *seq, void *v)
906 __releases(&nr_neigh_list_lock)
908 spin_unlock_bh(&nr_neigh_list_lock);
/*
 * Print one /proc line per neighbour: number, callsign, device name
 * ("???" when detached), quality/lock/count/failed, then any digipeater
 * callsigns.  SEQ_START_TOKEN emits the header.
 */
911 static int nr_neigh_show(struct seq_file *seq, void *v)
916 if (v == SEQ_START_TOKEN)
917 seq_puts(seq, "addr callsign dev qual lock count failed digipeaters\n");
919 struct nr_neigh *nr_neigh;
921 nr_neigh = hlist_entry(v, struct nr_neigh, neigh_node);
922 seq_printf(seq, "%05d %-9s %-4s %3d %d %3d %3d",
924 ax2asc(buf, &nr_neigh->callsign),
925 nr_neigh->dev ? nr_neigh->dev->name : "???",
931 if (nr_neigh->digipeat != NULL) {
932 for (i = 0; i < nr_neigh->digipeat->ndigi; i++)
933 seq_printf(seq, " %s",
934 ax2asc(buf, &nr_neigh->digipeat->calls[i]));
/* seq_file operations backing the NET/ROM neighbours /proc file. */
942 const struct seq_operations nr_neigh_seqops = {
943 .start = nr_neigh_start,
944 .next = nr_neigh_next,
945 .stop = nr_neigh_stop,
946 .show = nr_neigh_show,
951 * Free all memory associated with the nodes and routes lists.
/*
 * nr_rt_free - module-teardown purge of both global lists.  Takes the
 * neighbour lock, then the node lock, and removes every entry with the
 * _locked helpers (safe iterators, since entries are unlinked in-walk).
 * NOTE(review): per-entry locking/put lines are elided in this excerpt.
 */
953 void nr_rt_free(void)
955 struct nr_neigh *s = NULL;
956 struct nr_node *t = NULL;
957 struct hlist_node *nodet;
959 spin_lock_bh(&nr_neigh_list_lock);
960 spin_lock_bh(&nr_node_list_lock);
961 nr_node_for_each_safe(t, nodet, &nr_node_list) {
963 nr_remove_node_locked(t);
966 nr_neigh_for_each_safe(s, nodet, &nr_neigh_list) {
971 nr_remove_neigh_locked(s);
973 spin_unlock_bh(&nr_node_list_lock);
974 spin_unlock_bh(&nr_neigh_list_lock);