// SPDX-License-Identifier: GPL-2.0-only
/*
 * Generic HDLC support routines for Linux
 * X.25 support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 */
9 #include <linux/errno.h>
10 #include <linux/gfp.h>
11 #include <linux/hdlc.h>
12 #include <linux/if_arp.h>
13 #include <linux/inetdevice.h>
14 #include <linux/init.h>
15 #include <linux/kernel.h>
16 #include <linux/lapb.h>
17 #include <linux/module.h>
18 #include <linux/pkt_sched.h>
19 #include <linux/poll.h>
20 #include <linux/rtnetlink.h>
21 #include <linux/skbuff.h>
22 #include <net/x25device.h>
25 x25_hdlc_proto settings;
27 spinlock_t up_lock; /* Protects "up" */
28 struct sk_buff_head rx_queue;
29 struct tasklet_struct rx_tasklet;
32 static int x25_ioctl(struct net_device *dev, struct ifreq *ifr);
34 static struct x25_state *state(hdlc_device *hdlc)
39 static void x25_rx_queue_kick(struct tasklet_struct *t)
41 struct x25_state *x25st = from_tasklet(x25st, t, rx_tasklet);
42 struct sk_buff *skb = skb_dequeue(&x25st->rx_queue);
45 netif_receive_skb_core(skb);
46 skb = skb_dequeue(&x25st->rx_queue);
50 /* These functions are callbacks called by LAPB layer */
52 static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
54 struct x25_state *x25st = state(dev_to_hdlc(dev));
58 skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
60 netdev_err(dev, "out of memory\n");
64 ptr = skb_put(skb, 1);
67 skb->protocol = x25_type_trans(skb, dev);
69 skb_queue_tail(&x25st->rx_queue, skb);
70 tasklet_schedule(&x25st->rx_tasklet);
75 static void x25_connected(struct net_device *dev, int reason)
77 x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
82 static void x25_disconnected(struct net_device *dev, int reason)
84 x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
89 static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
91 struct x25_state *x25st = state(dev_to_hdlc(dev));
94 if (skb_cow(skb, 1)) {
102 *ptr = X25_IFACE_DATA;
104 skb->protocol = x25_type_trans(skb, dev);
106 skb_queue_tail(&x25st->rx_queue, skb);
107 tasklet_schedule(&x25st->rx_tasklet);
108 return NET_RX_SUCCESS;
113 static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
115 hdlc_device *hdlc = dev_to_hdlc(dev);
117 skb_reset_network_header(skb);
118 skb->protocol = hdlc_type_trans(skb, dev);
120 if (dev_nit_active(dev))
121 dev_queue_xmit_nit(skb, dev);
123 hdlc->xmit(skb, dev); /* Ignore return value :-( */
128 static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
130 hdlc_device *hdlc = dev_to_hdlc(dev);
131 struct x25_state *x25st = state(hdlc);
134 /* There should be a pseudo header of 1 byte added by upper layers.
135 * Check to make sure it is there before reading it.
142 spin_lock_bh(&x25st->up_lock);
144 spin_unlock_bh(&x25st->up_lock);
149 switch (skb->data[0]) {
150 case X25_IFACE_DATA: /* Data to be transmitted */
152 if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
154 spin_unlock_bh(&x25st->up_lock);
157 case X25_IFACE_CONNECT:
158 if ((result = lapb_connect_request(dev))!= LAPB_OK) {
159 if (result == LAPB_CONNECTED)
160 /* Send connect confirm. msg to level 3 */
161 x25_connected(dev, 0);
163 netdev_err(dev, "LAPB connect request failed, error code = %i\n",
168 case X25_IFACE_DISCONNECT:
169 if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
170 if (result == LAPB_NOTCONNECTED)
171 /* Send disconnect confirm. msg to level 3 */
172 x25_disconnected(dev, 0);
174 netdev_err(dev, "LAPB disconnect request failed, error code = %i\n",
179 default: /* to be defined */
183 spin_unlock_bh(&x25st->up_lock);
190 static int x25_open(struct net_device *dev)
192 static const struct lapb_register_struct cb = {
193 .connect_confirmation = x25_connected,
194 .connect_indication = x25_connected,
195 .disconnect_confirmation = x25_disconnected,
196 .disconnect_indication = x25_disconnected,
197 .data_indication = x25_data_indication,
198 .data_transmit = x25_data_transmit,
200 hdlc_device *hdlc = dev_to_hdlc(dev);
201 struct x25_state *x25st = state(hdlc);
202 struct lapb_parms_struct params;
205 result = lapb_register(dev, &cb);
206 if (result != LAPB_OK)
209 result = lapb_getparms(dev, ¶ms);
210 if (result != LAPB_OK)
213 if (state(hdlc)->settings.dce)
214 params.mode = params.mode | LAPB_DCE;
216 if (state(hdlc)->settings.modulo == 128)
217 params.mode = params.mode | LAPB_EXTENDED;
219 params.window = state(hdlc)->settings.window;
220 params.t1 = state(hdlc)->settings.t1;
221 params.t2 = state(hdlc)->settings.t2;
222 params.n2 = state(hdlc)->settings.n2;
224 result = lapb_setparms(dev, ¶ms);
225 if (result != LAPB_OK)
228 spin_lock_bh(&x25st->up_lock);
230 spin_unlock_bh(&x25st->up_lock);
237 static void x25_close(struct net_device *dev)
239 hdlc_device *hdlc = dev_to_hdlc(dev);
240 struct x25_state *x25st = state(hdlc);
242 spin_lock_bh(&x25st->up_lock);
244 spin_unlock_bh(&x25st->up_lock);
246 lapb_unregister(dev);
247 tasklet_kill(&x25st->rx_tasklet);
252 static int x25_rx(struct sk_buff *skb)
254 struct net_device *dev = skb->dev;
255 hdlc_device *hdlc = dev_to_hdlc(dev);
256 struct x25_state *x25st = state(hdlc);
258 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
259 dev->stats.rx_dropped++;
263 spin_lock_bh(&x25st->up_lock);
265 spin_unlock_bh(&x25st->up_lock);
267 dev->stats.rx_dropped++;
271 if (lapb_data_received(dev, skb) == LAPB_OK) {
272 spin_unlock_bh(&x25st->up_lock);
273 return NET_RX_SUCCESS;
276 spin_unlock_bh(&x25st->up_lock);
277 dev->stats.rx_errors++;
278 dev_kfree_skb_any(skb);
283 static struct hdlc_proto proto = {
289 .module = THIS_MODULE,
293 static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
295 x25_hdlc_proto __user *x25_s = ifr->ifr_settings.ifs_ifsu.x25;
296 const size_t size = sizeof(x25_hdlc_proto);
297 hdlc_device *hdlc = dev_to_hdlc(dev);
298 x25_hdlc_proto new_settings;
301 switch (ifr->ifr_settings.type) {
303 if (dev_to_hdlc(dev)->proto != &proto)
305 ifr->ifr_settings.type = IF_PROTO_X25;
306 if (ifr->ifr_settings.size < size) {
307 ifr->ifr_settings.size = size; /* data size wanted */
310 if (copy_to_user(x25_s, &state(hdlc)->settings, size))
315 if (!capable(CAP_NET_ADMIN))
318 if (dev->flags & IFF_UP)
321 /* backward compatibility */
322 if (ifr->ifr_settings.size == 0) {
323 new_settings.dce = 0;
324 new_settings.modulo = 8;
325 new_settings.window = 7;
328 new_settings.n2 = 10;
331 if (copy_from_user(&new_settings, x25_s, size))
334 if ((new_settings.dce != 0 &&
335 new_settings.dce != 1) ||
336 (new_settings.modulo != 8 &&
337 new_settings.modulo != 128) ||
338 new_settings.window < 1 ||
339 (new_settings.modulo == 8 &&
340 new_settings.window > 7) ||
341 (new_settings.modulo == 128 &&
342 new_settings.window > 127) ||
343 new_settings.t1 < 1 ||
344 new_settings.t1 > 255 ||
345 new_settings.t2 < 1 ||
346 new_settings.t2 > 255 ||
347 new_settings.n2 < 1 ||
348 new_settings.n2 > 255)
352 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
356 if ((result = attach_hdlc_protocol(dev, &proto,
357 sizeof(struct x25_state))))
360 memcpy(&state(hdlc)->settings, &new_settings, size);
361 state(hdlc)->up = false;
362 spin_lock_init(&state(hdlc)->up_lock);
363 skb_queue_head_init(&state(hdlc)->rx_queue);
364 tasklet_setup(&state(hdlc)->rx_tasklet, x25_rx_queue_kick);
366 /* There's no header_ops so hard_header_len should be 0. */
367 dev->hard_header_len = 0;
368 /* When transmitting data:
369 * first we'll remove a pseudo header of 1 byte,
370 * then we'll prepend an LAPB header of at most 3 bytes.
372 dev->needed_headroom = 3 - 1;
374 dev->type = ARPHRD_X25;
375 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
376 netif_dormant_off(dev);
384 static int __init mod_init(void)
386 register_hdlc_protocol(&proto);
392 static void __exit mod_exit(void)
394 unregister_hdlc_protocol(&proto);
398 module_init(mod_init);
399 module_exit(mod_exit);
401 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
402 MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
403 MODULE_LICENSE("GPL v2");