1 // SPDX-License-Identifier: GPL-2.0-only
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 *
 *		Theory of PVC state
 *
 * DCE mode:
 *
 * (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
 *         0,x -> 1,1 if "link reliable" when sending FULL STATUS
 *         1,1 -> 1,0 if received FULL STATUS ACK
 *
 * (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
 *          -> 1 when "PVC up" and (exist,new) = 1,0
 *
 * DTE mode:
 * (exist,new,active) = FULL STATUS if "link reliable"
 *                    = 0, 0, 0 if "link unreliable"
 *
 * No LMI:
 * active = open and "link reliable"
 * exist = new = not used
 *
 * CCITT LMI: ITU-T Q.933 Annex A
 * ANSI LMI: ANSI T1.617 Annex D
 * CISCO LMI: the original, aka "Gang of Four" LMI
 */
33 #include <linux/errno.h>
34 #include <linux/etherdevice.h>
35 #include <linux/hdlc.h>
36 #include <linux/if_arp.h>
37 #include <linux/inetdevice.h>
38 #include <linux/init.h>
39 #include <linux/kernel.h>
40 #include <linux/module.h>
41 #include <linux/pkt_sched.h>
42 #include <linux/poll.h>
43 #include <linux/rtnetlink.h>
44 #include <linux/skbuff.h>
45 #include <linux/slab.h>
/* Frame-Relay framing and LMI protocol constants
 * (ITU-T Q.933 Annex A, ANSI T1.617 Annex D, Cisco "Gang of Four" LMI).
 *
 * Fix: FR_UI, FR_PAD and NLPID_IP are used by fr_hard_header()/fr_rx()
 * below but were missing from this block — restored here.
 */

#define FR_UI			0x03	/* Q.922 Unnumbered Information */
#define FR_PAD			0x00

#define NLPID_IP		0xCC	/* ISO/IEC TR 9577 NLPID values */
#define NLPID_IPV6		0x8E
#define NLPID_SNAP		0x80
#define NLPID_PAD		0x00
#define NLPID_CCITT_ANSI_LMI	0x08
#define NLPID_CISCO_LMI		0x09

#define LMI_CCITT_ANSI_DLCI	   0	/* LMI DLCI */
#define LMI_CISCO_DLCI		1023

#define LMI_CALLREF		0x00	/* Call Reference */
#define LMI_ANSI_LOCKSHIFT	0x95	/* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE	0x01	/* report type */
#define LMI_CCITT_REPTYPE	0x51
#define LMI_ANSI_CISCO_ALIVE	0x03	/* keep alive */
#define LMI_CCITT_ALIVE		0x53
#define LMI_ANSI_CISCO_PVCSTAT	0x07	/* PVC status */
#define LMI_CCITT_PVCSTAT	0x57

#define LMI_FULLREP		0x00	/* full report */
#define LMI_INTEGRITY		0x01	/* link integrity report */
#define LMI_SINGLE		0x02	/* single PVC report */

#define LMI_STATUS_ENQUIRY	0x75
#define LMI_STATUS		0x7D	/* reply */

#define LMI_REPT_LEN		   1	/* report type element length */
#define LMI_INTEG_LEN		   2	/* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH	  13	/* LMI frame lengths */
#define LMI_ANSI_LENGTH		  14
/* NOTE(review): fragmentary extract — the enclosing definitions
 * (the fr_hdr bitfield struct, struct pvc_device and struct frad_state)
 * are not visible here; only selected member lines survived.
 */
91 #if defined(__LITTLE_ENDIAN_BITFIELD)
/* struct pvc_device members (per-DLCI PVC bookkeeping): */
116 struct net_device *frad;
117 struct net_device *main;
118 struct net_device *ether; /* bridged Ethernet interface */
119 struct pvc_device *next; /* Sorted in ascending DLCI order */
/* PVC state flags tracked by the LMI state machine (see file header): */
125 unsigned int active: 1;
126 unsigned int exist: 1;
127 unsigned int deleted: 1;
128 unsigned int fecn: 1; /* last seen FECN congestion bit */
129 unsigned int becn: 1; /* last seen BECN congestion bit */
130 unsigned int bandwidth; /* Cisco LMI reporting only */
/* struct frad_state members (per-FRAD protocol state): */
136 struct pvc_device *first_pvc;
139 struct timer_list timer;
140 struct net_device *dev;
141 unsigned long last_poll;
146 u32 last_errors; /* last errors bit list */
148 u8 txseq; /* TX sequence number */
149 u8 rxseq; /* RX sequence number */
/* Forward declaration: fr_ioctl is referenced by the hdlc_proto ops below. */
153 static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
156 static inline u16 q922_to_dlci(u8 *hdr)
158 return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
162 static inline void dlci_to_q922(u8 *hdr, u16 dlci)
164 hdr[0] = (dlci >> 2) & 0xFC;
165 hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
169 static inline struct frad_state* state(hdlc_device *hdlc)
171 return(struct frad_state *)(hdlc->state);
175 static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
177 struct pvc_device *pvc = state(hdlc)->first_pvc;
180 if (pvc->dlci == dlci)
182 if (pvc->dlci > dlci)
183 return NULL; /* the list is sorted */
191 static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
193 hdlc_device *hdlc = dev_to_hdlc(dev);
194 struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;
197 if ((*pvc_p)->dlci == dlci)
199 if ((*pvc_p)->dlci > dlci)
200 break; /* the list is sorted */
201 pvc_p = &(*pvc_p)->next;
204 pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
206 printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
213 pvc->next = *pvc_p; /* Put it in the chain */
219 static inline int pvc_is_used(struct pvc_device *pvc)
221 return pvc->main || pvc->ether;
225 static inline void pvc_carrier(int on, struct pvc_device *pvc)
229 if (!netif_carrier_ok(pvc->main))
230 netif_carrier_on(pvc->main);
232 if (!netif_carrier_ok(pvc->ether))
233 netif_carrier_on(pvc->ether);
236 if (netif_carrier_ok(pvc->main))
237 netif_carrier_off(pvc->main);
239 if (netif_carrier_ok(pvc->ether))
240 netif_carrier_off(pvc->ether);
245 static inline void delete_unused_pvcs(hdlc_device *hdlc)
247 struct pvc_device **pvc_p = &state(hdlc)->first_pvc;
250 if (!pvc_is_used(*pvc_p)) {
251 struct pvc_device *pvc = *pvc_p;
253 printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
259 pvc_p = &(*pvc_p)->next;
264 static inline struct net_device **get_dev_p(struct pvc_device *pvc,
267 if (type == ARPHRD_ETHER)
274 static int fr_hard_header(struct sk_buff *skb, u16 dlci)
276 if (!skb->dev) { /* Control packets */
278 case LMI_CCITT_ANSI_DLCI:
280 skb->data[3] = NLPID_CCITT_ANSI_LMI;
285 skb->data[3] = NLPID_CISCO_LMI;
292 } else if (skb->dev->type == ARPHRD_DLCI) {
293 switch (skb->protocol) {
294 case htons(ETH_P_IP):
296 skb->data[3] = NLPID_IP;
299 case htons(ETH_P_IPV6):
301 skb->data[3] = NLPID_IPV6;
306 skb->data[3] = FR_PAD;
307 skb->data[4] = NLPID_SNAP;
308 /* OUI 00-00-00 indicates an Ethertype follows */
312 /* This should be an Ethertype: */
313 *(__be16 *)(skb->data + 8) = skb->protocol;
316 } else if (skb->dev->type == ARPHRD_ETHER) {
318 skb->data[3] = FR_PAD;
319 skb->data[4] = NLPID_SNAP;
320 /* OUI 00-80-C2 stands for the 802.1 organization */
324 /* PID 00-07 stands for Ethernet frames without FCS */
332 dlci_to_q922(skb->data, dlci);
333 skb->data[2] = FR_UI;
339 static int pvc_open(struct net_device *dev)
341 struct pvc_device *pvc = dev->ml_priv;
343 if ((pvc->frad->flags & IFF_UP) == 0)
344 return -EIO; /* Frad must be UP in order to activate PVC */
346 if (pvc->open_count++ == 0) {
347 hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
348 if (state(hdlc)->settings.lmi == LMI_NONE)
349 pvc->state.active = netif_carrier_ok(pvc->frad);
351 pvc_carrier(pvc->state.active, pvc);
352 state(hdlc)->dce_changed = 1;
359 static int pvc_close(struct net_device *dev)
361 struct pvc_device *pvc = dev->ml_priv;
363 if (--pvc->open_count == 0) {
364 hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
365 if (state(hdlc)->settings.lmi == LMI_NONE)
366 pvc->state.active = 0;
368 if (state(hdlc)->settings.dce) {
369 state(hdlc)->dce_changed = 1;
370 pvc->state.active = 0;
378 static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
380 struct pvc_device *pvc = dev->ml_priv;
381 fr_proto_pvc_info info;
383 if (ifr->ifr_settings.type == IF_GET_PROTO) {
384 if (dev->type == ARPHRD_ETHER)
385 ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
387 ifr->ifr_settings.type = IF_PROTO_FR_PVC;
389 if (ifr->ifr_settings.size < sizeof(info)) {
390 /* data size wanted */
391 ifr->ifr_settings.size = sizeof(info);
395 info.dlci = pvc->dlci;
396 memcpy(info.master, pvc->frad->name, IFNAMSIZ);
397 if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
398 &info, sizeof(info)))
/* ndo_start_xmit for a PVC netdevice: drop if the PVC is not active,
 * pad bridged-Ethernet frames to ETH_ZLEN, guarantee 10 bytes of
 * headroom for the FR/SNAP header, prepend it via fr_hard_header(),
 * account stats and hand the frame to the FRAD as ETH_P_HDLC.
 * NOTE(review): extract is fragmentary — drop/error paths elided.
 */
406 static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
408 struct pvc_device *pvc = dev->ml_priv;
/* Inactive PVC: frame cannot be delivered. */
410 if (!pvc->state.active)
413 if (dev->type == ARPHRD_ETHER) {
414 int pad = ETH_ZLEN - skb->len;
416 if (pad > 0) { /* Pad the frame with zeros */
417 if (__skb_pad(skb, pad, false))
423 /* We already requested the header space with dev->needed_headroom.
424 * So this is just a protection in case the upper layer didn't take
425 * dev->needed_headroom into consideration.
*/
427 if (skb_headroom(skb) < 10) {
428 struct sk_buff *skb2 = skb_realloc_headroom(skb, 10);
/* Build the Frame-Relay header for this PVC's DLCI. */
437 if (fr_hard_header(skb, pvc->dlci))
440 dev->stats.tx_bytes += skb->len;
441 dev->stats.tx_packets++;
442 if (pvc->state.fecn) /* TX Congestion counter */
443 dev->stats.tx_compressed++;
/* Re-target the frame at the physical FRAD device. */
444 skb->dev = pvc->frad;
445 skb->protocol = htons(ETH_P_HDLC);
446 skb_reset_network_header(skb);
/* Drop path accounting (label elided in this extract): */
453 dev->stats.tx_dropped++;
457 static inline void fr_log_dlci_active(struct pvc_device *pvc)
459 netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
461 pvc->main ? pvc->main->name : "",
462 pvc->main && pvc->ether ? " " : "",
463 pvc->ether ? pvc->ether->name : "",
464 pvc->state.new ? " new" : "",
465 !pvc->state.exist ? "deleted" :
466 pvc->state.active ? "active" : "inactive");
471 static inline u8 fr_lmi_nextseq(u8 x)
/* Build and transmit an LMI status/status-enquiry frame.
 * A DCE sending a full report also appends one PVC status IE per PVC
 * and runs the (exist,new,active) transitions from the file header.
 * NOTE(review): extract is fragmentary — allocation-failure and loop
 * framing lines are elided.
 */
478 static void fr_lmi_send(struct net_device *dev, int fullrep)
480 hdlc_device *hdlc = dev_to_hdlc(dev);
482 struct pvc_device *pvc = state(hdlc)->first_pvc;
483 int lmi = state(hdlc)->settings.lmi;
484 int dce = state(hdlc)->settings.dce;
485 int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
486 int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
/* Full DCE report: 2 address bytes + status IE per known PVC. */
490 if (dce && fullrep) {
491 len += state(hdlc)->dce_pvc_count * (2 + stat_len);
492 if (len > HDLC_MAX_MRU) {
493 netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
498 skb = dev_alloc_skb(len);
500 netdev_warn(dev, "Memory squeeze on fr_lmi_send()\n");
503 memset(skb->data, 0, len);
/* LMI control frames use a protocol-specific reserved DLCI. */
505 if (lmi == LMI_CISCO) {
506 fr_hard_header(skb, LMI_CISCO_DLCI)
508 fr_hard_header(skb, LMI_CCITT_ANSI_DLCI);
510 data = skb_tail_pointer(skb);
511 data[i++] = LMI_CALLREF;
512 data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
514 data[i++] = LMI_ANSI_LOCKSHIFT;
/* Report type IE, then link integrity verification IE. */
515 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
516 LMI_ANSI_CISCO_REPTYPE;
517 data[i++] = LMI_REPT_LEN;
518 data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
519 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
520 data[i++] = LMI_INTEG_LEN;
521 data[i++] = state(hdlc)->txseq =
522 fr_lmi_nextseq(state(hdlc)->txseq);
523 data[i++] = state(hdlc)->rxseq;
/* DCE full report: emit one PVC status IE per PVC. */
525 if (dce && fullrep) {
527 data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
528 LMI_ANSI_CISCO_PVCSTAT;
529 data[i++] = stat_len;
531 /* LMI start/restart */
532 if (state(hdlc)->reliable && !pvc->state.exist) {
533 pvc->state.exist = pvc->state.new = 1;
534 fr_log_dlci_active(pvc);
537 /* ifconfig PVC up */
538 if (pvc->open_count && !pvc->state.active &&
539 pvc->state.exist && !pvc->state.new) {
541 pvc->state.active = 1;
542 fr_log_dlci_active(pvc);
/* Encode the DLCI in the per-protocol PVC status IE format. */
545 if (lmi == LMI_CISCO) {
546 data[i] = pvc->dlci >> 8;
547 data[i + 1] = pvc->dlci & 0xFF;
549 data[i] = (pvc->dlci >> 4) & 0x3F;
550 data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
556 else if (pvc->state.active)
/* Queue the finished control frame on the FRAD. */
565 skb->priority = TC_PRIO_CONTROL;
567 skb->protocol = htons(ETH_P_HDLC);
568 skb_reset_network_header(skb);
575 static void fr_set_link_state(int reliable, struct net_device *dev)
577 hdlc_device *hdlc = dev_to_hdlc(dev);
578 struct pvc_device *pvc = state(hdlc)->first_pvc;
580 state(hdlc)->reliable = reliable;
582 netif_dormant_off(dev);
583 state(hdlc)->n391cnt = 0; /* Request full status */
584 state(hdlc)->dce_changed = 1;
586 if (state(hdlc)->settings.lmi == LMI_NONE) {
587 while (pvc) { /* Activate all PVCs */
589 pvc->state.exist = pvc->state.active = 1;
595 netif_dormant_on(dev);
596 while (pvc) { /* Deactivate all PVCs */
598 pvc->state.exist = pvc->state.active = 0;
600 if (!state(hdlc)->settings.dce)
601 pvc->state.bandwidth = 0;
/* Periodic LMI timer. DCE: link is reliable while an enquiry arrived
 * within T392. DTE: track the last N393 polls as an error bit list and
 * declare the link unreliable at N392 errors; send a status enquiry
 * (full report every N391 polls) and re-arm with T391/T392.
 * NOTE(review): extract is fragmentary — the DTE else-branch framing
 * is elided, so the two paths appear merged below.
 */
608 static void fr_timer(struct timer_list *t)
610 struct frad_state *st = from_timer(st, t, timer);
611 struct net_device *dev = st->dev;
612 hdlc_device *hdlc = dev_to_hdlc(dev);
613 int i, cnt = 0, reliable;
/* DCE: reliable iff an enquiry was seen within the T392 window. */
616 if (state(hdlc)->settings.dce) {
617 reliable = state(hdlc)->request &&
618 time_before(jiffies, state(hdlc)->last_poll +
619 state(hdlc)->settings.t392 * HZ);
620 state(hdlc)->request = 0;
/* DTE: a still-pending request means the poll went unanswered. */
622 state(hdlc)->last_errors <<= 1; /* Shift the list */
623 if (state(hdlc)->request) {
624 if (state(hdlc)->reliable)
625 netdev_info(dev, "No LMI status reply received\n");
626 state(hdlc)->last_errors |= 1;
/* Count errors over the last N393 polls. */
629 list = state(hdlc)->last_errors;
630 for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
631 cnt += (list & 1); /* errors count */
633 reliable = (cnt < state(hdlc)->settings.n392);
/* React to a reliability transition. */
636 if (state(hdlc)->reliable != reliable) {
637 netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
638 fr_set_link_state(reliable, dev);
/* Re-arm: DCE waits T392; DTE sends a poll and waits T391. */
641 if (state(hdlc)->settings.dce)
642 state(hdlc)->timer.expires = jiffies +
643 state(hdlc)->settings.t392 * HZ;
645 if (state(hdlc)->n391cnt)
646 state(hdlc)->n391cnt--;
648 fr_lmi_send(dev, state(hdlc)->n391cnt == 0);
650 state(hdlc)->last_poll = jiffies;
651 state(hdlc)->request = 1;
652 state(hdlc)->timer.expires = jiffies +
653 state(hdlc)->settings.t391 * HZ;
656 add_timer(&state(hdlc)->timer);
/* Parse and act on a received LMI frame: validate framing, NLPID,
 * call reference, message/report/integrity IEs; update sequence
 * numbers; as DCE answer the enquiry, as DTE process a full PVC
 * status report. Returns non-zero to reject the frame.
 * NOTE(review): extract is fragmentary — several return statements
 * and loop/brace lines are elided.
 */
660 static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
662 hdlc_device *hdlc = dev_to_hdlc(dev);
663 struct pvc_device *pvc;
665 int lmi = state(hdlc)->settings.lmi;
666 int dce = state(hdlc)->settings.dce;
667 int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;
/* --- frame validation --- */
669 if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
670 LMI_CCITT_CISCO_LENGTH)) {
671 netdev_info(dev, "Short LMI frame\n");
675 if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
676 NLPID_CCITT_ANSI_LMI)) {
677 netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
681 if (skb->data[4] != LMI_CALLREF) {
682 netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
/* DCE expects an enquiry; DTE expects a status reply. */
687 if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
688 netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
693 if (lmi == LMI_ANSI) {
694 if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
695 netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
/* --- report type IE --- */
703 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
704 LMI_ANSI_CISCO_REPTYPE)) {
705 netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
710 if (skb->data[++i] != LMI_REPT_LEN) {
711 netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
716 reptype = skb->data[++i];
717 if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
718 netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
/* --- link integrity verification IE --- */
723 if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
724 LMI_ANSI_CISCO_ALIVE)) {
725 netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
730 if (skb->data[++i] != LMI_INTEG_LEN) {
731 netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
/* --- sequence number exchange --- */
737 state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
738 rxseq = skb->data[i++]; /* Should confirm our sequence */
740 txseq = state(hdlc)->txseq;
743 state(hdlc)->last_poll = jiffies;
746 if (!state(hdlc)->reliable)
749 if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
750 state(hdlc)->n391cnt = 0;
/* --- DCE side: answer the enquiry --- */
755 if (state(hdlc)->fullrep_sent && !error) {
756 /* Stop sending full report - the last one has been confirmed by DTE */
757 state(hdlc)->fullrep_sent = 0;
758 pvc = state(hdlc)->first_pvc;
760 if (pvc->state.new) {
763 /* Tell DTE that new PVC is now active */
764 state(hdlc)->dce_changed = 1;
770 if (state(hdlc)->dce_changed) {
771 reptype = LMI_FULLREP;
772 state(hdlc)->fullrep_sent = 1;
773 state(hdlc)->dce_changed = 0;
776 state(hdlc)->request = 1; /* got request */
777 fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
/* --- DTE side: process the status reply --- */
783 state(hdlc)->request = 0; /* got response, no request pending */
788 if (reptype != LMI_FULLREP)
/* Full report: assume all PVCs deleted until listed below. */
791 pvc = state(hdlc)->first_pvc;
794 pvc->state.deleted = 1;
/* Walk the PVC status IEs in the report. */
799 while (skb->len >= i + 2 + stat_len) {
802 unsigned int active, new;
804 if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
805 LMI_ANSI_CISCO_PVCSTAT)) {
806 netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
811 if (skb->data[++i] != stat_len) {
812 netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
/* Decode new/active bits and DLCI (format differs per LMI type). */
818 new = !! (skb->data[i + 2] & 0x08);
819 active = !! (skb->data[i + 2] & 0x02);
820 if (lmi == LMI_CISCO) {
821 dlci = (skb->data[i] << 8) | skb->data[i + 1];
822 bw = (skb->data[i + 3] << 16) |
823 (skb->data[i + 4] << 8) |
826 dlci = ((skb->data[i] & 0x3F) << 4) |
827 ((skb->data[i + 1] & 0x78) >> 3);
/* Create the PVC if we don't know it yet. */
831 pvc = add_pvc(dev, dlci);
833 if (!pvc && !no_ram) {
834 netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
839 pvc->state.exist = 1;
840 pvc->state.deleted = 0;
841 if (active != pvc->state.active ||
842 new != pvc->state.new ||
843 bw != pvc->state.bandwidth ||
845 pvc->state.new = new;
846 pvc->state.active = active;
847 pvc->state.bandwidth = bw;
848 pvc_carrier(active, pvc);
849 fr_log_dlci_active(pvc);
/* PVCs still marked deleted were absent from the report. */
856 pvc = state(hdlc)->first_pvc;
859 if (pvc->state.deleted && pvc->state.exist) {
861 pvc->state.active = pvc->state.new = 0;
862 pvc->state.exist = 0;
863 pvc->state.bandwidth = 0;
864 fr_log_dlci_active(pvc);
869 /* Next full report after N391 polls */
870 state(hdlc)->n391cnt = state(hdlc)->settings.n391;
875 static int fr_snap_parse(struct sk_buff *skb, struct pvc_device *pvc)
877 /* OUI 00-00-00 indicates an Ethertype follows */
878 if (skb->data[0] == 0x00 &&
879 skb->data[1] == 0x00 &&
880 skb->data[2] == 0x00) {
883 skb->dev = pvc->main;
884 skb->protocol = *(__be16 *)(skb->data + 3); /* Ethertype */
886 skb_reset_mac_header(skb);
889 /* OUI 00-80-C2 stands for the 802.1 organization */
890 } else if (skb->data[0] == 0x00 &&
891 skb->data[1] == 0x80 &&
892 skb->data[2] == 0xC2) {
893 /* PID 00-07 stands for Ethernet frames without FCS */
894 if (skb->data[3] == 0x00 &&
895 skb->data[4] == 0x07) {
899 if (skb->len < ETH_HLEN)
901 skb->protocol = eth_type_trans(skb, pvc->ether);
904 /* PID unsupported */
909 /* OUI unsupported */
/* Receive path for the FRAD: validate the Q.922/UI framing, divert
 * LMI frames to fr_lmi_recv(), look up the PVC by DLCI, track
 * FECN/BECN congestion bits, then demultiplex by NLPID (IP, IPv6,
 * SNAP) onto the PVC's netdevice.
 * NOTE(review): extract is fragmentary — drop/error paths elided.
 */
915 static int fr_rx(struct sk_buff *skb)
917 struct net_device *frad = skb->dev;
918 hdlc_device *hdlc = dev_to_hdlc(frad);
919 struct fr_hdr *fh = (struct fr_hdr *)skb->data;
920 u8 *data = skb->data;
922 struct pvc_device *pvc;
923 struct net_device *dev;
/* Must be a well-formed 2-byte address + UI control field. */
925 if (skb->len < 4 || fh->ea1 || !fh->ea2 || data[2] != FR_UI)
928 dlci = q922_to_dlci(skb->data);
/* LMI control traffic arrives on a protocol-specific DLCI. */
930 if ((dlci == LMI_CCITT_ANSI_DLCI &&
931 (state(hdlc)->settings.lmi == LMI_ANSI ||
932 state(hdlc)->settings.lmi == LMI_CCITT)) ||
933 (dlci == LMI_CISCO_DLCI &&
934 state(hdlc)->settings.lmi == LMI_CISCO)) {
935 if (fr_lmi_recv(frad, skb))
937 dev_kfree_skb_any(skb);
938 return NET_RX_SUCCESS;
941 pvc = find_pvc(hdlc, dlci);
944 netdev_info(frad, "No PVC for received frame's DLCI %d\n",
/* Track FECN/BECN congestion bit transitions (debug log on change). */
950 if (pvc->state.fecn != fh->fecn) {
952 printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
953 dlci, fh->fecn ? "N" : "FF");
955 pvc->state.fecn ^= 1;
958 if (pvc->state.becn != fh->becn) {
960 printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
961 dlci, fh->becn ? "N" : "FF");
963 pvc->state.becn ^= 1;
/* Unshare before modifying the skb. */
967 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
968 frad->stats.rx_dropped++;
/* Demultiplex on the NLPID byte. */
972 if (data[3] == NLPID_IP) {
975 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
976 skb->dev = pvc->main;
977 skb->protocol = htons(ETH_P_IP);
978 skb_reset_mac_header(skb);
980 } else if (data[3] == NLPID_IPV6) {
983 skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
984 skb->dev = pvc->main;
985 skb->protocol = htons(ETH_P_IPV6);
986 skb_reset_mac_header(skb);
988 } else if (data[3] == FR_PAD) {
991 if (data[4] == NLPID_SNAP) { /* A SNAP header follows */
993 if (skb->len < 5) /* Incomplete SNAP header */
995 if (fr_snap_parse(skb, pvc))
1002 netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
/* Successful delivery accounting. */
1008 dev->stats.rx_packets++; /* PVC traffic */
1009 dev->stats.rx_bytes += skb->len;
1010 if (pvc->state.becn)
1011 dev->stats.rx_compressed++;
1013 return NET_RX_SUCCESS;
/* Error path (label elided in this extract): */
1016 frad->stats.rx_errors++; /* Mark error */
1018 dev_kfree_skb_any(skb);
1024 static void fr_start(struct net_device *dev)
1026 hdlc_device *hdlc = dev_to_hdlc(dev);
1028 printk(KERN_DEBUG "fr_start\n");
1030 if (state(hdlc)->settings.lmi != LMI_NONE) {
1031 state(hdlc)->reliable = 0;
1032 state(hdlc)->dce_changed = 1;
1033 state(hdlc)->request = 0;
1034 state(hdlc)->fullrep_sent = 0;
1035 state(hdlc)->last_errors = 0xFFFFFFFF;
1036 state(hdlc)->n391cnt = 0;
1037 state(hdlc)->txseq = state(hdlc)->rxseq = 0;
1039 state(hdlc)->dev = dev;
1040 timer_setup(&state(hdlc)->timer, fr_timer, 0);
1041 /* First poll after 1 s */
1042 state(hdlc)->timer.expires = jiffies + HZ;
1043 add_timer(&state(hdlc)->timer);
1045 fr_set_link_state(1, dev);
1049 static void fr_stop(struct net_device *dev)
1051 hdlc_device *hdlc = dev_to_hdlc(dev);
1053 printk(KERN_DEBUG "fr_stop\n");
1055 if (state(hdlc)->settings.lmi != LMI_NONE)
1056 del_timer_sync(&state(hdlc)->timer);
1057 fr_set_link_state(0, dev);
1061 static void fr_close(struct net_device *dev)
1063 hdlc_device *hdlc = dev_to_hdlc(dev);
1064 struct pvc_device *pvc = state(hdlc)->first_pvc;
1066 while (pvc) { /* Shutdown all PVCs for this FRAD */
1068 dev_close(pvc->main);
1070 dev_close(pvc->ether);
1076 static void pvc_setup(struct net_device *dev)
1078 dev->type = ARPHRD_DLCI;
1079 dev->flags = IFF_POINTOPOINT;
1080 dev->hard_header_len = 0;
1082 netif_keep_dst(dev);
1085 static const struct net_device_ops pvc_ops = {
1086 .ndo_open = pvc_open,
1087 .ndo_stop = pvc_close,
1088 .ndo_start_xmit = pvc_xmit,
1089 .ndo_do_ioctl = pvc_ioctl,
/* Create a PVC netdevice ("pvc%d" routed or "pvceth%d" bridged) for
 * @dlci on FRAD @frad, register it and attach it to the PVC entry.
 * NOTE(review): extract is fragmentary — error returns and the final
 * return are elided.
 */
1092 static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
1094 hdlc_device *hdlc = dev_to_hdlc(frad);
1095 struct pvc_device *pvc;
1096 struct net_device *dev;
/* Find or create the PVC bookkeeping entry. */
1099 if ((pvc = add_pvc(frad, dlci)) == NULL) {
1100 netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
/* A device of this type already exists for the DLCI. */
1104 if (*get_dev_p(pvc, type))
1107 used = pvc_is_used(pvc);
/* Allocate the netdevice; bridged PVCs use Ethernet setup. */
1109 if (type == ARPHRD_ETHER)
1110 dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
1113 dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);
1116 netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
1117 delete_unused_pvcs(hdlc);
1121 if (type == ARPHRD_ETHER) {
1122 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1123 eth_hw_addr_random(dev);
/* Routed PVC: store the DLCI in the device addresses. */
1125 *(__be16*)dev->dev_addr = htons(dlci);
1126 dlci_to_q922(dev->broadcast, dlci);
1128 dev->netdev_ops = &pvc_ops;
1129 dev->mtu = HDLC_MAX_MTU;
1131 dev->max_mtu = HDLC_MAX_MTU;
/* Headroom for the worst-case 10-byte FR/SNAP header. */
1132 dev->needed_headroom = 10;
1133 dev->priv_flags |= IFF_NO_QUEUE;
1136 if (register_netdevice(dev) != 0) {
1138 delete_unused_pvcs(hdlc);
1142 dev->needs_free_netdev = true;
1143 *get_dev_p(pvc, type) = dev;
/* New PVC: tell the LMI DCE machinery on next full report. */
1145 state(hdlc)->dce_changed = 1;
1146 state(hdlc)->dce_pvc_count++;
1153 static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
1155 struct pvc_device *pvc;
1156 struct net_device *dev;
1158 if ((pvc = find_pvc(hdlc, dlci)) == NULL)
1161 if ((dev = *get_dev_p(pvc, type)) == NULL)
1164 if (dev->flags & IFF_UP)
1165 return -EBUSY; /* PVC in use */
1167 unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
1168 *get_dev_p(pvc, type) = NULL;
1170 if (!pvc_is_used(pvc)) {
1171 state(hdlc)->dce_pvc_count--;
1172 state(hdlc)->dce_changed = 1;
1174 delete_unused_pvcs(hdlc);
1180 static void fr_destroy(struct net_device *frad)
1182 hdlc_device *hdlc = dev_to_hdlc(frad);
1183 struct pvc_device *pvc = state(hdlc)->first_pvc;
1184 state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
1185 state(hdlc)->dce_pvc_count = 0;
1186 state(hdlc)->dce_changed = 1;
1189 struct pvc_device *next = pvc->next;
1190 /* destructors will free_netdev() main and ether */
1192 unregister_netdevice(pvc->main);
1195 unregister_netdevice(pvc->ether);
1203 static struct hdlc_proto proto = {
1207 .detach = fr_destroy,
1210 .module = THIS_MODULE,
/* Protocol ioctl handler: IF_GET_PROTO returns the current fr_proto
 * settings; IF_PROTO_FR validates and applies new settings (attaching
 * this protocol to the device if needed); the ADD/DEL (ETH_)PVC
 * subcommands create or remove PVC netdevices.
 * NOTE(review): extract is fragmentary — several case labels, returns
 * and error paths are elided.
 */
1214 static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
1216 fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
1217 const size_t size = sizeof(fr_proto);
1218 fr_proto new_settings;
1219 hdlc_device *hdlc = dev_to_hdlc(dev);
1223 switch (ifr->ifr_settings.type) {
/* IF_GET_PROTO: copy current settings to userspace. */
1225 if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
1227 ifr->ifr_settings.type = IF_PROTO_FR;
1228 if (ifr->ifr_settings.size < size) {
1229 ifr->ifr_settings.size = size; /* data size wanted */
1232 if (copy_to_user(fr_s, &state(hdlc)->settings, size))
/* IF_PROTO_FR: validate and apply new settings (admin only, dev down). */
1237 if (!capable(CAP_NET_ADMIN))
1240 if (dev->flags & IFF_UP)
1243 if (copy_from_user(&new_settings, fr_s, size))
1246 if (new_settings.lmi == LMI_DEFAULT)
1247 new_settings.lmi = LMI_ANSI;
/* Sanity-check LMI type and Q.933/T1.617 timer/counter ranges. */
1249 if ((new_settings.lmi != LMI_NONE &&
1250 new_settings.lmi != LMI_ANSI &&
1251 new_settings.lmi != LMI_CCITT &&
1252 new_settings.lmi != LMI_CISCO) ||
1253 new_settings.t391 < 1 ||
1254 new_settings.t392 < 2 ||
1255 new_settings.n391 < 1 ||
1256 new_settings.n392 < 1 ||
1257 new_settings.n393 < new_settings.n392 ||
1258 new_settings.n393 > 32 ||
1259 (new_settings.dce != 0 &&
1260 new_settings.dce != 1))
/* Configure the hardware line coding before attaching. */
1263 result=hdlc->attach(dev, ENCODING_NRZ,PARITY_CRC16_PR1_CCITT);
1267 if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
1268 result = attach_hdlc_protocol(dev, &proto,
1269 sizeof(struct frad_state));
1272 state(hdlc)->first_pvc = NULL;
1273 state(hdlc)->dce_pvc_count = 0;
1275 memcpy(&state(hdlc)->settings, &new_settings, size);
1276 dev->type = ARPHRD_FRAD;
1277 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
/* PVC management subcommands. */
1280 case IF_PROTO_FR_ADD_PVC:
1281 case IF_PROTO_FR_DEL_PVC:
1282 case IF_PROTO_FR_ADD_ETH_PVC:
1283 case IF_PROTO_FR_DEL_ETH_PVC:
1284 if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
1287 if (!capable(CAP_NET_ADMIN))
1290 if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
1291 sizeof(fr_proto_pvc)))
1294 if (pvc.dlci <= 0 || pvc.dlci >= 1024)
1295 return -EINVAL; /* Only 10 bits, DLCI 0 reserved */
/* result doubles as the ARPHRD device type here. */
1297 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
1298 ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
1299 result = ARPHRD_ETHER; /* bridged Ethernet device */
1301 result = ARPHRD_DLCI;
1303 if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
1304 ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
1305 return fr_add_pvc(dev, pvc.dlci, result);
1307 return fr_del_pvc(hdlc, pvc.dlci, result);
1314 static int __init mod_init(void)
1316 register_hdlc_protocol(&proto);
1321 static void __exit mod_exit(void)
1323 unregister_hdlc_protocol(&proto);
/* Standard module registration and metadata. */
1327 module_init(mod_init);
1328 module_exit(mod_exit);
1330 MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
1331 MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
1332 MODULE_LICENSE("GPL v2");