/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"
#include "trace.h"
#include "crypto.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
        u32 sent_pkts;
        u32 recv_pkts;
        u32 sent_states;
        u32 recv_states;
        u32 sent_probes;
        u32 recv_probes;
        u32 sent_nacks;
        u32 recv_nacks;
        u32 sent_acks;
        u32 sent_bundled;
        u32 sent_bundles;
        u32 recv_bundled;
        u32 recv_bundles;
        u32 retransmitted;
        u32 sent_fragmented;
        u32 sent_fragments;
        u32 recv_fragmented;
        u32 recv_fragments;
        u32 link_congs;         /* # port sends blocked by congestion */
        u32 deferred_recv;
        u32 duplicates;
        u32 max_queue_sz;       /* send queue size high water mark */
        u32 accu_queue_sz;      /* used for send queue size profiling */
        u32 queue_sz_counts;    /* used for send queue size profiling */
        u32 msg_length_counts;  /* used for message length profiling */
        u32 msg_lengths_total;  /* used for message length profiling */
        u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snd_nxt: next sequence number to use for outbound messages
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferdq: deferred queue of out-of-sequence broadcast messages received from node
 * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
        u32 addr;
        char name[TIPC_MAX_LINK_NAME];
        struct net *net;

        /* Management and link supervision data */
        u16 peer_session;
        u16 session;
        u16 snd_nxt_state;
        u16 rcv_nxt_state;
        u32 peer_bearer_id;
        u32 bearer_id;
        u32 tolerance;
        u32 abort_limit;
        u32 state;
        u16 peer_caps;
        bool in_session;
        bool active;
        u32 silent_intv_cnt;
        char if_name[TIPC_MAX_IF_NAME];
        u32 priority;
        char net_plane;
        struct tipc_mon_state mon_state;
        u16 rst_cnt;

        /* Failover/synch */
        u16 drop_point;
        struct sk_buff *failover_reasm_skb;
        struct sk_buff_head failover_deferdq;

        /* Max packet negotiation */
        u16 mtu;
        u16 advertised_mtu;

        /* Sending */
        struct sk_buff_head transmq;
        struct sk_buff_head backlogq;
        struct {
                u16 len;
                u16 limit;
                struct sk_buff *target_bskb;
        } backlog[5];
        u16 snd_nxt;

        /* Reception */
        u16 rcv_nxt;
        u32 rcv_unacked;
        struct sk_buff_head deferdq;
        struct sk_buff_head *inputq;
        struct sk_buff_head *namedq;

        /* Congestion handling */
        struct sk_buff_head wakeupq;
        u16 window;
        u16 min_win;
        u16 ssthresh;
        u16 max_win;
        u16 cong_acks;
        u16 checkpoint;

        /* Fragmentation/reassembly */
        struct sk_buff *reasm_buf;
        struct sk_buff *reasm_tnlmsg;

        /* Broadcast */
        u16 ackers;
        u16 acked;
        u16 last_gap;
        struct tipc_gap_ack_blks *last_ga;
        struct tipc_link *bc_rcvlink;
        struct tipc_link *bc_sndlink;
        u8 nack_state;
        bool bc_peer_is_up;

        /* Statistics */
        struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
        BC_NACK_SND_CONDITIONAL,
        BC_NACK_SND_UNCONDITIONAL,
        BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))

/* Link FSM states:
 */
enum {
        LINK_ESTABLISHED     = 0xe,
        LINK_ESTABLISHING    = 0xe  << 4,
        LINK_RESET           = 0x1  << 8,
        LINK_RESETTING       = 0x2  << 12,
        LINK_PEER_RESET      = 0xd  << 16,
        LINK_FAILINGOVER     = 0xf  << 20,
        LINK_SYNCHING        = 0xc  << 24
};

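/* The FSM states above each occupy their own nibble of the 32-bit state
 * word, so membership of a set of states can be tested with a single mask
 * operation instead of a chain of comparisons (see link_is_up() and
 * tipc_link_is_blocked() below). A minimal sketch of the idiom, using a
 * hypothetical helper name:
 *
 *      static bool link_state_in(struct tipc_link *l, u32 state_set)
 *      {
 *              return l->state & state_set;
 *      }
 *
 *      // e.g. link_state_in(l, LINK_RESET | LINK_PEER_RESET)
 */
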
/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
        return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      bool probe_reply, u16 rcvgap,
                                      int tolerance, int priority,
                                      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
                                        struct sk_buff_head *xmitq);
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
                                    struct tipc_link *l, u8 start_index);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
                                     u16 acked, u16 gap,
                                     struct tipc_gap_ack_blks *ga,
                                     struct sk_buff_head *xmitq,
                                     bool *retransmitted, int *rc);
static void tipc_link_update_cwin(struct tipc_link *l, int released,
                                  bool retransmitted);
/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
        return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
        return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
        return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
        return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
        return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
        return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
        return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
        return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
        return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
        l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
        return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_min_win(struct tipc_link *l)
{
        return l->min_win;
}

int tipc_link_max_win(struct tipc_link *l)
{
        return l->max_win;
}

int tipc_link_prio(struct tipc_link *l)
{
        return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
        return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
        return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
        return l->net_plane;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
        l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
                           struct tipc_link *uc_l,
                           struct sk_buff_head *xmitq)
{
        struct tipc_link *rcv_l = uc_l->bc_rcvlink;

        snd_l->ackers++;
        rcv_l->acked = snd_l->snd_nxt - 1;
        snd_l->state = LINK_ESTABLISHED;
        tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
                              struct tipc_link *rcv_l,
                              struct sk_buff_head *xmitq)
{
        u16 ack = snd_l->snd_nxt - 1;

        snd_l->ackers--;
        rcv_l->bc_peer_is_up = true;
        rcv_l->state = LINK_ESTABLISHED;
        tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
        trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
        tipc_link_reset(rcv_l);
        rcv_l->state = LINK_RESET;
        if (!snd_l->ackers) {
                trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
                tipc_link_reset(snd_l);
                snd_l->state = LINK_RESET;
                __skb_queue_purge(xmitq);
        }
}

int tipc_link_bc_peers(struct tipc_link *l)
{
        return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
        struct sk_buff *skb = skb_peek(&l->deferdq);
        u16 gap = 0;

        if (more(l->snd_nxt, l->rcv_nxt))
                gap = l->snd_nxt - l->rcv_nxt;
        if (skb)
                gap = buf_seqno(skb) - l->rcv_nxt;
        return gap;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
        l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
        return l->mtu;
}

int tipc_link_mss(struct tipc_link *l)
{
#ifdef CONFIG_TIPC_CRYPTO
        return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
#else
        return l->mtu - INT_H_SIZE;
#endif
}

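/* Worked example: with a typical Ethernet bearer MTU of 1500 bytes and the
 * TIPC internal header size INT_H_SIZE of 40 bytes (per msg.h), the
 * non-crypto MSS is 1500 - 40 = 1460 bytes of payload per packet. Under
 * CONFIG_TIPC_CRYPTO the per-packet encryption overhead EMSG_OVERHEAD
 * (defined in crypto.h) is subtracted as well:
 *
 *      tipc_link_set_mtu(l, 1500);
 *      mss = tipc_link_mss(l);         // 1460 without crypto, less with it
 */
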
u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
        return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
        return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
        return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
        return l->state;
}

/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A, B, C, ...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @session: session to be used by link
 * @self: identity of own node
 * @peer: node id of peer node
 * @peer_id: 128-bit ID of peer
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
                      int tolerance, char net_plane, u32 mtu, int priority,
                      u32 min_win, u32 max_win, u32 session, u32 self,
                      u32 peer, u8 *peer_id, u16 peer_caps,
                      struct tipc_link *bc_sndlink,
                      struct tipc_link *bc_rcvlink,
                      struct sk_buff_head *inputq,
                      struct sk_buff_head *namedq,
                      struct tipc_link **link)
{
        char peer_str[NODE_ID_STR_LEN] = {0,};
        char self_str[NODE_ID_STR_LEN] = {0,};
        struct tipc_link *l;

        l = kzalloc(sizeof(*l), GFP_ATOMIC);
        if (!l)
                return false;
        *link = l;
        l->session = session;

        /* Set link name for unicast links only */
        if (peer_id) {
                tipc_nodeid2string(self_str, tipc_own_id(net));
                if (strlen(self_str) > 16)
                        sprintf(self_str, "%x", self);
                tipc_nodeid2string(peer_str, peer_id);
                if (strlen(peer_str) > 16)
                        sprintf(peer_str, "%x", peer);
        }
        /* Peer i/f name will be completed by reset/activate message */
        snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
                 self_str, if_name, peer_str);

        strcpy(l->if_name, if_name);
        l->addr = peer;
        l->peer_caps = peer_caps;
        l->net = net;
        l->in_session = false;
        l->bearer_id = bearer_id;
        l->tolerance = tolerance;
        if (bc_rcvlink)
                bc_rcvlink->tolerance = tolerance;
        l->net_plane = net_plane;
        l->advertised_mtu = mtu;
        l->mtu = mtu;
        l->priority = priority;
        tipc_link_set_queue_limits(l, min_win, max_win);
        l->ackers = 1;
        l->bc_sndlink = bc_sndlink;
        l->bc_rcvlink = bc_rcvlink;
        l->inputq = inputq;
        l->namedq = namedq;
        l->state = LINK_RESETTING;
        __skb_queue_head_init(&l->transmq);
        __skb_queue_head_init(&l->backlogq);
        __skb_queue_head_init(&l->deferdq);
        __skb_queue_head_init(&l->failover_deferdq);
        skb_queue_head_init(&l->wakeupq);
        skb_queue_head_init(l->inputq);
        return true;
}

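/* A minimal sketch of how a caller might use tipc_link_create(); the real
 * call sites live in the node layer (node.c), and the argument values here
 * are illustrative only:
 *
 *      struct tipc_link *l = NULL;
 *
 *      if (!tipc_link_create(net, if_name, bearer_id, tolerance, 'A', mtu,
 *                            TIPC_DEF_LINK_PRI, min_win, max_win, session,
 *                            self, peer, peer_id, peer_caps, bc_sndlink,
 *                            bc_rcvlink, inputq, namedq, &l))
 *              return -ENOMEM;
 *      // l starts out in LINK_RESETTING; tipc_link_fsm_evt() drives it up
 */
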
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_id: 128-bit ID of peer
 * @mtu: mtu to be used initially if no peers
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @peer_caps: bitmap describing peer node capabilities
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
                         int mtu, u32 min_win, u32 max_win, u16 peer_caps,
                         struct sk_buff_head *inputq,
                         struct sk_buff_head *namedq,
                         struct tipc_link *bc_sndlink,
                         struct tipc_link **link)
{
        struct tipc_link *l;

        if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
                              max_win, 0, ownnode, peer, NULL, peer_caps,
                              bc_sndlink, NULL, inputq, namedq, link))
                return false;

        l = *link;
        if (peer_id) {
                char peer_str[NODE_ID_STR_LEN] = {0,};

                tipc_nodeid2string(peer_str, peer_id);
                if (strlen(peer_str) > 16)
                        sprintf(peer_str, "%x", peer);
                /* Broadcast receiver link name: "broadcast-link:<peer>" */
                snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
                         peer_str);
        } else {
                strcpy(l->name, tipc_bclink_name);
        }
        trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
        tipc_link_reset(l);
        l->state = LINK_RESET;
        l->ackers = 0;
        l->bc_rcvlink = l;

        /* Broadcast send link is always up */
        if (link_is_bc_sndlink(l))
                l->state = LINK_ESTABLISHED;

        /* Disable replicast if even a single peer doesn't support it */
        if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
                tipc_bcast_toggle_rcast(net, false);

        return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
        int rc = 0;
        int old_state = l->state;

        switch (l->state) {
        case LINK_RESETTING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_RESET:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_PEER_RESET:
                switch (evt) {
                case LINK_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_FAILINGOVER:
                switch (evt) {
                case LINK_FAILOVER_END_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHING:
                switch (evt) {
                case LINK_ESTABLISH_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_PEER_RESET_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHED:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                        l->state = LINK_SYNCHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_SYNCHING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        default:
                pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
        }
        trace_tipc_link_fsm(l->name, old_state, l->state, evt);
        return rc;
illegal_evt:
        pr_err("Illegal FSM event %x in state %x on link %s\n",
               evt, l->state, l->name);
        trace_tipc_link_fsm(l->name, old_state, l->state, evt);
        return rc;
}

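/* A typical bring-up sequence as seen by the FSM above, assuming the peer
 * answers our RESET_MSG (illustrative only; the events are normally
 * generated by the protocol receive path, not called like this):
 *
 *      tipc_link_fsm_evt(l, LINK_RESET_EVT);       // RESETTING -> RESET
 *      tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);  // RESET -> ESTABLISHING
 *      rc = tipc_link_fsm_evt(l, LINK_ESTABLISH_EVT); // -> ESTABLISHED
 *
 * The return value carries TIPC_LINK_DOWN_EVT only on the failure
 * transitions out of LINK_ESTABLISHED/LINK_SYNCHING.
 */
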
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
        struct sk_buff *skb;
        struct tipc_msg *msg;
        int length;

        /* Update counters used in statistical profiling of send traffic */
        l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
        l->stats.queue_sz_counts++;

        skb = skb_peek(&l->transmq);
        if (!skb)
                return;
        msg = buf_msg(skb);
        length = msg_size(msg);

        if (msg_user(msg) == MSG_FRAGMENTER) {
                if (msg_type(msg) != FIRST_FRAGMENT)
                        return;
                length = msg_size(msg_inner_hdr(msg));
        }
        l->stats.msg_lengths_total += length;
        l->stats.msg_length_counts++;
        if (length <= 64)
                l->stats.msg_length_profile[0]++;
        else if (length <= 256)
                l->stats.msg_length_profile[1]++;
        else if (length <= 1024)
                l->stats.msg_length_profile[2]++;
        else if (length <= 4096)
                l->stats.msg_length_profile[3]++;
        else if (length <= 16384)
                l->stats.msg_length_profile[4]++;
        else if (length <= 32768)
                l->stats.msg_length_profile[5]++;
        else
                l->stats.msg_length_profile[6]++;
}

/**
 * tipc_link_too_silent - check if link is "too silent"
 * @l: tipc link to be checked
 *
 * Returns true if the link 'silent_intv_cnt' is about to reach the
 * 'abort_limit' value, otherwise false
 */
bool tipc_link_too_silent(struct tipc_link *l)
{
        return (l->silent_intv_cnt + 2 > l->abort_limit);
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        int mtyp = 0;
        int rc = 0;
        bool state = false;
        bool probe = false;
        bool setup = false;
        u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
        u16 bc_acked = l->bc_rcvlink->acked;
        struct tipc_mon_state *mstate = &l->mon_state;

        trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
        trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
        switch (l->state) {
        case LINK_ESTABLISHED:
        case LINK_SYNCHING:
                mtyp = STATE_MSG;
                link_profile_stats(l);
                tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
                if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                state = bc_acked != bc_snt;
                state |= l->bc_rcvlink->rcv_unacked;
                state |= l->rcv_unacked;
                state |= !skb_queue_empty(&l->transmq);
                probe = mstate->probing;
                probe |= l->silent_intv_cnt;
                if (probe || mstate->monitoring)
                        l->silent_intv_cnt++;
                probe |= !skb_queue_empty(&l->deferdq);
                if (l->snd_nxt == l->checkpoint) {
                        tipc_link_update_cwin(l, 0, 0);
                        probe = true;
                }
                l->checkpoint = l->snd_nxt;
                break;
        case LINK_RESET:
                setup = l->rst_cnt++ <= 4;
                setup |= !(l->rst_cnt % 16);
                mtyp = RESET_MSG;
                break;
        case LINK_ESTABLISHING:
                setup = true;
                mtyp = ACTIVATE_MSG;
                break;
        case LINK_PEER_RESET:
        case LINK_RESETTING:
        case LINK_FAILINGOVER:
                break;
        default:
                break;
        }

        if (state || probe || setup)
                tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

        return rc;
}

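/* Sketch of how the per-node timer drives this function (the real loop
 * lives in node.c; the bearer call below is illustrative and assumes the
 * 5-argument tipc_bearer_xmit() of the crypto-era API):
 *
 *      struct sk_buff_head xmitq;
 *      int rc;
 *
 *      __skb_queue_head_init(&xmitq);
 *      rc = tipc_link_timeout(l, &xmitq);
 *      // hand any generated STATE/RESET/ACTIVATE messages to the bearer
 *      tipc_bearer_xmit(net, bearer_id, &xmitq, maddr, NULL);
 *      if (rc & TIPC_LINK_DOWN_EVT)
 *              ; // caller takes the link down
 */
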
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
        u32 dnode = tipc_own_addr(l->net);
        u32 dport = msg_origport(hdr);
        struct sk_buff *skb;

        /* Create and schedule wakeup pseudo message */
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                              dnode, l->addr, dport, 0, 0);
        if (!skb)
                return -ENOBUFS;
        msg_set_dest_droppable(buf_msg(skb), true);
        TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
        skb_queue_tail(&l->wakeupq, skb);
        l->stats.link_congs++;
        trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
        return -ELINKCONG;
}

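/* Note: the -ELINKCONG returned here propagates up through tipc_link_xmit()
 * to the socket layer, which parks the sender until the SOCK_WAKEUP pseudo
 * message created above is delivered by link_prepare_wakeup(). A rough
 * sketch of the caller-side contract (not the socket code verbatim):
 *
 *      rc = tipc_link_xmit(l, &pkts, &xmitq);
 *      if (rc == -ELINKCONG)
 *              ; // block the sender; it is woken when the wakeup message
 *                // reaches its input queue
 */
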
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
        struct sk_buff_head *wakeupq = &l->wakeupq;
        struct sk_buff_head *inputq = l->inputq;
        struct sk_buff *skb, *tmp;
        struct sk_buff_head tmpq;
        int avail[5] = {0,};
        int imp = 0;

        __skb_queue_head_init(&tmpq);

        for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
                avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;

        skb_queue_walk_safe(wakeupq, skb, tmp) {
                imp = TIPC_SKB_CB(skb)->chain_imp;
                if (avail[imp] <= 0)
                        continue;
                avail[imp]--;
                __skb_unlink(skb, wakeupq);
                __skb_queue_tail(&tmpq, skb);
        }

        spin_lock_bh(&inputq->lock);
        skb_queue_splice_tail(&tmpq, inputq);
        spin_unlock_bh(&inputq->lock);
}

/**
 * tipc_link_set_skb_retransmit_time - set the time at which retransmission of
 *                                     the given skb should be next attempted
 * @skb: skb to set a future retransmission time for
 * @l: link the skb will be transmitted on
 */
static void tipc_link_set_skb_retransmit_time(struct sk_buff *skb,
                                              struct tipc_link *l)
{
        if (link_is_bc_sndlink(l))
                TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
        else
                TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
}

void tipc_link_reset(struct tipc_link *l)
{
        struct sk_buff_head list;
        u32 imp;

        __skb_queue_head_init(&list);

        l->in_session = false;
        /* Force re-synch of peer session number before establishing */
        l->peer_session--;
        l->session++;
        l->mtu = l->advertised_mtu;

        spin_lock_bh(&l->wakeupq.lock);
        skb_queue_splice_init(&l->wakeupq, &list);
        spin_unlock_bh(&l->wakeupq.lock);

        spin_lock_bh(&l->inputq->lock);
        skb_queue_splice_init(&list, l->inputq);
        spin_unlock_bh(&l->inputq->lock);

        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
        __skb_queue_purge(&l->backlogq);
        __skb_queue_purge(&l->failover_deferdq);
        for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
                l->backlog[imp].len = 0;
                l->backlog[imp].target_bskb = NULL;
        }
        kfree_skb(l->reasm_buf);
        kfree_skb(l->reasm_tnlmsg);
        kfree_skb(l->failover_reasm_skb);
        l->reasm_buf = NULL;
        l->reasm_tnlmsg = NULL;
        l->failover_reasm_skb = NULL;
        l->rcv_unacked = 0;
        l->snd_nxt = 1;
        l->rcv_nxt = 1;
        l->snd_nxt_state = 1;
        l->rcv_nxt_state = 1;
        l->acked = 0;
        l->last_gap = 0;
        kfree(l->last_ga);
        l->last_ga = NULL;
        l->silent_intv_cnt = 0;
        l->rst_cnt = 0;
        l->bc_peer_is_up = false;
        memset(&l->mon_state, 0, sizeof(l->mon_state));
        tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb_peek(list));
        struct sk_buff_head *backlogq = &l->backlogq;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff *skb, *_skb;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
        int pkt_cnt = skb_queue_len(list);
        int imp = msg_importance(hdr);
        unsigned int mss = tipc_link_mss(l);
        unsigned int cwin = l->window;
        unsigned int mtu = l->mtu;
        bool new_bundle;
        int rc = 0;

        if (unlikely(msg_size(hdr) > mtu)) {
                pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
                        skb_queue_len(list), msg_user(hdr),
                        msg_type(hdr), msg_size(hdr), mtu);
                __skb_queue_purge(list);
                return -EMSGSIZE;
        }

        /* Allow oversubscription of one data msg per source at congestion */
        if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
                if (imp == TIPC_SYSTEM_IMPORTANCE) {
                        pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
                        return -ENOBUFS;
                }
                rc = link_schedule_user(l, hdr);
        }

        if (pkt_cnt > 1) {
                l->stats.sent_fragmented++;
                l->stats.sent_fragments += pkt_cnt;
        }

        /* Prepare each packet for sending, and add to relevant queue: */
        while ((skb = __skb_dequeue(list))) {
                if (likely(skb_queue_len(transmq) < cwin)) {
                        hdr = buf_msg(skb);
                        msg_set_seqno(hdr, seqno);
                        msg_set_ack(hdr, ack);
                        msg_set_bcast_ack(hdr, bc_ack);
                        _skb = skb_clone(skb, GFP_ATOMIC);
                        if (!_skb) {
                                kfree_skb(skb);
                                __skb_queue_purge(list);
                                return -ENOBUFS;
                        }
                        __skb_queue_tail(transmq, skb);
                        tipc_link_set_skb_retransmit_time(skb, l);
                        __skb_queue_tail(xmitq, _skb);
                        TIPC_SKB_CB(skb)->ackers = l->ackers;
                        l->rcv_unacked = 0;
                        l->stats.sent_pkts++;
                        seqno++;
                        continue;
                }
                if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
                                        mss, l->addr, &new_bundle)) {
                        if (skb) {
                                /* Keep a ref. to the skb for next try */
                                l->backlog[imp].target_bskb = skb;
                                l->backlog[imp].len++;
                                __skb_queue_tail(backlogq, skb);
                        } else {
                                if (new_bundle) {
                                        l->stats.sent_bundles++;
                                        l->stats.sent_bundled++;
                                }
                                l->stats.sent_bundled++;
                        }
                        continue;
                }
                l->backlog[imp].target_bskb = NULL;
                l->backlog[imp].len += (1 + skb_queue_len(list));
                __skb_queue_tail(backlogq, skb);
                skb_queue_splice_tail_init(list, backlogq);
        }
        l->snd_nxt = seqno;
        return rc;
}

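/* A minimal sketch of the send path through tipc_link_xmit(); error
 * handling is trimmed, and the tipc_msg_build() call only illustrates what
 * the socket layer does before invoking this function:
 *
 *      struct sk_buff_head pkts, xmitq;
 *      int rc;
 *
 *      __skb_queue_head_init(&pkts);
 *      __skb_queue_head_init(&xmitq);
 *      // fragment user data into packets no larger than the link MTU
 *      rc = tipc_msg_build(mhdr, m, 0, dlen, tipc_link_mtu(l), &pkts);
 *      if (rc >= 0)
 *              rc = tipc_link_xmit(l, &pkts, &xmitq);
 *      // on success, packets on xmitq go to the bearer; -ELINKCONG means
 *      // the sender was queued for a later wakeup
 */
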
static void tipc_link_update_cwin(struct tipc_link *l, int released,
                                  bool retransmitted)
{
        int bklog_len = skb_queue_len(&l->backlogq);
        struct sk_buff_head *txq = &l->transmq;
        int txq_len = skb_queue_len(txq);
        u16 cwin = l->window;

        /* Enter fast recovery */
        if (unlikely(retransmitted)) {
                l->ssthresh = max_t(u16, l->window / 2, 300);
                l->window = min_t(u16, l->ssthresh, l->window);
                return;
        }
        /* Enter slow start */
        if (unlikely(!released)) {
                l->ssthresh = max_t(u16, l->window / 2, 300);
                l->window = l->min_win;
                return;
        }
        /* Don't increase window if no pressure on the transmit queue */
        if (txq_len + bklog_len < cwin)
                return;

        /* Don't increase window if there are holes in the transmit queue */
        if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
                return;

        l->cong_acks += released;

        /* Slow start */
        if (cwin <= l->ssthresh) {
                l->window = min_t(u16, cwin + released, l->max_win);
                return;
        }
        /* Congestion avoidance */
        if (l->cong_acks < cwin)
                return;
        l->window = min_t(u16, ++cwin, l->max_win);
        l->cong_acks = 0;
}

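/* Worked example for the algorithm above, assuming min_win = 50 and
 * max_win = 8191 (the usual TIPC defaults): while cwin <= ssthresh the
 * window grows by the number of packets released per ack batch, i.e.
 * roughly doubling per round trip (slow start). Once cwin exceeds ssthresh,
 * congestion avoidance adds only one to the window per full window of acks
 * (cong_acks >= cwin). Any retransmission re-arms fast recovery: ssthresh
 * drops to half the current window (never below 300) and growth restarts
 * from there.
 */
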
static void tipc_link_advance_backlog(struct tipc_link *l,
                                      struct sk_buff_head *xmitq)
{
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        struct sk_buff_head *txq = &l->transmq;
        struct sk_buff *skb, *_skb;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
        struct tipc_msg *hdr;
        u16 cwin = l->window;
        u32 imp;

        while (skb_queue_len(txq) < cwin) {
                skb = skb_peek(&l->backlogq);
                if (!skb)
                        break;
                _skb = skb_clone(skb, GFP_ATOMIC);
                if (!_skb)
                        break;
                __skb_dequeue(&l->backlogq);
                hdr = buf_msg(skb);
                imp = msg_importance(hdr);
                l->backlog[imp].len--;
                if (unlikely(skb == l->backlog[imp].target_bskb))
                        l->backlog[imp].target_bskb = NULL;
                __skb_queue_tail(&l->transmq, skb);
                tipc_link_set_skb_retransmit_time(skb, l);

                __skb_queue_tail(xmitq, _skb);
                TIPC_SKB_CB(skb)->ackers = l->ackers;
                msg_set_seqno(hdr, seqno);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_ack);
                l->rcv_unacked = 0;
                l->stats.sent_pkts++;
                seqno++;
        }
        l->snd_nxt = seqno;
}

/**
 * link_retransmit_failure() - Detect repeated retransmit failures
 * @l: tipc link sender
 * @r: tipc link receiver (= l in case of unicast)
 * @rc: returned code
 *
 * Return: true if repeated retransmit failures have occurred, otherwise
 * false
 */
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
                                    int *rc)
{
        struct sk_buff *skb = skb_peek(&l->transmq);
        struct tipc_msg *hdr;

        if (!skb)
                return false;

        if (!TIPC_SKB_CB(skb)->retr_cnt)
                return false;

        if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
                        msecs_to_jiffies(r->tolerance * 10)))
                return false;

        hdr = buf_msg(skb);
        if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
                return false;

        pr_warn("Retransmission failure on link <%s>\n", l->name);
        link_print(l, "State of link ");
        pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
                msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
        pr_info("sqno %u, prev: %x, dest: %x\n",
                msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
        pr_info("retr_stamp %d, retr_cnt %d\n",
                jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
                TIPC_SKB_CB(skb)->retr_cnt);

        trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
        trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
        trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");

        if (link_is_bc_sndlink(l)) {
                r->state = LINK_RESET;
                *rc |= TIPC_LINK_DOWN_EVT;
        } else {
                *rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
        }

        return true;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
                            struct sk_buff_head *inputq)
{
        struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
        struct tipc_msg *hdr = buf_msg(skb);

        switch (msg_user(hdr)) {
        case TIPC_LOW_IMPORTANCE:
        case TIPC_MEDIUM_IMPORTANCE:
        case TIPC_HIGH_IMPORTANCE:
        case TIPC_CRITICAL_IMPORTANCE:
                if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
                        skb_queue_tail(mc_inputq, skb);
                        return true;
                }
                fallthrough;
        case CONN_MANAGER:
                skb_queue_tail(inputq, skb);
                return true;
        case GROUP_PROTOCOL:
                skb_queue_tail(mc_inputq, skb);
                return true;
        case NAME_DISTRIBUTOR:
                l->bc_rcvlink->state = LINK_ESTABLISHED;
                skb_queue_tail(l->namedq, skb);
                return true;
        case MSG_BUNDLER:
        case TUNNEL_PROTOCOL:
        case MSG_FRAGMENTER:
        case BCAST_PROTOCOL:
                return false;
#ifdef CONFIG_TIPC_CRYPTO
        case MSG_CRYPTO:
                tipc_crypto_msg_rcv(l->net, skb);
                return true;
#endif
        default:
                pr_warn("Dropping received illegal msg type\n");
                kfree_skb(skb);
                return true;
        }
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
                           struct sk_buff_head *inputq,
                           struct sk_buff **reasm_skb)
{
        struct tipc_msg *hdr = buf_msg(skb);
        struct sk_buff *iskb;
        struct sk_buff_head tmpq;
        int usr = msg_user(hdr);
        int pos = 0;

        if (usr == MSG_BUNDLER) {
                skb_queue_head_init(&tmpq);
                l->stats.recv_bundles++;
                l->stats.recv_bundled += msg_msgcnt(hdr);
                while (tipc_msg_extract(skb, &iskb, &pos))
                        tipc_data_input(l, iskb, &tmpq);
                tipc_skb_queue_splice_tail(&tmpq, inputq);
                return 0;
        } else if (usr == MSG_FRAGMENTER) {
                l->stats.recv_fragments++;
                if (tipc_buf_append(reasm_skb, &skb)) {
                        l->stats.recv_fragmented++;
                        tipc_data_input(l, skb, inputq);
                } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
                        pr_warn_ratelimited("Unable to build fragment list\n");
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
                return 0;
        } else if (usr == BCAST_PROTOCOL) {
                tipc_bcast_lock(l->net);
                tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
                tipc_bcast_unlock(l->net);
        }

        kfree_skb(skb);
        return 0;
}

/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
 *                       inner message along with the ones in the old link's
 *                       deferdq
 * @l: tunnel link
 * @skb: TUNNEL_PROTOCOL message
 * @inputq: queue to put messages ready for delivery
 */
static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
                             struct sk_buff_head *inputq)
{
        struct sk_buff **reasm_skb = &l->failover_reasm_skb;
        struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
        struct sk_buff_head *fdefq = &l->failover_deferdq;
        struct tipc_msg *hdr = buf_msg(skb);
        struct sk_buff *iskb;
        int ipos = 0;
        int rc = 0;
        u16 seqno;

        if (msg_type(hdr) == SYNCH_MSG) {
                kfree_skb(skb);
                return 0;
        }

        /* Not a fragment? */
        if (likely(!msg_nof_fragms(hdr))) {
                if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
                        pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
                                            skb_queue_len(fdefq));
                        return 0;
                }
                kfree_skb(skb);
        } else {
                /* Set fragment type for buf_append */
                if (msg_fragm_no(hdr) == 1)
                        msg_set_type(hdr, FIRST_FRAGMENT);
                else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
                        msg_set_type(hdr, FRAGMENT);
                else
                        msg_set_type(hdr, LAST_FRAGMENT);

                if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
                        /* Successful but non-complete reassembly? */
                        if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
                                return 0;
                        pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
                iskb = skb;
        }

        do {
                seqno = buf_seqno(iskb);
                if (unlikely(less(seqno, l->drop_point))) {
                        kfree_skb(iskb);
                        continue;
                }
                if (unlikely(seqno != l->drop_point)) {
                        __tipc_skb_queue_sorted(fdefq, seqno, iskb);
                        continue;
                }

                l->drop_point++;
                if (!tipc_data_input(l, iskb, inputq))
                        rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
                if (unlikely(rc))
                        break;
        } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));

        return rc;
}

/**
 * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
 * @ga: returned pointer to the Gap ACK blocks if any
 * @l: the tipc link
 * @hdr: the PROTOCOL/STATE_MSG header
 * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
 *
 * Return: the total Gap ACK blocks size
 */
u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
                          struct tipc_msg *hdr, bool uc)
{
        struct tipc_gap_ack_blks *p;
        u16 sz = 0;

        /* Does peer support the Gap ACK blocks feature? */
        if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
                p = (struct tipc_gap_ack_blks *)msg_data(hdr);
                sz = ntohs(p->len);
                /* Sanity check */
                if (sz == struct_size(p, gacks, p->ugack_cnt + p->bgack_cnt)) {
                        /* Good, check if the desired type exists */
                        if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
                                goto ok;
                /* Backward compatible: peer might not support bc, but uc? */
                } else if (uc && sz == struct_size(p, gacks, p->ugack_cnt)) {
                        if (p->ugack_cnt) {
                                p->bgack_cnt = 0;
                                goto ok;
                        }
                }
        }
        /* Other cases: ignore! */
        p = NULL;

ok:
        *ga = p;
        return sz;
}

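/* Once validated above, the blocks form a flat array with the broadcast
 * blocks first, followed by the unicast ones. A receiver could walk them
 * like this (illustrative sketch only):
 *
 *      u16 i, ack, gap;
 *
 *      for (i = 0; i < ga->bgack_cnt + ga->ugack_cnt; i++) {
 *              ack = ntohs(ga->gacks[i].ack);
 *              gap = ntohs(ga->gacks[i].gap);
 *              // packets up to 'ack' are acknowledged; the following
 *              // 'gap' sequence numbers are reported missing
 *      }
 */
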
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
                                    struct tipc_link *l, u8 start_index)
{
        struct tipc_gap_ack *gacks = &ga->gacks[start_index];
        struct sk_buff *skb = skb_peek(&l->deferdq);
        u16 expect, seqno = 0;
        u8 n = 0;

        if (!skb)
                return 0;

        expect = buf_seqno(skb);
        skb_queue_walk(&l->deferdq, skb) {
                seqno = buf_seqno(skb);
                if (unlikely(more(seqno, expect))) {
                        gacks[n].ack = htons(expect - 1);
                        gacks[n].gap = htons(seqno - expect);
                        if (++n >= MAX_GAP_ACK_BLKS / 2) {
                                pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
                                                    l->name, n,
                                                    skb_queue_len(&l->deferdq));
                                return n;
                        }
                } else if (unlikely(less(seqno, expect))) {
                        pr_warn("Unexpected skb in deferdq!\n");
                        continue;
                }
                expect = seqno + 1;
        }

        /* last block */
        gacks[n].ack = htons(seqno);
        gacks[n].gap = 0;
        n++;
        return n;
}

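/* Worked example for the walk above: if the deferred queue holds sequence
 * numbers 12, 13 and 16, the loop emits one block per hole plus a
 * terminating block:
 *
 *      gacks[0] = { .ack = 13, .gap = 2 };     // 14 and 15 are missing
 *      gacks[1] = { .ack = 16, .gap = 0 };     // last block, no gap after
 *
 * (values shown in host order; on the wire they are htons()-converted).
 * The gap between rcv_nxt and the first deferred packet is not encoded
 * here; it is carried separately in the protocol message header.
 */
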
/* tipc_build_gap_ack_blks - build Gap ACK blocks
 * @l: tipc unicast link
 * @hdr: the tipc message buffer to store the Gap ACK blocks after built
 *
 * The function builds Gap ACK blocks for both the unicast and broadcast
 * receiver links of a certain peer. After building, the buffer has the
 * network data format described by the struct tipc_gap_ack_blks definition.
 *
 * Returns the actual length of the built blocks
 */
1467 static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
1468 {
1469         struct tipc_link *bcl = l->bc_rcvlink;
1470         struct tipc_gap_ack_blks *ga;
1471         u16 len;
1472
1473         ga = (struct tipc_gap_ack_blks *)msg_data(hdr);
1474
1475         /* Start with broadcast link first */
1476         tipc_bcast_lock(bcl->net);
1477         msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1478         msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1479         ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
1480         tipc_bcast_unlock(bcl->net);
1481
1482         /* Now the unicast link, but only when an explicit NACK is requested */
1483         ga->ugack_cnt = (msg_seq_gap(hdr)) ?
1484                         __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
1485
1486         /* Total len */
1487         len = struct_size(ga, gacks, ga->bgack_cnt + ga->ugack_cnt);
1488         ga->len = htons(len);
1489         return len;
1490 }
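/* Sketch of the resulting data area (field order per the struct
 * tipc_gap_ack_blks definition): a 16-bit 'len' in network byte order
 * covering the whole area, the two u8 block counts, then the gacks[]
 * array with the bgack_cnt broadcast blocks first, followed by the
 * ugack_cnt unicast blocks, matching the sanity check in
 * tipc_get_gap_ack_blks().
 */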
1491
1492 /* tipc_link_advance_transmq - advance the TIPC link transmq by releasing
1493  *                             acked packets and retransmitting if gaps are
1494  *                             found
1495  * @l: tipc link with the transmq to be advanced
1496  * @r: tipc "receiver" link, i.e. the broadcast rcv link (= "l" if unicast)
1497  * @acked: seqno of the last packet acked by peer without any gap before it
1498  * @gap: # of gap packets
1499  * @ga: buffer pointer to Gap ACK blocks from peer
1500  * @xmitq: queue for accumulating the retransmitted packets, if any
1501  * @retransmitted: set to true if a retransmission is actually issued
1502  * @rc: returned code, e.g. TIPC_LINK_DOWN_EVT if repeated retransmit
1503  *      failures happen (unlikely)
1504  *
1505  * Return: the number of packets released from the link transmq
1506  */
1507 static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
1508                                      u16 acked, u16 gap,
1509                                      struct tipc_gap_ack_blks *ga,
1510                                      struct sk_buff_head *xmitq,
1511                                      bool *retransmitted, int *rc)
1512 {
1513         struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
1514         struct tipc_gap_ack *gacks = NULL;
1515         struct sk_buff *skb, *_skb, *tmp;
1516         struct tipc_msg *hdr;
1517         u32 qlen = skb_queue_len(&l->transmq);
1518         u16 nacked = acked, ngap = gap, gack_cnt = 0;
1519         u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1520         u16 ack = l->rcv_nxt - 1;
1521         u16 seqno, n = 0;
1522         u16 end = r->acked, start = end, offset = r->last_gap;
1523         u16 si = (last_ga) ? last_ga->start_index : 0;
1524         bool is_uc = !link_is_bc_sndlink(l);
1525         bool bc_has_acked = false;
1526
1527         trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);
1528
1529         /* Determine Gap ACK blocks if any for the particular link */
1530         if (ga && is_uc) {
1531                 /* Get the Gap ACKs, uc part */
1532                 gack_cnt = ga->ugack_cnt;
1533                 gacks = &ga->gacks[ga->bgack_cnt];
1534         } else if (ga) {
1535                 /* Copy the Gap ACKs, bc part, for later renewal if needed */
1536                 this_ga = kmemdup(ga, struct_size(ga, gacks, ga->bgack_cnt),
1537                                   GFP_ATOMIC);
1538                 if (likely(this_ga)) {
1539                         this_ga->start_index = 0;
1540                         /* Start with the bc Gap ACKs */
1541                         gack_cnt = this_ga->bgack_cnt;
1542                         gacks = &this_ga->gacks[0];
1543                 } else {
1544                         /* Allocation failed; simply ignore the bc Gap ACKs */
1545                         pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
1546                 }
1547         }
1548
1549         /* Advance the link transmq */
1550         skb_queue_walk_safe(&l->transmq, skb, tmp) {
1551                 seqno = buf_seqno(skb);
1552
1553 next_gap_ack:
1554                 if (less_eq(seqno, nacked)) {
1555                         if (is_uc)
1556                                 goto release;
1557                         /* Skip packets peer has already acked */
1558                         if (!more(seqno, r->acked))
1559                                 continue;
1560                         /* Get the next of last Gap ACK blocks */
1561                         while (more(seqno, end)) {
1562                                 if (!last_ga || si >= last_ga->bgack_cnt)
1563                                         break;
1564                                 start = end + offset + 1;
1565                                 end = ntohs(last_ga->gacks[si].ack);
1566                                 offset = ntohs(last_ga->gacks[si].gap);
1567                                 si++;
1568                                 WARN_ONCE(more(start, end) ||
1569                                           (!offset &&
1570                                            si < last_ga->bgack_cnt) ||
1571                                           si > MAX_GAP_ACK_BLKS,
1572                                           "Corrupted Gap ACK: %d %d %d %d %d\n",
1573                                           start, end, offset, si,
1574                                           last_ga->bgack_cnt);
1575                         }
1576                         /* Check against the last Gap ACK block */
1577                         if (in_range(seqno, start, end))
1578                                 continue;
1579                         /* Update/release the packet peer is acking */
1580                         bc_has_acked = true;
1581                         if (--TIPC_SKB_CB(skb)->ackers)
1582                                 continue;
1583 release:
1584                         /* release skb */
1585                         __skb_unlink(skb, &l->transmq);
1586                         kfree_skb(skb);
1587                 } else if (less_eq(seqno, nacked + ngap)) {
1588                         /* First gap: check for repeated retransmit failures */
1589                         if (unlikely(seqno == acked + 1 &&
1590                                      link_retransmit_failure(l, r, rc))) {
1591                                 /* Ignore this bc Gap ACKs if any */
1592                                 kfree(this_ga);
1593                                 this_ga = NULL;
1594                                 break;
1595                         }
1596                         /* Retransmit skb only if its retransmit timer has expired */
1597                         if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1598                                 continue;
1599                         tipc_link_set_skb_retransmit_time(skb, l);
1600                         _skb = pskb_copy(skb, GFP_ATOMIC);
1601                         if (!_skb)
1602                                 continue;
1603                         hdr = buf_msg(_skb);
1604                         msg_set_ack(hdr, ack);
1605                         msg_set_bcast_ack(hdr, bc_ack);
1606                         _skb->priority = TC_PRIO_CONTROL;
1607                         __skb_queue_tail(xmitq, _skb);
1608                         l->stats.retransmitted++;
1609                         if (!is_uc)
1610                                 r->stats.retransmitted++;
1611                         *retransmitted = true;
1612                         /* Increase actual retrans counter & mark first time */
1613                         if (!TIPC_SKB_CB(skb)->retr_cnt++)
1614                                 TIPC_SKB_CB(skb)->retr_stamp = jiffies;
1615                 } else {
1616                         /* retry with Gap ACK blocks if any */
1617                         if (n >= gack_cnt)
1618                                 break;
1619                         nacked = ntohs(gacks[n].ack);
1620                         ngap = ntohs(gacks[n].gap);
1621                         n++;
1622                         goto next_gap_ack;
1623                 }
1624         }
1625
1626         /* Renew last Gap ACK blocks for bc if needed */
1627         if (bc_has_acked) {
1628                 if (this_ga) {
1629                         kfree(last_ga);
1630                         r->last_ga = this_ga;
1631                         r->last_gap = gap;
1632                 } else if (last_ga) {
1633                         if (less(acked, start)) {
1634                                 si--;
1635                                 offset = start - acked - 1;
1636                         } else if (less(acked, end)) {
1637                                 acked = end;
1638                         }
1639                         if (si < last_ga->bgack_cnt) {
1640                                 last_ga->start_index = si;
1641                                 r->last_gap = offset;
1642                         } else {
1643                                 kfree(last_ga);
1644                                 r->last_ga = NULL;
1645                                 r->last_gap = 0;
1646                         }
1647                 } else {
1648                         r->last_gap = 0;
1649                 }
1650                 r->acked = acked;
1651         } else {
1652                 kfree(this_ga);
1653         }
1654
1655         return qlen - skb_queue_len(&l->transmq);
1656 }
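/* Worked example (hypothetical numbers, unicast case): with acked = 10,
 * gap = 2 and peer Gap ACK blocks {ack: 15, gap: 1}, {ack: 20, gap: 0},
 * the walk above releases 1..10, retransmits 11..12, releases 13..15,
 * retransmits 16, releases 17..20, and leaves everything from 21 on in
 * the transmq.
 */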
1657
1658 /* tipc_link_build_state_msg: prepare link state message for transmission
1659  *
1660  * Note that sending of broadcast ack is coordinated among nodes, to reduce
1661  * risk of ack storms towards the sender
1662  */
1663 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1664 {
1665         if (!l)
1666                 return 0;
1667
1668         /* Broadcast ACK must be sent via a unicast link => defer to caller */
1669         if (link_is_bc_rcvlink(l)) {
1670                 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1671                         return 0;
1672                 l->rcv_unacked = 0;
1673
1674                 /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1675                 l->snd_nxt = l->rcv_nxt;
1676                 return TIPC_LINK_SND_STATE;
1677         }
1678         /* Unicast ACK */
1679         l->rcv_unacked = 0;
1680         l->stats.sent_acks++;
1681         tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
1682         return 0;
1683 }
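/* The '(rcv_nxt ^ own_addr) & 0xf' test above staggers broadcast acks:
 * a node acks only when the low nibble of rcv_nxt XOR its own address
 * equals 0xf, so each node acks a different 1-in-16 slice of the sequence
 * space. Illustration (hypothetical address): a node whose address ends
 * in 0x3 acks only when rcv_nxt ends in 0xc, since 0x3 ^ 0xc == 0xf.
 */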
1684
1685 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1686  */
1687 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1688 {
1689         int mtyp = RESET_MSG;
1690         struct sk_buff *skb;
1691
1692         if (l->state == LINK_ESTABLISHING)
1693                 mtyp = ACTIVATE_MSG;
1694
1695         tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
1696
1697         /* Inform peer that this endpoint is going down if applicable */
1698         skb = skb_peek_tail(xmitq);
1699         if (skb && (l->state == LINK_RESET))
1700                 msg_set_peer_stopping(buf_msg(skb), 1);
1701 }
1702
1703 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1704  * Note that sending of broadcast NACK is coordinated among nodes, to
1705  * reduce the risk of NACK storms towards the sender
1706  */
1707 static int tipc_link_build_nack_msg(struct tipc_link *l,
1708                                     struct sk_buff_head *xmitq)
1709 {
1710         u32 def_cnt = ++l->stats.deferred_recv;
1711         struct sk_buff_head *dfq = &l->deferdq;
1712         u32 defq_len = skb_queue_len(dfq);
1713         int match1, match2;
1714
1715         if (link_is_bc_rcvlink(l)) {
1716                 match1 = def_cnt & 0xf;
1717                 match2 = tipc_own_addr(l->net) & 0xf;
1718                 if (match1 == match2)
1719                         return TIPC_LINK_SND_STATE;
1720                 return 0;
1721         }
1722
1723         if (defq_len >= 3 && !((defq_len - 3) % 16)) {
1724                 u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1725
1726                 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
1727                                           rcvgap, 0, 0, xmitq);
1728         }
1729         return 0;
1730 }
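/* The 'defq_len >= 3 && !((defq_len - 3) % 16)' test above rate-limits
 * unicast NACKs: one is sent when the deferred queue reaches length 3,
 * then again at lengths 19, 35, 51, ..., rather than for every
 * out-of-order arrival.
 */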
1731
1732 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1733  * @l: the link that should handle the message
1734  * @skb: TIPC packet
1735  * @xmitq: queue to place packets to be sent after this call
1736  */
1737 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1738                   struct sk_buff_head *xmitq)
1739 {
1740         struct sk_buff_head *defq = &l->deferdq;
1741         struct tipc_msg *hdr = buf_msg(skb);
1742         u16 seqno, rcv_nxt, win_lim;
1743         int released = 0;
1744         int rc = 0;
1745
1746         /* Verify and update link state */
1747         if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1748                 return tipc_link_proto_rcv(l, skb, xmitq);
1749
1750         /* Don't send probe at next timeout expiration */
1751         l->silent_intv_cnt = 0;
1752
1753         do {
1754                 hdr = buf_msg(skb);
1755                 seqno = msg_seqno(hdr);
1756                 rcv_nxt = l->rcv_nxt;
1757                 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1758
1759                 if (unlikely(!link_is_up(l))) {
1760                         if (l->state == LINK_ESTABLISHING)
1761                                 rc = TIPC_LINK_UP_EVT;
1762                         kfree_skb(skb);
1763                         break;
1764                 }
1765
1766                 /* Drop if outside receive window */
1767                 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1768                         l->stats.duplicates++;
1769                         kfree_skb(skb);
1770                         break;
1771                 }
1772                 released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
1773                                                       NULL, NULL, NULL, NULL);
1774
1775                 /* Defer delivery if sequence gap */
1776                 if (unlikely(seqno != rcv_nxt)) {
1777                         if (!__tipc_skb_queue_sorted(defq, seqno, skb))
1778                                 l->stats.duplicates++;
1779                         rc |= tipc_link_build_nack_msg(l, xmitq);
1780                         break;
1781                 }
1782
1783                 /* Deliver packet */
1784                 l->rcv_nxt++;
1785                 l->stats.recv_pkts++;
1786
1787                 if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1788                         rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1789                 else if (!tipc_data_input(l, skb, l->inputq))
1790                         rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
1791                 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1792                         rc |= tipc_link_build_state_msg(l, xmitq);
1793                 if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1794                         break;
1795         } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
1796
1797         /* Forward queues and wake up waiting users */
1798         if (released) {
1799                 tipc_link_update_cwin(l, released, 0);
1800                 tipc_link_advance_backlog(l, xmitq);
1801                 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1802                         link_prepare_wakeup(l);
1803         }
1804         return rc;
1805 }
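/* Receive window illustration: a packet is accepted only if its seqno
 * lies in [rcv_nxt, rcv_nxt + TIPC_MAX_LINK_WIN]; anything outside is
 * counted as a duplicate and dropped, while an in-window packet ahead of
 * rcv_nxt is parked in the deferdq until the gap before it is filled.
 */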
1806
1807 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1808                                       bool probe_reply, u16 rcvgap,
1809                                       int tolerance, int priority,
1810                                       struct sk_buff_head *xmitq)
1811 {
1812         struct tipc_mon_state *mstate = &l->mon_state;
1813         struct sk_buff_head *dfq = &l->deferdq;
1814         struct tipc_link *bcl = l->bc_rcvlink;
1815         struct tipc_msg *hdr;
1816         struct sk_buff *skb;
1817         bool node_up = link_is_up(bcl);
1818         u16 glen = 0, bc_rcvgap = 0;
1819         int dlen = 0;
1820         void *data;
1821
1822         /* Don't send protocol message during reset or link failover */
1823         if (tipc_link_is_blocked(l))
1824                 return;
1825
1826         if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1827                 return;
1828
1829         if ((probe || probe_reply) && !skb_queue_empty(dfq))
1830                 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1831
1832         skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1833                               tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1834                               l->addr, tipc_own_addr(l->net), 0, 0, 0);
1835         if (!skb)
1836                 return;
1837
1838         hdr = buf_msg(skb);
1839         data = msg_data(hdr);
1840         msg_set_session(hdr, l->session);
1841         msg_set_bearer_id(hdr, l->bearer_id);
1842         msg_set_net_plane(hdr, l->net_plane);
1843         msg_set_next_sent(hdr, l->snd_nxt);
1844         msg_set_ack(hdr, l->rcv_nxt - 1);
1845         msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1846         msg_set_bc_ack_invalid(hdr, !node_up);
1847         msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1848         msg_set_link_tolerance(hdr, tolerance);
1849         msg_set_linkprio(hdr, priority);
1850         msg_set_redundant_link(hdr, node_up);
1851         msg_set_seq_gap(hdr, 0);
1852         msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1853
1854         if (mtyp == STATE_MSG) {
1855                 if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1856                         msg_set_seqno(hdr, l->snd_nxt_state++);
1857                 msg_set_seq_gap(hdr, rcvgap);
1858                 bc_rcvgap = link_bc_rcv_gap(bcl);
1859                 msg_set_bc_gap(hdr, bc_rcvgap);
1860                 msg_set_probe(hdr, probe);
1861                 msg_set_is_keepalive(hdr, probe || probe_reply);
1862                 if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1863                         glen = tipc_build_gap_ack_blks(l, hdr);
1864                 tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1865                 msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1866                 skb_trim(skb, INT_H_SIZE + glen + dlen);
1867                 l->stats.sent_states++;
1868                 l->rcv_unacked = 0;
1869         } else {
1870                 /* RESET_MSG or ACTIVATE_MSG */
1871                 if (mtyp == ACTIVATE_MSG) {
1872                         msg_set_dest_session_valid(hdr, 1);
1873                         msg_set_dest_session(hdr, l->peer_session);
1874                 }
1875                 msg_set_max_pkt(hdr, l->advertised_mtu);
1876                 strcpy(data, l->if_name);
1877                 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1878                 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1879         }
1880         if (probe)
1881                 l->stats.sent_probes++;
1882         if (rcvgap)
1883                 l->stats.sent_nacks++;
1884         if (bc_rcvgap)
1885                 bcl->stats.sent_nacks++;
1886         skb->priority = TC_PRIO_CONTROL;
1887         __skb_queue_tail(xmitq, skb);
1888         trace_tipc_proto_build(skb, false, l->name);
1889 }
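/* On-wire layout of a STATE_MSG as trimmed above (sketch):
 *
 *	| INT_H_SIZE header | glen Gap ACK blocks | dlen monitor data |
 *
 * where glen is zero unless the peer advertised TIPC_GAP_ACK_BLOCK.
 * RESET/ACTIVATE messages instead carry TIPC_MAX_IF_NAME bytes of
 * interface name in the data area.
 */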
1890
1891 void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1892                                     struct sk_buff_head *xmitq)
1893 {
1894         u32 onode = tipc_own_addr(l->net);
1895         struct tipc_msg *hdr, *ihdr;
1896         struct sk_buff_head tnlq;
1897         struct sk_buff *skb;
1898         u32 dnode = l->addr;
1899
1900         __skb_queue_head_init(&tnlq);
1901         skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1902                               INT_H_SIZE, BASIC_H_SIZE,
1903                               dnode, onode, 0, 0, 0);
1904         if (!skb) {
1905                 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1906                 return;
1907         }
1908
1909         hdr = buf_msg(skb);
1910         msg_set_msgcnt(hdr, 1);
1911         msg_set_bearer_id(hdr, l->peer_bearer_id);
1912
1913         ihdr = (struct tipc_msg *)msg_data(hdr);
1914         tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1915                       BASIC_H_SIZE, dnode);
1916         msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1917         __skb_queue_tail(&tnlq, skb);
1918         tipc_link_xmit(l, &tnlq, xmitq);
1919 }
1920
1921 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1922  * with contents of the link's transmit and backlog queues.
1923  */
1924 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1925                            int mtyp, struct sk_buff_head *xmitq)
1926 {
1927         struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1928         struct sk_buff *skb, *tnlskb;
1929         struct tipc_msg *hdr, tnlhdr;
1930         struct sk_buff_head *queue = &l->transmq;
1931         struct sk_buff_head tmpxq, tnlq, frags;
1932         u16 pktlen, pktcnt, seqno = l->snd_nxt;
1933         bool pktcnt_need_update = false;
1934         u16 syncpt;
1935         int rc;
1936
1937         if (!tnl)
1938                 return;
1939
1940         __skb_queue_head_init(&tnlq);
1941         /* Link Synching:
1942          * From now on, send only a single ("dummy") SYNCH message
1943          * to peer. The SYNCH message does not contain any data, just
1944          * a header conveying the synch point to the peer.
1945          */
1946         if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1947                 tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
1948                                          INT_H_SIZE, 0, l->addr,
1949                                          tipc_own_addr(l->net),
1950                                          0, 0, 0);
1951                 if (!tnlskb) {
1952                         pr_warn("%sunable to create dummy SYNCH_MSG\n",
1953                                 link_co_err);
1954                         return;
1955                 }
1956
1957                 hdr = buf_msg(tnlskb);
1958                 syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
1959                 msg_set_syncpt(hdr, syncpt);
1960                 msg_set_bearer_id(hdr, l->peer_bearer_id);
1961                 __skb_queue_tail(&tnlq, tnlskb);
1962                 tipc_link_xmit(tnl, &tnlq, xmitq);
1963                 return;
1964         }
1965
1966         __skb_queue_head_init(&tmpxq);
1967         __skb_queue_head_init(&frags);
1968         /* At least one packet required for safe algorithm => add dummy */
1969         skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1970                               BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1971                               0, 0, TIPC_ERR_NO_PORT);
1972         if (!skb) {
1973                 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1974                 return;
1975         }
1976         __skb_queue_tail(&tnlq, skb);
1977         tipc_link_xmit(l, &tnlq, &tmpxq);
1978         __skb_queue_purge(&tmpxq);
1979
1980         /* Initialize reusable tunnel packet header */
1981         tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1982                       mtyp, INT_H_SIZE, l->addr);
1983         if (mtyp == SYNCH_MSG)
1984                 pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
1985         else
1986                 pktcnt = skb_queue_len(&l->transmq);
1987         pktcnt += skb_queue_len(&l->backlogq);
1988         msg_set_msgcnt(&tnlhdr, pktcnt);
1989         msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1990 tnl:
1991         /* Wrap each packet into a tunnel packet */
1992         skb_queue_walk(queue, skb) {
1993                 hdr = buf_msg(skb);
1994                 if (queue == &l->backlogq)
1995                         msg_set_seqno(hdr, seqno++);
1996                 pktlen = msg_size(hdr);
1997
1998                 /* Tunnel link MTU is not large enough. This can be
1999                  * due to:
2000                  * 1) the link MTU has just changed or is set differently;
2001                  * 2) a FAILOVER arriving on top of a SYNCH message
2002                  *
2003                  * The 2nd case should not happen if the peer supports
2004                  * TIPC_TUNNEL_ENHANCED
2005                  */
2006                 if (pktlen > tnl->mtu - INT_H_SIZE) {
2007                         if (mtyp == FAILOVER_MSG &&
2008                             (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
2009                                 rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
2010                                                        &frags);
2011                                 if (rc) {
2012                                         pr_warn("%sunable to frag msg: rc %d\n",
2013                                                 link_co_err, rc);
2014                                         return;
2015                                 }
2016                                 pktcnt += skb_queue_len(&frags) - 1;
2017                                 pktcnt_need_update = true;
2018                                 skb_queue_splice_tail_init(&frags, &tnlq);
2019                                 continue;
2020                         }
2021                         /* Peer does not support TIPC_TUNNEL_ENHANCED,
2022                          * so just warn and return
2023                          */
2024                         pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
2025                                             link_co_err, msg_user(hdr),
2026                                             msg_type(hdr), msg_size(hdr));
2027                         return;
2028                 }
2029
2030                 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
2031                 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
2032                 if (!tnlskb) {
2033                         pr_warn("%sunable to send packet\n", link_co_err);
2034                         return;
2035                 }
2036                 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
2037                 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
2038                 __skb_queue_tail(&tnlq, tnlskb);
2039         }
2040         if (queue != &l->backlogq) {
2041                 queue = &l->backlogq;
2042                 goto tnl;
2043         }
2044
2045         if (pktcnt_need_update)
2046                 skb_queue_walk(&tnlq, skb) {
2047                         hdr = buf_msg(skb);
2048                         msg_set_msgcnt(hdr, pktcnt);
2049                 }
2050
2051         tipc_link_xmit(tnl, &tnlq, xmitq);
2052
2053         if (mtyp == FAILOVER_MSG) {
2054                 tnl->drop_point = l->rcv_nxt;
2055                 tnl->failover_reasm_skb = l->reasm_buf;
2056                 l->reasm_buf = NULL;
2057
2058                 /* Failover the link's deferdq */
2059                 if (unlikely(!skb_queue_empty(fdefq))) {
2060                         pr_warn("Link failover deferdq not empty: %d!\n",
2061                                 skb_queue_len(fdefq));
2062                         __skb_queue_purge(fdefq);
2063                 }
2064                 skb_queue_splice_init(&l->deferdq, fdefq);
2065         }
2066 }
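/* Each tunnel packet built above is simply the original packet prefixed
 * with a TUNNEL_PROTOCOL header (sketch):
 *
 *	| INT_H_SIZE tunnel header (msgcnt, bearer id) | original packet |
 *
 * letting the receiver unwrap the inner packet and feed it through the
 * normal receive path once the failover/synch conditions are met.
 */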
2067
2068 /**
2069  * tipc_link_failover_prepare() - prepare tnl for link failover
2070  * @l: failover link
2071  * @tnl: tunnel link
2072  * @xmitq: queue for messages to be transmitted
2073  *
2074  * This is a special version of its precursor,
2075  * tipc_link_tnl_prepare(); see tipc_node_link_failover()
2076  * for details
2077  */
2078 void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
2079                                 struct sk_buff_head *xmitq)
2080 {
2081         struct sk_buff_head *fdefq = &tnl->failover_deferdq;
2082
2083         tipc_link_create_dummy_tnl_msg(tnl, xmitq);
2084
2085         /* This failover link endpoint was never established before,
2086          * so it has not received anything from peer.
2087          * Otherwise, it must be a normal failover situation or the
2088          * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
2089          * would have to start over from scratch instead.
2090          */
2091         tnl->drop_point = 1;
2092         tnl->failover_reasm_skb = NULL;
2093
2094         /* Initialize the link's failover deferdq */
2095         if (unlikely(!skb_queue_empty(fdefq))) {
2096                 pr_warn("Link failover deferdq not empty: %d!\n",
2097                         skb_queue_len(fdefq));
2098                 __skb_queue_purge(fdefq);
2099         }
2100 }
2101
2102 /* tipc_link_validate_msg(): validate message against current link state
2103  * Returns true if message should be accepted, otherwise false
2104  */
2105 bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
2106 {
2107         u16 curr_session = l->peer_session;
2108         u16 session = msg_session(hdr);
2109         int mtyp = msg_type(hdr);
2110
2111         if (msg_user(hdr) != LINK_PROTOCOL)
2112                 return true;
2113
2114         switch (mtyp) {
2115         case RESET_MSG:
2116                 if (!l->in_session)
2117                         return true;
2118                 /* Accept only RESET with new session number */
2119                 return more(session, curr_session);
2120         case ACTIVATE_MSG:
2121                 if (!l->in_session)
2122                         return true;
2123                 /* Accept only ACTIVATE with new or current session number */
2124                 return !less(session, curr_session);
2125         case STATE_MSG:
2126                 /* Accept only STATE with current session number */
2127                 if (!l->in_session)
2128                         return false;
2129                 if (session != curr_session)
2130                         return false;
2131                 /* Extra sanity check */
2132                 if (!link_is_up(l) && msg_ack(hdr))
2133                         return false;
2134                 if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
2135                         return true;
2136                 /* Accept only STATE with new sequence number */
2137                 return !less(msg_seqno(hdr), l->rcv_nxt_state);
2138         default:
2139                 return false;
2140         }
2141 }
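/* Example (hypothetical session numbers): with peer_session = 5 and
 * in_session set, a RESET with session 6 is accepted but one with 5 is
 * not; an ACTIVATE with 5 or 6 is accepted; a STATE is accepted only
 * with session 5 (and, given TIPC_LINK_PROTO_SEQNO, a non-stale seqno).
 */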
2142
2143 /* tipc_link_proto_rcv(): receive link level protocol message
2144  * Note that the network plane id propagates through the network, and may
2145  * change at any time. The node with the lowest numerical id determines
2146  * the network plane
2147  */
2148 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
2149                                struct sk_buff_head *xmitq)
2150 {
2151         struct tipc_msg *hdr = buf_msg(skb);
2152         struct tipc_gap_ack_blks *ga = NULL;
2153         bool reply = msg_probe(hdr), retransmitted = false;
2154         u16 dlen = msg_data_sz(hdr), glen = 0;
2155         u16 peers_snd_nxt =  msg_next_sent(hdr);
2156         u16 peers_tol = msg_link_tolerance(hdr);
2157         u16 peers_prio = msg_linkprio(hdr);
2158         u16 gap = msg_seq_gap(hdr);
2159         u16 ack = msg_ack(hdr);
2160         u16 rcv_nxt = l->rcv_nxt;
2161         u16 rcvgap = 0;
2162         int mtyp = msg_type(hdr);
2163         int rc = 0, released;
2164         char *if_name;
2165         void *data;
2166
2167         trace_tipc_proto_rcv(skb, false, l->name);
2168         if (tipc_link_is_blocked(l) || !xmitq)
2169                 goto exit;
2170
2171         if (tipc_own_addr(l->net) > msg_prevnode(hdr))
2172                 l->net_plane = msg_net_plane(hdr);
2173
2174         skb_linearize(skb);
2175         hdr = buf_msg(skb);
2176         data = msg_data(hdr);
2177
2178         if (!tipc_link_validate_msg(l, hdr)) {
2179                 trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
2180                 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
2181                 goto exit;
2182         }
2183
2184         switch (mtyp) {
2185         case RESET_MSG:
2186         case ACTIVATE_MSG:
2187                 /* Complete own link name with peer's interface name */
2188                 if_name = strrchr(l->name, ':') + 1;
2189                 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
2190                         break;
2191                 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
2192                         break;
2193                 strncpy(if_name, data, TIPC_MAX_IF_NAME);
2194
2195                 /* Update own tolerance if peer indicates a non-zero value */
2196                 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2197                         l->tolerance = peers_tol;
2198                         l->bc_rcvlink->tolerance = peers_tol;
2199                 }
2200                 /* Update own priority if peer's priority is higher */
2201                 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
2202                         l->priority = peers_prio;
2203
2204                 /* If peer is going down we want full re-establish cycle */
2205                 if (msg_peer_stopping(hdr)) {
2206                         rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2207                         break;
2208                 }
2209
2210                 /* If this endpoint was re-created while peer was ESTABLISHING
2211                  * it doesn't know the current session number. Force re-synch.
2212                  */
2213                 if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
2214                     l->session != msg_dest_session(hdr)) {
2215                         if (less(l->session, msg_dest_session(hdr)))
2216                                 l->session = msg_dest_session(hdr) + 1;
2217                         break;
2218                 }
2219
2220                 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
2221                 if (mtyp == RESET_MSG || !link_is_up(l))
2222                         rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
2223
2224                 /* ACTIVATE_MSG takes up link if it was already locally reset */
2225                 if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
2226                         rc = TIPC_LINK_UP_EVT;
2227
2228                 l->peer_session = msg_session(hdr);
2229                 l->in_session = true;
2230                 l->peer_bearer_id = msg_bearer_id(hdr);
2231                 if (l->mtu > msg_max_pkt(hdr))
2232                         l->mtu = msg_max_pkt(hdr);
2233                 break;
2234
2235         case STATE_MSG:
2236                 l->rcv_nxt_state = msg_seqno(hdr) + 1;
2237
2238                 /* Update own tolerance if peer indicates a non-zero value */
2239                 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
2240                         l->tolerance = peers_tol;
2241                         l->bc_rcvlink->tolerance = peers_tol;
2242                 }
2243                 /* Update own prio if peer indicates a different value */
2244                 if ((peers_prio != l->priority) &&
2245                     in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
2246                         l->priority = peers_prio;
2247                         rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2248                 }
2249
2250                 l->silent_intv_cnt = 0;
2251                 l->stats.recv_states++;
2252                 if (msg_probe(hdr))
2253                         l->stats.recv_probes++;
2254
2255                 if (!link_is_up(l)) {
2256                         if (l->state == LINK_ESTABLISHING)
2257                                 rc = TIPC_LINK_UP_EVT;
2258                         break;
2259                 }
2260
2261                 /* Receive Gap ACK blocks from peer if any */
2262                 glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
2263
2264                 tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
2265                              &l->mon_state, l->bearer_id);
2266
2267                 /* Send NACK if peer has sent pkts we haven't received yet */
2268                 if ((reply || msg_is_keepalive(hdr)) &&
2269                     more(peers_snd_nxt, rcv_nxt) &&
2270                     !tipc_link_is_synching(l) &&
2271                     skb_queue_empty(&l->deferdq))
2272                         rcvgap = peers_snd_nxt - l->rcv_nxt;
2273                 if (rcvgap || reply)
2274                         tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
2275                                                   rcvgap, 0, 0, xmitq);
2276
2277                 released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
2278                                                      &retransmitted, &rc);
2279                 if (gap)
2280                         l->stats.recv_nacks++;
2281                 if (released || retransmitted)
2282                         tipc_link_update_cwin(l, released, retransmitted);
2283                 if (released)
2284                         tipc_link_advance_backlog(l, xmitq);
2285                 if (unlikely(!skb_queue_empty(&l->wakeupq)))
2286                         link_prepare_wakeup(l);
2287         }
2288 exit:
2289         kfree_skb(skb);
2290         return rc;
2291 }
2292
2293 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
2294  */
2295 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
2296                                          u16 peers_snd_nxt,
2297                                          struct sk_buff_head *xmitq)
2298 {
2299         struct sk_buff *skb;
2300         struct tipc_msg *hdr;
2301         struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
2302         u16 ack = l->rcv_nxt - 1;
2303         u16 gap_to = peers_snd_nxt - 1;
2304
2305         skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
2306                               0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
2307         if (!skb)
2308                 return false;
2309         hdr = buf_msg(skb);
2310         msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
2311         msg_set_bcast_ack(hdr, ack);
2312         msg_set_bcgap_after(hdr, ack);
2313         if (dfrd_skb)
2314                 gap_to = buf_seqno(dfrd_skb) - 1;
2315         msg_set_bcgap_to(hdr, gap_to);
2316         msg_set_non_seq(hdr, bcast);
2317         __skb_queue_tail(xmitq, skb);
2318         return true;
2319 }
2320
2321 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
2322  *
2323  * Give a newly added peer node the sequence number where it should
2324  * start receiving and acking broadcast packets.
2325  */
2326 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
2327                                         struct sk_buff_head *xmitq)
2328 {
2329         struct sk_buff_head list;
2330
2331         __skb_queue_head_init(&list);
2332         if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2333                 return;
2334         msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
2335         tipc_link_xmit(l, &list, xmitq);
2336 }
2337
2338 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2339  */
2340 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2341 {
2342         int mtyp = msg_type(hdr);
2343         u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2344
2345         if (link_is_up(l))
2346                 return;
2347
2348         if (msg_user(hdr) == BCAST_PROTOCOL) {
2349                 l->rcv_nxt = peers_snd_nxt;
2350                 l->state = LINK_ESTABLISHED;
2351                 return;
2352         }
2353
2354         if (l->peer_caps & TIPC_BCAST_SYNCH)
2355                 return;
2356
2357         if (msg_peer_node_is_up(hdr))
2358                 return;
2359
2360         /* Compatibility: accept older, less safe initial synch data */
2361         if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2362                 l->rcv_nxt = peers_snd_nxt;
2363 }
2364
2365 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2366  */
2367 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2368                           struct sk_buff_head *xmitq)
2369 {
2370         u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2371         int rc = 0;
2372
2373         if (!link_is_up(l))
2374                 return rc;
2375
2376         if (!msg_peer_node_is_up(hdr))
2377                 return rc;
2378
2379         /* Open when peer acknowledges our bcast init msg (pkt #1) */
2380         if (msg_ack(hdr))
2381                 l->bc_peer_is_up = true;
2382
2383         if (!l->bc_peer_is_up)
2384                 return rc;
2385
2386         /* Ignore if peers_snd_nxt goes beyond receive window */
2387         if (more(peers_snd_nxt, l->rcv_nxt + l->window))
2388                 return rc;
2389
2390         l->snd_nxt = peers_snd_nxt;
2391         if (link_bc_rcv_gap(l))
2392                 rc |= TIPC_LINK_SND_STATE;
2393
2394         /* Return now if sender supports nack via STATE messages */
2395         if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2396                 return rc;
2397
2398         /* Otherwise, be backwards compatible */
2399
2400         if (!more(peers_snd_nxt, l->rcv_nxt)) {
2401                 l->nack_state = BC_NACK_SND_CONDITIONAL;
2402                 return 0;
2403         }
2404
2405         /* Don't NACK if one was recently sent or peeked */
2406         if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2407                 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2408                 return 0;
2409         }
2410
2411         /* Conditionally delay NACK sending until next synch rcv */
2412         if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2413                 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2414                 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
2415                         return 0;
2416         }
2417
2418         /* Send NACK now but suppress next one */
2419         tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2420         l->nack_state = BC_NACK_SND_SUPPRESS;
2421         return 0;
2422 }
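/* The legacy NACK throttling above cycles through three states:
 *
 *	BC_NACK_SND_CONDITIONAL   - gap seen; defer the NACK until the next
 *	                            synch unless the gap is already large
 *	BC_NACK_SND_UNCONDITIONAL - send a NACK at the next gap, however small
 *	BC_NACK_SND_SUPPRESS      - a NACK was just sent or overheard; hold
 *	                            off once before NACKing again
 */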
2423
2424 int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
2425                          struct tipc_gap_ack_blks *ga,
2426                          struct sk_buff_head *xmitq,
2427                          struct sk_buff_head *retrq)
2428 {
2429         struct tipc_link *l = r->bc_sndlink;
2430         bool unused = false;
2431         int rc = 0;
2432
2433         if (!link_is_up(r) || !r->bc_peer_is_up)
2434                 return 0;
2435
2436         if (gap) {
2437                 l->stats.recv_nacks++;
2438                 r->stats.recv_nacks++;
2439         }
2440
2441         if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
2442                 return 0;
2443
2444         trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
2445         tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);
2446
2447         tipc_link_advance_backlog(l, xmitq);
2448         if (unlikely(!skb_queue_empty(&l->wakeupq)))
2449                 link_prepare_wakeup(l);
2450
2451         return rc;
2452 }
2453
2454 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
2455  * This function is here for backwards compatibility, since
2456  * BCAST_PROTOCOL/STATE messages are no longer sent as of TIPC v2.5.
2457  */
2458 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2459                           struct sk_buff_head *xmitq)
2460 {
2461         struct tipc_msg *hdr = buf_msg(skb);
2462         u32 dnode = msg_destnode(hdr);
2463         int mtyp = msg_type(hdr);
2464         u16 acked = msg_bcast_ack(hdr);
2465         u16 from = acked + 1;
2466         u16 to = msg_bcgap_to(hdr);
2467         u16 peers_snd_nxt = to + 1;
2468         int rc = 0;
2469
2470         kfree_skb(skb);
2471
2472         if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2473                 return 0;
2474
2475         if (mtyp != STATE_MSG)
2476                 return 0;
2477
2478         if (dnode == tipc_own_addr(l->net)) {
2479                 rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
2480                                           xmitq);
2481                 l->stats.recv_nacks++;
2482                 return rc;
2483         }
2484
2485         /* Msg for other node => suppress own NACK at next sync if applicable */
2486         if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2487                 l->nack_state = BC_NACK_SND_SUPPRESS;
2488
2489         return 0;
2490 }
2491
2492 void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
2493 {
2494         int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
2495
2496         l->min_win = min_win;
2497         l->ssthresh = max_win;
2498         l->max_win = max_win;
2499         l->window = min_win;
2500         l->backlog[TIPC_LOW_IMPORTANCE].limit      = min_win * 2;
2501         l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = min_win * 4;
2502         l->backlog[TIPC_HIGH_IMPORTANCE].limit     = min_win * 6;
2503         l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
2504         l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
2505 }
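/* Illustration (hypothetical window): with min_win = 50 the backlog
 * limits become 100/200/300/400 packets for LOW..CRITICAL importance,
 * while SYSTEM importance is bounded by the number of packets needed to
 * carry TIPC_MAX_PUBL publication items of ITEM_SIZE bytes each at the
 * current MTU.
 */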
2506
2507 /**
2508  * tipc_link_reset_stats - reset link statistics
2509  * @l: pointer to link
2510  */
2511 void tipc_link_reset_stats(struct tipc_link *l)
2512 {
2513         memset(&l->stats, 0, sizeof(l->stats));
2514 }
2515
2516 static void link_print(struct tipc_link *l, const char *str)
2517 {
2518         struct sk_buff *hskb = skb_peek(&l->transmq);
2519         u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
2520         u16 tail = l->snd_nxt - 1;
2521
2522         pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
2523         pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2524                 skb_queue_len(&l->transmq), head, tail,
2525                 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
2526 }
2527
2528 /* Parse and validate nested (link) properties valid for media, bearer and link
2529  */
2530 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2531 {
2532         int err;
2533
2534         err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2535                                           tipc_nl_prop_policy, NULL);
2536         if (err)
2537                 return err;
2538
2539         if (props[TIPC_NLA_PROP_PRIO]) {
2540                 u32 prio;
2541
2542                 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2543                 if (prio > TIPC_MAX_LINK_PRI)
2544                         return -EINVAL;
2545         }
2546
2547         if (props[TIPC_NLA_PROP_TOL]) {
2548                 u32 tol;
2549
2550                 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2551                 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2552                         return -EINVAL;
2553         }
2554
2555         if (props[TIPC_NLA_PROP_WIN]) {
2556                 u32 max_win;
2557
2558                 max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2559                 if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
2560                         return -EINVAL;
2561         }
2562
2563         return 0;
2564 }
2565
2566 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2567 {
2568         int i;
2569         struct nlattr *stats;
2570
2571         struct nla_map {
2572                 u32 key;
2573                 u32 val;
2574         };
2575
2576         struct nla_map map[] = {
2577                 {TIPC_NLA_STATS_RX_INFO, 0},
2578                 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2579                 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2580                 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2581                 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2582                 {TIPC_NLA_STATS_TX_INFO, 0},
2583                 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2584                 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2585                 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2586                 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2587                 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2588                         s->msg_length_counts : 1},
2589                 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2590                 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2591                 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2592                 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2593                 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2594                 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2595                 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2596                 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2597                 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2598                 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
2599                 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2600                 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2601                 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2602                 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
2603                 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2604                 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2605                 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2606                 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2607                 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2608                 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2609                 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2610                 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2611                         (s->accu_queue_sz / s->queue_sz_counts) : 0}
2612         };
2613
2614         stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2615         if (!stats)
2616                 return -EMSGSIZE;
2617
2618         for (i = 0; i <  ARRAY_SIZE(map); i++)
2619                 if (nla_put_u32(skb, map[i].key, map[i].val))
2620                         goto msg_full;
2621
2622         nla_nest_end(skb, stats);
2623
2624         return 0;
2625 msg_full:
2626         nla_nest_cancel(skb, stats);
2627
2628         return -EMSGSIZE;
2629 }
2630
2631 /* Caller should hold appropriate locks to protect the link */
2632 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2633                        struct tipc_link *link, int nlflags)
2634 {
2635         u32 self = tipc_own_addr(net);
2636         struct nlattr *attrs;
2637         struct nlattr *prop;
2638         void *hdr;
2639         int err;
2640
2641         hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2642                           nlflags, TIPC_NL_LINK_GET);
2643         if (!hdr)
2644                 return -EMSGSIZE;
2645
2646         attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2647         if (!attrs)
2648                 goto msg_full;
2649
2650         if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2651                 goto attr_msg_full;
2652         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
2653                 goto attr_msg_full;
2654         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
2655                 goto attr_msg_full;
2656         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
2657                 goto attr_msg_full;
2658         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
2659                 goto attr_msg_full;
2660
2661         if (tipc_link_is_up(link))
2662                 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2663                         goto attr_msg_full;
2664         if (link->active)
2665                 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2666                         goto attr_msg_full;
2667
2668         prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2669         if (!prop)
2670                 goto attr_msg_full;
2671         if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2672                 goto prop_msg_full;
2673         if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2674                 goto prop_msg_full;
2675         if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2676                         link->window))
2677                 goto prop_msg_full;
2680         nla_nest_end(msg->skb, prop);
2681
2682         err = __tipc_nl_add_stats(msg->skb, &link->stats);
2683         if (err)
2684                 goto attr_msg_full;
2685
2686         nla_nest_end(msg->skb, attrs);
2687         genlmsg_end(msg->skb, hdr);
2688
2689         return 0;
2690
2691 prop_msg_full:
2692         nla_nest_cancel(msg->skb, prop);
2693 attr_msg_full:
2694         nla_nest_cancel(msg->skb, attrs);
2695 msg_full:
2696         genlmsg_cancel(msg->skb, hdr);
2697
2698         return -EMSGSIZE;
2699 }
2700
2701 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2702                                       struct tipc_stats *stats)
2703 {
2704         int i;
2705         struct nlattr *nest;
2706
2707         struct nla_map {
2708                 __u32 key;
2709                 __u32 val;
2710         };
2711
2712         struct nla_map map[] = {
2713                 {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
2714                 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2715                 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2716                 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2717                 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2718                 {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
2719                 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2720                 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2721                 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2722                 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2723                 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2724                 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2725                 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2726                 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2727                 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2728                 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2729                 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2730                 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2731                 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2732                         (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2733         };
2734
2735         nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
2736         if (!nest)
2737                 return -EMSGSIZE;
2738
2739         for (i = 0; i <  ARRAY_SIZE(map); i++)
2740                 if (nla_put_u32(skb, map[i].key, map[i].val))
2741                         goto msg_full;
2742
2743         nla_nest_end(skb, nest);
2744
2745         return 0;
2746 msg_full:
2747         nla_nest_cancel(skb, nest);
2748
2749         return -EMSGSIZE;
2750 }
2751
2752 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
2753                         struct tipc_link *bcl)
2754 {
2755         int err;
2756         void *hdr;
2757         struct nlattr *attrs;
2758         struct nlattr *prop;
2759         u32 bc_mode = tipc_bcast_get_mode(net);
2760         u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
2761
2762         if (!bcl)
2763                 return 0;
2764
2765         tipc_bcast_lock(net);
2766
2767         hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2768                           NLM_F_MULTI, TIPC_NL_LINK_GET);
2769         if (!hdr) {
2770                 tipc_bcast_unlock(net);
2771                 return -EMSGSIZE;
2772         }
2773
2774         attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
2775         if (!attrs)
2776                 goto msg_full;
2777
2778         /* The broadcast link is always up */
2779         if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2780                 goto attr_msg_full;
2781
2782         if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2783                 goto attr_msg_full;
2784         if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2785                 goto attr_msg_full;
2786         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
2787                 goto attr_msg_full;
2788         if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
2789                 goto attr_msg_full;
2790
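        /* Properties nest: the broadcast ratio is included only when
         * selective broadcast (BCLINK_MODE_SEL) is active.
         */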
2791         prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
2792         if (!prop)
2793                 goto attr_msg_full;
2794         if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
2795                 goto prop_msg_full;
2796         if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2797                 goto prop_msg_full;
2798         if (bc_mode & BCLINK_MODE_SEL)
2799                 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2800                                 bc_ratio))
2801                         goto prop_msg_full;
2802         nla_nest_end(msg->skb, prop);
2803
2804         err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2805         if (err)
2806                 goto attr_msg_full;
2807
2808         tipc_bcast_unlock(net);
2809         nla_nest_end(msg->skb, attrs);
2810         genlmsg_end(msg->skb, hdr);
2811
2812         return 0;
2813
2814 prop_msg_full:
2815         nla_nest_cancel(msg->skb, prop);
2816 attr_msg_full:
2817         nla_nest_cancel(msg->skb, attrs);
2818 msg_full:
2819         tipc_bcast_unlock(net);
2820         genlmsg_cancel(msg->skb, hdr);
2821
2822         return -EMSGSIZE;
2823 }
2824
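/* Update the link tolerance. The new value is mirrored on the broadcast
 * receive link and, if the link is up, advertised to the peer in a
 * STATE_MSG queued on @xmitq.
 */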
2825 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2826                              struct sk_buff_head *xmitq)
2827 {
2828         l->tolerance = tol;
2829         if (l->bc_rcvlink)
2830                 l->bc_rcvlink->tolerance = tol;
2831         if (link_is_up(l))
2832                 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
2833 }
2834
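/* Update the link priority and advertise it to the peer in a STATE_MSG. */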
2835 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2836                         struct sk_buff_head *xmitq)
2837 {
2838         l->priority = prio;
2839         tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
2840 }
2841
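/* Set how many silent probe intervals are tolerated before the link is
 * declared failed.
 */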
2842 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2843 {
2844         l->abort_limit = limit;
2845 }
2846
2847 /**
2848  * tipc_link_dump - dump TIPC link data
2849  * @l: tipc link to be dumped
2850  * @dqueues: bitmask selecting which link queues, if any, to dump:
2851  *           - TIPC_DUMP_NONE: don't dump link queues
2852  *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
2853  *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2854  *           - TIPC_DUMP_DEFERDQ: dump link deferred (deferdq) queue
2855  *           - TIPC_DUMP_INPUTQ: dump link input queue
2856  *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
2857  *           - TIPC_DUMP_ALL: dump all the link queues above
2858  * @buf: returned buffer holding the formatted dump data
2859  */
2860 int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2861 {
2862         int i = 0;
2863         size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2864         struct sk_buff_head *list;
2865         struct sk_buff *hskb, *tskb;
2866         u32 len;
2867
2868         if (!l) {
2869                 i += scnprintf(buf, sz, "link data: (null)\n");
2870                 return i;
2871         }
2872
2873         i += scnprintf(buf, sz, "link data: %x", l->addr);
2874         i += scnprintf(buf + i, sz - i, " %x", l->state);
2875         i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2876         i += scnprintf(buf + i, sz - i, " %u", l->session);
2877         i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2878         i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2879         i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2880         i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2881         i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2882         i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2883         i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2884         i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
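        /* two always-zero placeholders keep the dump layout stable */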
2885         i += scnprintf(buf + i, sz - i, " %u", 0);
2886         i += scnprintf(buf + i, sz - i, " %u", 0);
2887         i += scnprintf(buf + i, sz - i, " %u", l->acked);
2888
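        /* Summarize each queue as "<len head-seqno tail-seqno>"; empty
         * queues report zero sequence numbers.
         */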
2889         list = &l->transmq;
2890         len = skb_queue_len(list);
2891         hskb = skb_peek(list);
2892         tskb = skb_peek_tail(list);
2893         i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2894                        (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2895                        (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2896
2897         list = &l->deferdq;
2898         len = skb_queue_len(list);
2899         hskb = skb_peek(list);
2900         tskb = skb_peek_tail(list);
2901         i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2902                        (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2903                        (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2904
2905         list = &l->backlogq;
2906         len = skb_queue_len(list);
2907         hskb = skb_peek(list);
2908         tskb = skb_peek_tail(list);
2909         i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2910                        (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2911                        (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2912
2913         list = l->inputq;
2914         len = skb_queue_len(list);
2915         hskb = skb_peek(list);
2916         tskb = skb_peek_tail(list);
2917         i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2918                        (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2919                        (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2920
2921         if (dqueues & TIPC_DUMP_TRANSMQ) {
2922                 i += scnprintf(buf + i, sz - i, "transmq: ");
2923                 i += tipc_list_dump(&l->transmq, false, buf + i);
2924         }
2925         if (dqueues & TIPC_DUMP_BACKLOGQ) {
2926                 i += scnprintf(buf + i, sz - i,
2927                                "backlogq: <%u %u %u %u %u>, ",
2928                                l->backlog[TIPC_LOW_IMPORTANCE].len,
2929                                l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2930                                l->backlog[TIPC_HIGH_IMPORTANCE].len,
2931                                l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2932                                l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2933                 i += tipc_list_dump(&l->backlogq, false, buf + i);
2934         }
2935         if (dqueues & TIPC_DUMP_DEFERDQ) {
2936                 i += scnprintf(buf + i, sz - i, "deferdq: ");
2937                 i += tipc_list_dump(&l->deferdq, false, buf + i);
2938         }
2939         if (dqueues & TIPC_DUMP_INPUTQ) {
2940                 i += scnprintf(buf + i, sz - i, "inputq: ");
2941                 i += tipc_list_dump(l->inputq, false, buf + i);
2942         }
2943         if (dqueues & TIPC_DUMP_WAKEUP) {
2944                 i += scnprintf(buf + i, sz - i, "wakeup: ");
2945                 i += tipc_list_dump(&l->wakeupq, false, buf + i);
2946         }
2947
2948         return i;
2949 }
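
/* Usage sketch (hypothetical caller): assumes a buffer of LINK_LMAX
 * bytes, the size this function uses internally for a full dump.
 *
 *	char buf[LINK_LMAX];
 *
 *	tipc_link_dump(l, TIPC_DUMP_ALL, buf);
 *	pr_info("%s", buf);
 */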