1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* SCTP kernel implementation
3 * (C) Copyright IBM Corp. 2001, 2004
4 * Copyright (c) 1999 Cisco, Inc.
5 * Copyright (c) 1999-2001 Motorola, Inc.
7 * This file is part of the SCTP kernel implementation
9 * These functions work with the state functions in sctp_sm_statefuns.c
10 * to implement the state operations. These functions implement the
11 * steps which require modifying existing data structures.
13 * Please send any bug reports or fixes you make to the
15 * lksctp developers <linux-sctp@vger.kernel.org>
17 * Written or modified by:
18 * La Monte H.P. Yarroll <piggy@acm.org>
19 * Karl Knutson <karl@athena.chicago.il.us>
20 * Jon Grimm <jgrimm@austin.ibm.com>
21 * Hui Huang <hui.huang@nokia.com>
22 * Dajiang Zhang <dajiang.zhang@nokia.com>
23 * Daisy Chang <daisyc@us.ibm.com>
24 * Sridhar Samudrala <sri@us.ibm.com>
25 * Ardelle Fan <ardelle.fan@intel.com>
28 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30 #include <linux/skbuff.h>
31 #include <linux/types.h>
32 #include <linux/socket.h>
34 #include <linux/gfp.h>
36 #include <net/sctp/sctp.h>
37 #include <net/sctp/sm.h>
38 #include <net/sctp/stream_sched.h>
40 static int sctp_cmd_interpreter(enum sctp_event_type event_type,
41 union sctp_subtype subtype,
42 enum sctp_state state,
43 struct sctp_endpoint *ep,
44 struct sctp_association *asoc,
46 enum sctp_disposition status,
47 struct sctp_cmd_seq *commands,
49 static int sctp_side_effects(enum sctp_event_type event_type,
50 union sctp_subtype subtype,
51 enum sctp_state state,
52 struct sctp_endpoint *ep,
53 struct sctp_association **asoc,
55 enum sctp_disposition status,
56 struct sctp_cmd_seq *commands,
59 /********************************************************************
61 ********************************************************************/
63 /* A helper function for delayed processing of INET ECN CE bit. */
64 static void sctp_do_ecn_ce_work(struct sctp_association *asoc,
67 /* Save the TSN away for comparison when we receive CWR */
69 asoc->last_ecne_tsn = lowest_tsn;
73 /* Helper function for delayed processing of SCTP ECNE chunk. */
74 /* RFC 2960 Appendix A
76 * RFC 2481 details a specific bit for a sender to send in
77 * the header of its next outbound TCP segment to indicate to
78 * its peer that it has reduced its congestion window. This
79 * is termed the CWR bit. For SCTP the same indication is made
80 * by including the CWR chunk. This chunk contains one data
81 * element, i.e. the TSN number that was sent in the ECNE chunk.
82 * This element represents the lowest TSN number in the datagram
83 * that was originally marked with the CE bit.
85 static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc,
87 struct sctp_chunk *chunk)
89 struct sctp_chunk *repl;
91 /* Our previously transmitted packet ran into some congestion
92 * so we should take action by reducing cwnd and ssthresh
93 * and then ACK our peer that we've done so by sending a CWR.
97 /* First, try to determine if we want to actually lower
98 * our cwnd variables. Only lower them if the ECNE looks more
99 * recent than the last response.
101 if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) {
102 struct sctp_transport *transport;
104 /* Find which transport's congestion variables
105 * need to be adjusted.
107 transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn);
109 /* Update the congestion variables. */
111 sctp_transport_lower_cwnd(transport,
112 SCTP_LOWER_CWND_ECNE);
113 asoc->last_cwr_tsn = lowest_tsn;
116 /* Always try to quiet the other end. In case of lost CWR,
117 * resend last_cwr_tsn.
119 repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk);
121 /* If we run out of memory, it will look like a lost CWR. We'll
122 * get back in sync eventually.
127 /* Helper function to do delayed processing of ECN CWR chunk. */
128 static void sctp_do_ecn_cwr_work(struct sctp_association *asoc,
131 /* Turn off ECNE getting auto-prepended to every outgoing packet. */
137 /* Generate SACK if necessary. We call this at the end of a packet. */
138 static int sctp_gen_sack(struct sctp_association *asoc, int force,
139 struct sctp_cmd_seq *commands)
141 struct sctp_transport *trans = asoc->peer.last_data_from;
142 __u32 ctsn, max_tsn_seen;
143 struct sctp_chunk *sack;
147 (!trans && (asoc->param_flags & SPP_SACKDELAY_DISABLE)) ||
148 (trans && (trans->param_flags & SPP_SACKDELAY_DISABLE)))
149 asoc->peer.sack_needed = 1;
151 ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
152 max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
154 /* From 12.2 Parameters necessary per association (i.e. the TCB):
156 * Ack State : This flag indicates if the next received packet
157 * : is to be responded to with a SACK. ...
158 * : When DATA chunks are out of order, SACK's
159 * : are not delayed (see Section 6).
161 * [This is actually not mentioned in Section 6, but we
162 * implement it here anyway. --piggy]
164 if (max_tsn_seen != ctsn)
165 asoc->peer.sack_needed = 1;
167 /* From 6.2 Acknowledgement on Reception of DATA Chunks:
169 * Section 4.2 of [RFC2581] SHOULD be followed. Specifically,
170 * an acknowledgement SHOULD be generated for at least every
171 * second packet (not every second DATA chunk) received, and
172 * SHOULD be generated within 200 ms of the arrival of any
173 * unacknowledged DATA chunk. ...
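*
* For example, assuming the usual defaults of a SACK frequency of 2 and a
* 200 ms delayed-SACK timeout, every second packet received in order is
* acknowledged immediately, while a lone packet only arms the delayed
* SACK timer restarted below.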
175 if (!asoc->peer.sack_needed) {
176 asoc->peer.sack_cnt++;
178 /* Set the SACK delay timeout based on the
179 * SACK delay for the last transport
180 * that data was received from, or the default
181 * for the association.
184 /* We will need a SACK for the next packet. */
185 if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
186 asoc->peer.sack_needed = 1;
188 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
191 /* We will need a SACK for the next packet. */
192 if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
193 asoc->peer.sack_needed = 1;
195 asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] =
199 /* Restart the SACK timer. */
200 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
201 SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
203 __u32 old_a_rwnd = asoc->a_rwnd;
205 asoc->a_rwnd = asoc->rwnd;
206 sack = sctp_make_sack(asoc);
208 asoc->a_rwnd = old_a_rwnd;
212 asoc->peer.sack_needed = 0;
213 asoc->peer.sack_cnt = 0;
215 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(sack));
217 /* Stop the SACK timer. */
218 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
219 SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
228 /* When the T3-RTX timer expires, it calls this function to create the
229 * relevant state machine event.
231 void sctp_generate_t3_rtx_event(struct timer_list *t)
233 struct sctp_transport *transport =
234 from_timer(transport, t, T3_rtx_timer);
235 struct sctp_association *asoc = transport->asoc;
236 struct sock *sk = asoc->base.sk;
237 struct net *net = sock_net(sk);
240 /* Check whether a task is in the sock. */
243 if (sock_owned_by_user(sk)) {
244 pr_debug("%s: sock is busy\n", __func__);
246 /* Try again later. */
247 if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20)))
248 sctp_transport_hold(transport);
252 /* Run through the state machine. */
253 error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
254 SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
257 transport, GFP_ATOMIC);
264 sctp_transport_put(transport);
267 /* This is an interface for producing timeout events. It works
268 * for timeouts which use the association as their parameter.
270 static void sctp_generate_timeout_event(struct sctp_association *asoc,
271 enum sctp_event_timeout timeout_type)
273 struct sock *sk = asoc->base.sk;
274 struct net *net = sock_net(sk);
278 if (sock_owned_by_user(sk)) {
279 pr_debug("%s: sock is busy: timer %d\n", __func__,
282 /* Try again later. */
283 if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20)))
284 sctp_association_hold(asoc);
288 /* Is this association really dead and just waiting around for
289 * the timer to let go of the reference?
294 /* Run through the state machine. */
295 error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
296 SCTP_ST_TIMEOUT(timeout_type),
297 asoc->state, asoc->ep, asoc,
298 (void *)timeout_type, GFP_ATOMIC);
305 sctp_association_put(asoc);
308 static void sctp_generate_t1_cookie_event(struct timer_list *t)
310 struct sctp_association *asoc =
311 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_COOKIE]);
313 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE);
316 static void sctp_generate_t1_init_event(struct timer_list *t)
318 struct sctp_association *asoc =
319 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T1_INIT]);
321 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT);
324 static void sctp_generate_t2_shutdown_event(struct timer_list *t)
326 struct sctp_association *asoc =
327 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN]);
329 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN);
332 static void sctp_generate_t4_rto_event(struct timer_list *t)
334 struct sctp_association *asoc =
335 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_T4_RTO]);
337 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO);
340 static void sctp_generate_t5_shutdown_guard_event(struct timer_list *t)
342 struct sctp_association *asoc =
343 from_timer(asoc, t,
344 timers[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD]);
346 sctp_generate_timeout_event(asoc,
347 SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD);
349 } /* sctp_generate_t5_shutdown_guard_event() */
351 static void sctp_generate_autoclose_event(struct timer_list *t)
353 struct sctp_association *asoc =
354 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]);
356 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE);
359 /* Generate a heartbeat event. If the sock is busy, reschedule. Make
360 * sure that the transport is still valid.
362 void sctp_generate_heartbeat_event(struct timer_list *t)
364 struct sctp_transport *transport = from_timer(transport, t, hb_timer);
365 struct sctp_association *asoc = transport->asoc;
366 struct sock *sk = asoc->base.sk;
367 struct net *net = sock_net(sk);
368 u32 elapsed, timeout;
372 if (sock_owned_by_user(sk)) {
373 pr_debug("%s: sock is busy\n", __func__);
375 /* Try again later. */
376 if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20)))
377 sctp_transport_hold(transport);
381 /* Check if we should still send the heartbeat or reschedule */
382 elapsed = jiffies - transport->last_time_sent;
383 timeout = sctp_transport_timeout(transport);
384 if (elapsed < timeout) {
385 elapsed = timeout - elapsed;
386 if (!mod_timer(&transport->hb_timer, jiffies + elapsed))
387 sctp_transport_hold(transport);
391 error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
392 SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
393 asoc->state, asoc->ep, asoc,
394 transport, GFP_ATOMIC);
401 sctp_transport_put(transport);
404 /* Handle the timeout of the ICMP protocol unreachable timer. Trigger
405 * the correct state machine transition that will close the association.
407 void sctp_generate_proto_unreach_event(struct timer_list *t)
409 struct sctp_transport *transport =
410 from_timer(transport, t, proto_unreach_timer);
411 struct sctp_association *asoc = transport->asoc;
412 struct sock *sk = asoc->base.sk;
413 struct net *net = sock_net(sk);
416 if (sock_owned_by_user(sk)) {
417 pr_debug("%s: sock is busy\n", __func__);
419 /* Try again later. */
420 if (!mod_timer(&transport->proto_unreach_timer,
421 jiffies + (HZ/20)))
422 sctp_association_hold(asoc);
426 /* Is this structure just waiting around for us to actually free it? */
432 sctp_do_sm(net, SCTP_EVENT_T_OTHER,
433 SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH),
434 asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
438 sctp_association_put(asoc);
441 /* Handle the timeout of the RE-CONFIG timer. */
442 void sctp_generate_reconf_event(struct timer_list *t)
444 struct sctp_transport *transport =
445 from_timer(transport, t, reconf_timer);
446 struct sctp_association *asoc = transport->asoc;
447 struct sock *sk = asoc->base.sk;
448 struct net *net = sock_net(sk);
452 if (sock_owned_by_user(sk)) {
453 pr_debug("%s: sock is busy\n", __func__);
455 /* Try again later. */
456 if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
457 sctp_transport_hold(transport);
461 error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
462 SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
463 asoc->state, asoc->ep, asoc,
464 transport, GFP_ATOMIC);
471 sctp_transport_put(transport);
474 /* Inject a SACK Timeout event into the state machine. */
475 static void sctp_generate_sack_event(struct timer_list *t)
477 struct sctp_association *asoc =
478 from_timer(asoc, t, timers[SCTP_EVENT_TIMEOUT_SACK]);
480 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
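/* Per-association timeout dispatch table. The T3-RTX, HEARTBEAT and
* RECONF entries are NULL because those timeouts are tracked per
* transport and driven by the transport-specific timer handlers above.
*/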
483 sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
484 [SCTP_EVENT_TIMEOUT_NONE] = NULL,
485 [SCTP_EVENT_TIMEOUT_T1_COOKIE] = sctp_generate_t1_cookie_event,
486 [SCTP_EVENT_TIMEOUT_T1_INIT] = sctp_generate_t1_init_event,
487 [SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = sctp_generate_t2_shutdown_event,
488 [SCTP_EVENT_TIMEOUT_T3_RTX] = NULL,
489 [SCTP_EVENT_TIMEOUT_T4_RTO] = sctp_generate_t4_rto_event,
490 [SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] =
491 sctp_generate_t5_shutdown_guard_event,
492 [SCTP_EVENT_TIMEOUT_HEARTBEAT] = NULL,
493 [SCTP_EVENT_TIMEOUT_RECONF] = NULL,
494 [SCTP_EVENT_TIMEOUT_SACK] = sctp_generate_sack_event,
495 [SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sctp_generate_autoclose_event,
499 /* RFC 2960 8.2 Path Failure Detection
501 * When its peer endpoint is multi-homed, an endpoint should keep an
502 * error counter for each of the destination transport addresses of the
503 * peer endpoint.
505 * Each time the T3-rtx timer expires on any address, or when a
506 * HEARTBEAT sent to an idle address is not acknowledged within a RTO,
507 * the error counter of that destination address will be incremented.
508 * When the value in the error counter exceeds the protocol parameter
509 * 'Path.Max.Retrans' of that destination address, the endpoint should
510 * mark the destination transport address as inactive, and a
511 * notification SHOULD be sent to the upper layer.
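*
* In addition to these rules, the function below also moves an ACTIVE
* transport into the potentially-failed (PF) state once its error count
* exceeds pf_retrans (while still below pathmaxrxt), provided the
* net.sctp.pf_enable sysctl is set; this is the quick-failover behaviour
* referenced further down.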
514 static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
515 struct sctp_association *asoc,
516 struct sctp_transport *transport,
519 struct net *net = sock_net(asoc->base.sk);
521 /* The check for the association's overall error counter exceeding the
522 * threshold is done in the state function.
524 /* We are here due to a timer expiration. If the timer was
525 * not a HEARTBEAT, then normal error tracking is done.
526 * If the timer was a heartbeat, we only increment error counts
527 * when we already have an outstanding HEARTBEAT that has not
528 * been acknowledged.
529 * Additionally, some transport states inhibit error increments.
532 asoc->overall_error_count++;
533 if (transport->state != SCTP_INACTIVE)
534 transport->error_count++;
535 } else if (transport->hb_sent) {
536 if (transport->state != SCTP_UNCONFIRMED)
537 asoc->overall_error_count++;
538 if (transport->state != SCTP_INACTIVE)
539 transport->error_count++;
542 /* If the transport error count is greater than the pf_retrans
543 * threshold, and less than pathmaxrxt, and if the current state
544 * is SCTP_ACTIVE, then mark this transport as Partially Failed,
545 * see SCTP Quick Failover Draft, section 5.1
547 if (net->sctp.pf_enable &&
548 (transport->state == SCTP_ACTIVE) &&
549 (asoc->pf_retrans < transport->pathmaxrxt) &&
550 (transport->error_count > asoc->pf_retrans)) {
552 sctp_assoc_control_transport(asoc, transport,
556 /* Update the hb timer to resend a heartbeat every rto */
557 sctp_transport_reset_hb_timer(transport);
560 if (transport->state != SCTP_INACTIVE &&
561 (transport->error_count > transport->pathmaxrxt)) {
562 pr_debug("%s: association:%p transport addr:%pISpc failed\n",
563 __func__, asoc, &transport->ipaddr.sa);
565 sctp_assoc_control_transport(asoc, transport,
567 SCTP_FAILED_THRESHOLD);
570 /* E2) For the destination address for which the timer
571 * expires, set RTO <- RTO * 2 ("back off the timer"). The
572 * maximum value discussed in rule C7 above (RTO.max) may be
573 * used to provide an upper bound to this doubling operation.
575 * Special Case: the first HB doesn't trigger exponential backoff.
576 * The first unacknowledged HB triggers it. We do this with a flag
577 * that indicates that we have an outstanding HB.
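* For example, assuming the default rto_initial of 3 s and rto_max of
* 60 s, successive expirations back the RTO off to 6 s, 12 s, 24 s,
* 48 s and finally 60 s.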
579 if (!is_hb || transport->hb_sent) {
580 transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
581 sctp_max_rto(asoc, transport);
585 /* Worker routine to handle INIT command failure. */
586 static void sctp_cmd_init_failed(struct sctp_cmd_seq *commands,
587 struct sctp_association *asoc,
590 struct sctp_ulpevent *event;
592 event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC,
593 (__u16)error, 0, 0, NULL,
597 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
598 SCTP_ULPEVENT(event));
600 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
601 SCTP_STATE(SCTP_STATE_CLOSED));
603 /* SEND_FAILED sent later when cleaning up the association. */
604 asoc->outqueue.error = error;
605 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
608 /* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */
609 static void sctp_cmd_assoc_failed(struct sctp_cmd_seq *commands,
610 struct sctp_association *asoc,
611 enum sctp_event_type event_type,
612 union sctp_subtype subtype,
613 struct sctp_chunk *chunk,
616 struct sctp_ulpevent *event;
617 struct sctp_chunk *abort;
619 /* Cancel any partial delivery in progress. */
620 asoc->stream.si->abort_pd(&asoc->ulpq, GFP_ATOMIC);
622 if (event_type == SCTP_EVENT_T_CHUNK && subtype.chunk == SCTP_CID_ABORT)
623 event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
624 (__u16)error, 0, 0, chunk,
627 event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST,
628 (__u16)error, 0, 0, NULL,
631 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
632 SCTP_ULPEVENT(event));
634 if (asoc->overall_error_count >= asoc->max_retrans) {
635 abort = sctp_make_violation_max_retrans(asoc, chunk);
637 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
641 sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
642 SCTP_STATE(SCTP_STATE_CLOSED));
644 /* SEND_FAILED sent later when cleaning up the association. */
645 asoc->outqueue.error = error;
646 sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
649 /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT
650 * inside the cookie). In reality, this is only used for INIT-ACK processing
651 * since all other cases use "temporary" associations and can do all
652 * their work in statefuns directly.
654 static int sctp_cmd_process_init(struct sctp_cmd_seq *commands,
655 struct sctp_association *asoc,
656 struct sctp_chunk *chunk,
657 struct sctp_init_chunk *peer_init,
662 /* We only process the init as a side effect in a single
663 * case. This is when we process the INIT-ACK. If we
664 * fail during INIT processing (due to malloc problems),
665 * just return the error and stop processing the stack.
667 if (!sctp_process_init(asoc, chunk, sctp_source(chunk), peer_init, gfp))
675 /* Helper function to break out starting up of heartbeat timers. */
676 static void sctp_cmd_hb_timers_start(struct sctp_cmd_seq *cmds,
677 struct sctp_association *asoc)
679 struct sctp_transport *t;
681 /* Start a heartbeat timer for each transport on the association.
682 * hold a reference on the transport to make sure none of
683 * the needed data structures go away.
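* (sctp_transport_reset_hb_timer() takes that reference itself when it
* actually arms the timer.)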
685 list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
686 sctp_transport_reset_hb_timer(t);
689 static void sctp_cmd_hb_timers_stop(struct sctp_cmd_seq *cmds,
690 struct sctp_association *asoc)
692 struct sctp_transport *t;
694 /* Stop all heartbeat timers. */
696 list_for_each_entry(t, &asoc->peer.transport_addr_list,
698 if (del_timer(&t->hb_timer))
699 sctp_transport_put(t);
703 /* Helper function to stop any pending T3-RTX timers */
704 static void sctp_cmd_t3_rtx_timers_stop(struct sctp_cmd_seq *cmds,
705 struct sctp_association *asoc)
707 struct sctp_transport *t;
709 list_for_each_entry(t, &asoc->peer.transport_addr_list,
711 if (del_timer(&t->T3_rtx_timer))
712 sctp_transport_put(t);
717 /* Helper function to handle the reception of a HEARTBEAT ACK. */
718 static void sctp_cmd_transport_on(struct sctp_cmd_seq *cmds,
719 struct sctp_association *asoc,
720 struct sctp_transport *t,
721 struct sctp_chunk *chunk)
723 struct sctp_sender_hb_info *hbinfo;
724 int was_unconfirmed = 0;
726 /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the
727 * HEARTBEAT should clear the error counter of the destination
728 * transport address to which the HEARTBEAT was sent.
733 * Although RFC4960 specifies that the overall error count must
734 * be cleared when a HEARTBEAT ACK is received, we make an
735 * exception while in SHUTDOWN PENDING. If the peer keeps its
736 * window shut forever, we may never be able to transmit our
737 * outstanding data and rely on the retransmission limit being reached
738 * to shut down the association.
740 if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
741 t->asoc->overall_error_count = 0;
743 /* Clear the hb_sent flag to signal that we had a good
748 /* Mark the destination transport address as active if it is not so marked. */
751 if ((t->state == SCTP_INACTIVE) || (t->state == SCTP_UNCONFIRMED)) {
753 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
754 SCTP_HEARTBEAT_SUCCESS);
757 if (t->state == SCTP_PF)
758 sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP,
759 SCTP_HEARTBEAT_SUCCESS);
761 /* HB-ACK was received for the proper HB. Consider this forward progress. */
765 sctp_transport_dst_confirm(t);
767 /* The receiver of the HEARTBEAT ACK should also perform an
768 * RTT measurement for that destination transport address
769 * using the time value carried in the HEARTBEAT ACK chunk.
770 * If the transport's rto_pending variable has been cleared,
771 * it was most likely due to a retransmit. However, we want
772 * to re-enable it to properly update the rto.
774 if (t->rto_pending == 0)
777 hbinfo = (struct sctp_sender_hb_info *)chunk->skb->data;
778 sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at));
780 /* Update the heartbeat timer. */
781 sctp_transport_reset_hb_timer(t);
783 if (was_unconfirmed && asoc->peer.transport_count == 1)
784 sctp_transport_immediate_rtx(t);
788 /* Helper function to process the SACK command. */
789 static int sctp_cmd_process_sack(struct sctp_cmd_seq *cmds,
790 struct sctp_association *asoc,
791 struct sctp_chunk *chunk)
795 if (sctp_outq_sack(&asoc->outqueue, chunk)) {
796 struct net *net = sock_net(asoc->base.sk);
798 /* There are no more TSNs awaiting SACK. */
799 err = sctp_do_sm(net, SCTP_EVENT_T_OTHER,
800 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN),
801 asoc->state, asoc->ep, asoc, NULL,
808 /* Helper function to set the timeout value for T2-SHUTDOWN timer and to set
809 * the transport for a shutdown chunk.
811 static void sctp_cmd_setup_t2(struct sctp_cmd_seq *cmds,
812 struct sctp_association *asoc,
813 struct sctp_chunk *chunk)
815 struct sctp_transport *t;
817 if (chunk->transport)
818 t = chunk->transport;
820 t = sctp_assoc_choose_alter_transport(asoc,
821 asoc->shutdown_last_sent_to);
822 chunk->transport = t;
824 asoc->shutdown_last_sent_to = t;
825 asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto;
828 static void sctp_cmd_assoc_update(struct sctp_cmd_seq *cmds,
829 struct sctp_association *asoc,
830 struct sctp_association *new)
832 struct net *net = sock_net(asoc->base.sk);
833 struct sctp_chunk *abort;
835 if (!sctp_assoc_update(asoc, new))
838 abort = sctp_make_abort(asoc, NULL, sizeof(struct sctp_errhdr));
840 sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
841 sctp_add_cmd_sf(cmds, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
843 sctp_add_cmd_sf(cmds, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNABORTED));
844 sctp_add_cmd_sf(cmds, SCTP_CMD_ASSOC_FAILED,
845 SCTP_PERR(SCTP_ERROR_RSRC_LOW));
846 SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
847 SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
850 /* Helper function to change the state of an association. */
851 static void sctp_cmd_new_state(struct sctp_cmd_seq *cmds,
852 struct sctp_association *asoc,
853 enum sctp_state state)
855 struct sock *sk = asoc->base.sk;
859 pr_debug("%s: asoc:%p[%s]\n", __func__, asoc, sctp_state_tbl[state]);
861 if (sctp_style(sk, TCP)) {
862 /* Change the sk->sk_state of a TCP-style socket that has
863 * successfully completed a connect() call.
865 if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED))
866 inet_sk_set_state(sk, SCTP_SS_ESTABLISHED);
868 /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */
869 if (sctp_state(asoc, SHUTDOWN_RECEIVED) &&
870 sctp_sstate(sk, ESTABLISHED)) {
871 inet_sk_set_state(sk, SCTP_SS_CLOSING);
872 sk->sk_shutdown |= RCV_SHUTDOWN;
876 if (sctp_state(asoc, COOKIE_WAIT)) {
877 /* Reset init timeouts since they may have been
878 * increased due to timer expirations.
880 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] =
882 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] =
886 if (sctp_state(asoc, ESTABLISHED) ||
887 sctp_state(asoc, CLOSED) ||
888 sctp_state(asoc, SHUTDOWN_RECEIVED)) {
889 /* Wake up any processes waiting in the asoc's wait queue in
890 * sctp_wait_for_connect() or sctp_wait_for_sndbuf().
892 if (waitqueue_active(&asoc->wait))
893 wake_up_interruptible(&asoc->wait);
895 /* Wake up any processes waiting in the sk's sleep queue of
896 * a TCP-style or UDP-style peeled-off socket in
897 * sctp_wait_for_accept() or sctp_wait_for_packet().
898 * For a UDP-style socket, the waiters are woken up by the
901 if (!sctp_style(sk, UDP))
902 sk->sk_state_change(sk);
905 if (sctp_state(asoc, SHUTDOWN_PENDING) &&
906 !sctp_outq_is_empty(&asoc->outqueue))
907 sctp_outq_uncork(&asoc->outqueue, GFP_ATOMIC);
910 /* Helper function to delete an association. */
911 static void sctp_cmd_delete_tcb(struct sctp_cmd_seq *cmds,
912 struct sctp_association *asoc)
914 struct sock *sk = asoc->base.sk;
916 /* If it is a non-temporary association belonging to a TCP-style
917 * listening socket that is not closed, do not free it so that accept()
918 * can pick it up later.
920 if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) &&
921 (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
924 sctp_association_free(asoc);
928 * ADDIP Section 4.1 ASCONF Chunk Procedures
929 * A4) Start a T-4 RTO timer, using the RTO value of the selected
930 * destination address (we use active path instead of primary path just
931 * because the primary path may be inactive).
933 static void sctp_cmd_setup_t4(struct sctp_cmd_seq *cmds,
934 struct sctp_association *asoc,
935 struct sctp_chunk *chunk)
937 struct sctp_transport *t;
939 t = sctp_assoc_choose_alter_transport(asoc, chunk->transport);
940 asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto;
941 chunk->transport = t;
944 /* Process an incoming Operation Error Chunk. */
945 static void sctp_cmd_process_operr(struct sctp_cmd_seq *cmds,
946 struct sctp_association *asoc,
947 struct sctp_chunk *chunk)
949 struct sctp_errhdr *err_hdr;
950 struct sctp_ulpevent *ev;
952 while (chunk->chunk_end > chunk->skb->data) {
953 err_hdr = (struct sctp_errhdr *)(chunk->skb->data);
955 ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0,
960 asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
962 switch (err_hdr->cause) {
963 case SCTP_ERROR_UNKNOWN_CHUNK:
965 struct sctp_chunkhdr *unk_chunk_hdr;
967 unk_chunk_hdr = (struct sctp_chunkhdr *)
969 switch (unk_chunk_hdr->type) {
970 /* ADDIP 4.1 A9) If the peer responds to an ASCONF with
971 * an ERROR chunk reporting that it did not recognize
972 * the ASCONF chunk type, the sender of the ASCONF MUST
973 * NOT send any further ASCONF chunks and MUST stop its
974 * T-4 timer.
976 case SCTP_CID_ASCONF:
977 if (asoc->peer.asconf_capable == 0)
980 asoc->peer.asconf_capable = 0;
981 sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP,
982 SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
995 /* Helper function to remove the association's non-primary peer transports. */
998 static void sctp_cmd_del_non_primary(struct sctp_association *asoc)
1000 struct sctp_transport *t;
1001 struct list_head *temp;
1002 struct list_head *pos;
1004 list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
1005 t = list_entry(pos, struct sctp_transport, transports);
1006 if (!sctp_cmp_addr_exact(&t->ipaddr,
1007 &asoc->peer.primary_addr)) {
1008 sctp_assoc_rm_peer(asoc, t);
1013 /* Helper function to set sk_err on a 1-1 style socket. */
1014 static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error)
1016 struct sock *sk = asoc->base.sk;
1018 if (!sctp_style(sk, UDP))
1022 /* Helper function to generate an association change event */
1023 static void sctp_cmd_assoc_change(struct sctp_cmd_seq *commands,
1024 struct sctp_association *asoc,
1027 struct sctp_ulpevent *ev;
1029 ev = sctp_ulpevent_make_assoc_change(asoc, 0, state, 0,
1030 asoc->c.sinit_num_ostreams,
1031 asoc->c.sinit_max_instreams,
1034 asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
1037 static void sctp_cmd_peer_no_auth(struct sctp_cmd_seq *commands,
1038 struct sctp_association *asoc)
1040 struct sctp_ulpevent *ev;
1042 ev = sctp_ulpevent_make_authkey(asoc, 0, SCTP_AUTH_NO_AUTH, GFP_ATOMIC);
1044 asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
1047 /* Helper function to generate an adaptation indication event */
1048 static void sctp_cmd_adaptation_ind(struct sctp_cmd_seq *commands,
1049 struct sctp_association *asoc)
1051 struct sctp_ulpevent *ev;
1053 ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
1056 asoc->stream.si->enqueue_event(&asoc->ulpq, ev);
1060 static void sctp_cmd_t1_timer_update(struct sctp_association *asoc,
1061 enum sctp_event_timeout timer,
1064 struct sctp_transport *t;
1066 t = asoc->init_last_sent_to;
1067 asoc->init_err_counter++;
1069 if (t->init_sent_count > (asoc->init_cycle + 1)) {
1070 asoc->timeouts[timer] *= 2;
1071 if (asoc->timeouts[timer] > asoc->max_init_timeo) {
1072 asoc->timeouts[timer] = asoc->max_init_timeo;
1076 pr_debug("%s: T1[%s] timeout adjustment init_err_counter:%d"
1077 " cycle:%d timeout:%ld\n", __func__, name,
1078 asoc->init_err_counter, asoc->init_cycle,
1079 asoc->timeouts[timer]);
1084 /* Send the whole message, chunk by chunk, to the outqueue.
1085 * This way the whole message is queued up and bundling is
1086 * encouraged for small fragments.
1088 static void sctp_cmd_send_msg(struct sctp_association *asoc,
1089 struct sctp_datamsg *msg, gfp_t gfp)
1091 struct sctp_chunk *chunk;
1093 list_for_each_entry(chunk, &msg->chunks, frag_list)
1094 sctp_outq_tail(&asoc->outqueue, chunk, gfp);
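/* Let the stream scheduler in use for this association account for the
* newly queued message as well.
*/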
1096 asoc->outqueue.sched->enqueue(&asoc->outqueue, msg);
1100 /* These three macros allow us to pull the debugging code out of the
1101 * main flow of sctp_do_sm() to keep attention focused on the real
1102 * functionality there.
1104 #define debug_pre_sfn() \
1105 pr_debug("%s[pre-fn]: ep:%p, %s, %s, asoc:%p[%s], %s\n", __func__, \
1106 ep, sctp_evttype_tbl[event_type], (*debug_fn)(subtype), \
1107 asoc, sctp_state_tbl[state], state_fn->name)
1109 #define debug_post_sfn() \
1110 pr_debug("%s[post-fn]: asoc:%p, status:%s\n", __func__, asoc, \
1111 sctp_status_tbl[status])
1113 #define debug_post_sfx() \
1114 pr_debug("%s[post-sfx]: error:%d, asoc:%p[%s]\n", __func__, error, \
1115 asoc, sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \
1116 sctp_assoc2id(asoc))) ? asoc->state : SCTP_STATE_CLOSED])
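/* Note: by the time debug_post_sfx() runs, the association may already
* have been freed (e.g. by the SCTP_CMD_DELETE_TCB side effect); that is
* why the macro re-validates it through sctp_id2assoc() before reading
* asoc->state.
*/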
1119 * This is the master state machine processing function.
1121 * If you want to understand all of lksctp, this is a
1122 * good place to start.
1124 int sctp_do_sm(struct net *net, enum sctp_event_type event_type,
1125 union sctp_subtype subtype, enum sctp_state state,
1126 struct sctp_endpoint *ep, struct sctp_association *asoc,
1127 void *event_arg, gfp_t gfp)
1129 typedef const char *(printfn_t)(union sctp_subtype);
1130 static printfn_t *table[] = {
1131 NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname,
1133 printfn_t *debug_fn __attribute__ ((unused)) = table[event_type];
1134 const struct sctp_sm_table_entry *state_fn;
1135 struct sctp_cmd_seq commands;
1136 enum sctp_disposition status;
1139 /* Look up the state function, run it, and then process the
1140 * side effects. These three steps are the heart of lksctp.
1142 state_fn = sctp_sm_lookup_event(net, event_type, state, subtype);
1144 sctp_init_cmd_seq(&commands);
1147 status = state_fn->fn(net, ep, asoc, subtype, event_arg, &commands);
1150 error = sctp_side_effects(event_type, subtype, state,
1151 ep, &asoc, event_arg, status,
1158 /*****************************************************************
1159 * This is the master state function side effect processing function.
1160 *****************************************************************/
1161 static int sctp_side_effects(enum sctp_event_type event_type,
1162 union sctp_subtype subtype,
1163 enum sctp_state state,
1164 struct sctp_endpoint *ep,
1165 struct sctp_association **asoc,
1167 enum sctp_disposition status,
1168 struct sctp_cmd_seq *commands,
1173 /* FIXME - Most of the dispositions left today would be categorized
1174 * as "exceptional" dispositions. For those dispositions, it
1175 * may not be proper to run through any of the commands at all.
1176 * For example, the command interpreter might be run only with
1177 * disposition SCTP_DISPOSITION_CONSUME.
1179 if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state,
1186 case SCTP_DISPOSITION_DISCARD:
1187 pr_debug("%s: ignored sctp protocol event - state:%d, "
1188 "event_type:%d, event_id:%d\n", __func__, state,
1189 event_type, subtype.chunk);
1192 case SCTP_DISPOSITION_NOMEM:
1193 /* We ran out of memory, so we need to discard this packet. */
1196 /* BUG--we should now recover some memory, probably by reneging. */
1202 case SCTP_DISPOSITION_DELETE_TCB:
1203 case SCTP_DISPOSITION_ABORT:
1204 /* This should now be a command. */
1208 case SCTP_DISPOSITION_CONSUME:
1210 * We should no longer have much work to do here as the
1211 * real work has been done as explicit commands above.
1215 case SCTP_DISPOSITION_VIOLATION:
1216 net_err_ratelimited("protocol violation state %d chunkid %d\n",
1217 state, subtype.chunk);
1220 case SCTP_DISPOSITION_NOT_IMPL:
1221 pr_warn("unimplemented feature in state %d, event_type %d, event_id %d\n",
1222 state, event_type, subtype.chunk);
1225 case SCTP_DISPOSITION_BUG:
1226 pr_err("bug in state %d, event_type %d, event_id %d\n",
1227 state, event_type, subtype.chunk);
1232 pr_err("impossible disposition %d in state %d, event_type %d, event_id %d\n",
1233 status, state, event_type, subtype.chunk);
1242 /********************************************************************
1243 * 2nd Level Abstractions
1244 ********************************************************************/
1246 /* This is the side-effect interpreter. */
1247 static int sctp_cmd_interpreter(enum sctp_event_type event_type,
1248 union sctp_subtype subtype,
1249 enum sctp_state state,
1250 struct sctp_endpoint *ep,
1251 struct sctp_association *asoc,
1253 enum sctp_disposition status,
1254 struct sctp_cmd_seq *commands,
1257 struct sctp_sock *sp = sctp_sk(ep->base.sk);
1258 struct sctp_chunk *chunk = NULL, *new_obj;
1259 struct sctp_packet *packet;
1260 struct sctp_sackhdr sackh;
1261 struct timer_list *timer;
1262 struct sctp_transport *t;
1263 unsigned long timeout;
1264 struct sctp_cmd *cmd;
1269 if (SCTP_EVENT_T_TIMEOUT != event_type)
1272 /* Note: This whole file is a huge candidate for rework.
1273 * For example, each command could either have its own handler, so
1274 * the loop would look like:
1276 * cmd->handle(x, y, z)
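*
* A rough sketch of that idea (the handler typedef and table name are
* hypothetical, not part of the current code):
*
*	typedef int (*sctp_cmd_handler_fn)(struct sctp_association *asoc,
*					   union sctp_arg arg, gfp_t gfp);
*	static sctp_cmd_handler_fn sctp_cmd_handlers[SCTP_CMD_LAST + 1];
*
*	while ((cmd = sctp_next_cmd(commands)) != NULL)
*		error = sctp_cmd_handlers[cmd->verb](asoc, cmd->obj, gfp);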
1279 while (NULL != (cmd = sctp_next_cmd(commands))) {
1280 switch (cmd->verb) {
1285 case SCTP_CMD_NEW_ASOC:
1286 /* Register a new association. */
1288 sctp_outq_uncork(&asoc->outqueue, gfp);
1292 /* Register with the endpoint. */
1293 asoc = cmd->obj.asoc;
1294 BUG_ON(asoc->peer.primary_path == NULL);
1295 sctp_endpoint_add_asoc(ep, asoc);
1298 case SCTP_CMD_UPDATE_ASSOC:
1299 sctp_cmd_assoc_update(commands, asoc, cmd->obj.asoc);
1302 case SCTP_CMD_PURGE_OUTQUEUE:
1303 sctp_outq_teardown(&asoc->outqueue);
1306 case SCTP_CMD_DELETE_TCB:
1308 sctp_outq_uncork(&asoc->outqueue, gfp);
1311 /* Delete the current association. */
1312 sctp_cmd_delete_tcb(commands, asoc);
1316 case SCTP_CMD_NEW_STATE:
1317 /* Enter a new state. */
1318 sctp_cmd_new_state(commands, asoc, cmd->obj.state);
1321 case SCTP_CMD_REPORT_TSN:
1322 /* Record the arrival of a TSN. */
1323 error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
1324 cmd->obj.u32, NULL);
1327 case SCTP_CMD_REPORT_FWDTSN:
1328 asoc->stream.si->report_ftsn(&asoc->ulpq, cmd->obj.u32);
1331 case SCTP_CMD_PROCESS_FWDTSN:
1332 asoc->stream.si->handle_ftsn(&asoc->ulpq,
1336 case SCTP_CMD_GEN_SACK:
1337 /* Generate a Selective ACK.
1338 * The argument tells us whether to just count
1339 * the packet and MAYBE generate a SACK, or force a SACK out.
1342 force = cmd->obj.i32;
1343 error = sctp_gen_sack(asoc, force, commands);
1346 case SCTP_CMD_PROCESS_SACK:
1347 /* Process an inbound SACK. */
1348 error = sctp_cmd_process_sack(commands, asoc,
1352 case SCTP_CMD_GEN_INIT_ACK:
1353 /* Generate an INIT ACK chunk. */
1354 new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC,
1359 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1360 SCTP_CHUNK(new_obj));
1363 case SCTP_CMD_PEER_INIT:
1364 /* Process a unified INIT from the peer.
1365 * Note: Only used during INIT-ACK processing. If
1366 * there is an error just return to the outer
1367 * layer which will bail.
1369 error = sctp_cmd_process_init(commands, asoc, chunk,
1370 cmd->obj.init, gfp);
1373 case SCTP_CMD_GEN_COOKIE_ECHO:
1374 /* Generate a COOKIE ECHO chunk. */
1375 new_obj = sctp_make_cookie_echo(asoc, chunk);
1378 sctp_chunk_free(cmd->obj.chunk);
1381 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1382 SCTP_CHUNK(new_obj));
1384 /* If there is an ERROR chunk to be sent along with
1385 * the COOKIE_ECHO, send it, too.
1388 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1389 SCTP_CHUNK(cmd->obj.chunk));
1391 if (new_obj->transport) {
1392 new_obj->transport->init_sent_count++;
1393 asoc->init_last_sent_to = new_obj->transport;
1396 /* FIXME - Eventually come up with a cleaner way to
1397 * enable COOKIE-ECHO + DATA bundling during
1398 * multihoming stale cookie scenarios, the following
1399 * command plays with asoc->peer.retran_path to
1400 * avoid the problem of sending the COOKIE-ECHO and
1401 * DATA in different paths, which could result
1402 * in the association being ABORTed if the DATA chunk
1403 * is processed first by the server. Checking the
1404 * init error counter simply causes this command
1405 * to be executed only during failed attempts of
1406 * association establishment.
1408 if ((asoc->peer.retran_path !=
1409 asoc->peer.primary_path) &&
1410 (asoc->init_err_counter > 0)) {
1411 sctp_add_cmd_sf(commands,
1412 SCTP_CMD_FORCE_PRIM_RETRAN,
1418 case SCTP_CMD_GEN_SHUTDOWN:
1419 /* Generate SHUTDOWN when in SHUTDOWN_SENT state.
1420 * Reset error counts.
1422 asoc->overall_error_count = 0;
1424 /* Generate a SHUTDOWN chunk. */
1425 new_obj = sctp_make_shutdown(asoc, chunk);
1428 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1429 SCTP_CHUNK(new_obj));
1432 case SCTP_CMD_CHUNK_ULP:
1433 /* Send a chunk to the sockets layer. */
1434 pr_debug("%s: sm_sideff: chunk_up:%p, ulpq:%p\n",
1435 __func__, cmd->obj.chunk, &asoc->ulpq);
1437 asoc->stream.si->ulpevent_data(&asoc->ulpq,
1442 case SCTP_CMD_EVENT_ULP:
1443 /* Send a notification to the sockets layer. */
1444 pr_debug("%s: sm_sideff: event_up:%p, ulpq:%p\n",
1445 __func__, cmd->obj.ulpevent, &asoc->ulpq);
1447 asoc->stream.si->enqueue_event(&asoc->ulpq,
1451 case SCTP_CMD_REPLY:
1452 /* If the caller has not already corked, do cork. */
1453 if (!asoc->outqueue.cork) {
1454 sctp_outq_cork(&asoc->outqueue);
1457 /* Send a chunk to our peer. */
1458 sctp_outq_tail(&asoc->outqueue, cmd->obj.chunk, gfp);
1461 case SCTP_CMD_SEND_PKT:
1462 /* Send a full packet to our peer. */
1463 packet = cmd->obj.packet;
1464 sctp_packet_transmit(packet, gfp);
1465 sctp_ootb_pkt_free(packet);
1468 case SCTP_CMD_T1_RETRAN:
1469 /* Mark a transport for retransmission. */
1470 sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1474 case SCTP_CMD_RETRAN:
1475 /* Mark a transport for retransmission. */
1476 sctp_retransmit(&asoc->outqueue, cmd->obj.transport,
1480 case SCTP_CMD_ECN_CE:
1481 /* Do delayed CE processing. */
1482 sctp_do_ecn_ce_work(asoc, cmd->obj.u32);
1485 case SCTP_CMD_ECN_ECNE:
1486 /* Do delayed ECNE processing. */
1487 new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32,
1490 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
1491 SCTP_CHUNK(new_obj));
1494 case SCTP_CMD_ECN_CWR:
1495 /* Do delayed CWR processing. */
1496 sctp_do_ecn_cwr_work(asoc, cmd->obj.u32);
1499 case SCTP_CMD_SETUP_T2:
1500 sctp_cmd_setup_t2(commands, asoc, cmd->obj.chunk);
1503 case SCTP_CMD_TIMER_START_ONCE:
1504 timer = &asoc->timers[cmd->obj.to];
1506 if (timer_pending(timer))
1510 case SCTP_CMD_TIMER_START:
1511 timer = &asoc->timers[cmd->obj.to];
1512 timeout = asoc->timeouts[cmd->obj.to];
1515 timer->expires = jiffies + timeout;
1516 sctp_association_hold(asoc);
1520 case SCTP_CMD_TIMER_RESTART:
1521 timer = &asoc->timers[cmd->obj.to];
1522 timeout = asoc->timeouts[cmd->obj.to];
1523 if (!mod_timer(timer, jiffies + timeout))
1524 sctp_association_hold(asoc);
1527 case SCTP_CMD_TIMER_STOP:
1528 timer = &asoc->timers[cmd->obj.to];
1529 if (del_timer(timer))
1530 sctp_association_put(asoc);
1533 case SCTP_CMD_INIT_CHOOSE_TRANSPORT:
1534 chunk = cmd->obj.chunk;
1535 t = sctp_assoc_choose_alter_transport(asoc,
1536 asoc->init_last_sent_to);
1537 asoc->init_last_sent_to = t;
1538 chunk->transport = t;
1539 t->init_sent_count++;
1540 /* Set the new transport as primary */
1541 sctp_assoc_set_primary(asoc, t);
1544 case SCTP_CMD_INIT_RESTART:
1545 /* Do the needed accounting and updates
1546 * associated with restarting an initialization
1547 * timer. Only multiply the timeout by two if
1548 * all transports have been tried at the current timeout.
1551 sctp_cmd_t1_timer_update(asoc,
1552 SCTP_EVENT_TIMEOUT_T1_INIT,
1555 sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
1556 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
1559 case SCTP_CMD_COOKIEECHO_RESTART:
1560 /* Do the needed accounting and updates
1561 * associated with restarting an initialization
1562 * timer. Only multiply the timeout by two if
1563 * all transports have been tried at the current timeout.
1566 sctp_cmd_t1_timer_update(asoc,
1567 SCTP_EVENT_TIMEOUT_T1_COOKIE,
1570 /* If we've sent any data bundled with
1571 * COOKIE-ECHO we need to resend.
1573 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1575 sctp_retransmit_mark(&asoc->outqueue, t,
1579 sctp_add_cmd_sf(commands,
1580 SCTP_CMD_TIMER_RESTART,
1581 SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
1584 case SCTP_CMD_INIT_FAILED:
1585 sctp_cmd_init_failed(commands, asoc, cmd->obj.u32);
1588 case SCTP_CMD_ASSOC_FAILED:
1589 sctp_cmd_assoc_failed(commands, asoc, event_type,
1590 subtype, chunk, cmd->obj.u32);
1593 case SCTP_CMD_INIT_COUNTER_INC:
1594 asoc->init_err_counter++;
1597 case SCTP_CMD_INIT_COUNTER_RESET:
1598 asoc->init_err_counter = 0;
1599 asoc->init_cycle = 0;
1600 list_for_each_entry(t, &asoc->peer.transport_addr_list,
1602 t->init_sent_count = 0;
1606 case SCTP_CMD_REPORT_DUP:
1607 sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
1611 case SCTP_CMD_REPORT_BAD_TAG:
1612 pr_debug("%s: vtag mismatch!\n", __func__);
1615 case SCTP_CMD_STRIKE:
1616 /* Mark one strike against a transport. */
1617 sctp_do_8_2_transport_strike(commands, asoc,
1618 cmd->obj.transport, 0);
1621 case SCTP_CMD_TRANSPORT_IDLE:
1622 t = cmd->obj.transport;
1623 sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE);
1626 case SCTP_CMD_TRANSPORT_HB_SENT:
1627 t = cmd->obj.transport;
1628 sctp_do_8_2_transport_strike(commands, asoc,
1633 case SCTP_CMD_TRANSPORT_ON:
1634 t = cmd->obj.transport;
1635 sctp_cmd_transport_on(commands, asoc, t, chunk);
1638 case SCTP_CMD_HB_TIMERS_START:
1639 sctp_cmd_hb_timers_start(commands, asoc);
1642 case SCTP_CMD_HB_TIMER_UPDATE:
1643 t = cmd->obj.transport;
1644 sctp_transport_reset_hb_timer(t);
1647 case SCTP_CMD_HB_TIMERS_STOP:
1648 sctp_cmd_hb_timers_stop(commands, asoc);
1651 case SCTP_CMD_REPORT_ERROR:
1652 error = cmd->obj.error;
1655 case SCTP_CMD_PROCESS_CTSN:
1656 /* Dummy up a SACK for processing. */
1657 sackh.cum_tsn_ack = cmd->obj.be32;
1658 sackh.a_rwnd = htonl(asoc->peer.rwnd +
1659 asoc->outqueue.outstanding_bytes);
1660 sackh.num_gap_ack_blocks = 0;
1661 sackh.num_dup_tsns = 0;
1662 chunk->subh.sack_hdr = &sackh;
1663 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK,
1667 case SCTP_CMD_DISCARD_PACKET:
1668 /* We need to discard the whole packet.
1669 * Uncork the queue since there might be pending responses.
1672 chunk->pdiscard = 1;
1674 sctp_outq_uncork(&asoc->outqueue, gfp);
1679 case SCTP_CMD_RTO_PENDING:
1680 t = cmd->obj.transport;
1684 case SCTP_CMD_PART_DELIVER:
1685 asoc->stream.si->start_pd(&asoc->ulpq, GFP_ATOMIC);
1688 case SCTP_CMD_RENEGE:
1689 asoc->stream.si->renege_events(&asoc->ulpq,
1694 case SCTP_CMD_SETUP_T4:
1695 sctp_cmd_setup_t4(commands, asoc, cmd->obj.chunk);
1698 case SCTP_CMD_PROCESS_OPERR:
1699 sctp_cmd_process_operr(commands, asoc, chunk);
1701 case SCTP_CMD_CLEAR_INIT_TAG:
1702 asoc->peer.i.init_tag = 0;
1704 case SCTP_CMD_DEL_NON_PRIMARY:
1705 sctp_cmd_del_non_primary(asoc);
1707 case SCTP_CMD_T3_RTX_TIMERS_STOP:
1708 sctp_cmd_t3_rtx_timers_stop(commands, asoc);
1710 case SCTP_CMD_FORCE_PRIM_RETRAN:
1711 t = asoc->peer.retran_path;
1712 asoc->peer.retran_path = asoc->peer.primary_path;
1713 sctp_outq_uncork(&asoc->outqueue, gfp);
1715 asoc->peer.retran_path = t;
1717 case SCTP_CMD_SET_SK_ERR:
1718 sctp_cmd_set_sk_err(asoc, cmd->obj.error);
1720 case SCTP_CMD_ASSOC_CHANGE:
1721 sctp_cmd_assoc_change(commands, asoc,
1724 case SCTP_CMD_ADAPTATION_IND:
1725 sctp_cmd_adaptation_ind(commands, asoc);
1727 case SCTP_CMD_PEER_NO_AUTH:
1728 sctp_cmd_peer_no_auth(commands, asoc);
1731 case SCTP_CMD_ASSOC_SHKEY:
1732 error = sctp_auth_asoc_init_active_key(asoc,
1735 case SCTP_CMD_UPDATE_INITTAG:
1736 asoc->peer.i.init_tag = cmd->obj.u32;
1738 case SCTP_CMD_SEND_MSG:
1739 if (!asoc->outqueue.cork) {
1740 sctp_outq_cork(&asoc->outqueue);
1743 sctp_cmd_send_msg(asoc, cmd->obj.msg, gfp);
1745 case SCTP_CMD_PURGE_ASCONF_QUEUE:
1746 sctp_asconf_queue_teardown(asoc);
1749 case SCTP_CMD_SET_ASOC:
1750 if (asoc && local_cork) {
1751 sctp_outq_uncork(&asoc->outqueue, gfp);
1754 asoc = cmd->obj.asoc;
1758 pr_warn("Impossible command: %u\n",
1768 /* If this is in response to a received chunk, wait until
1769 * we are done with the packet to open the queue so that we don't
1770 * send multiple packets in response to a single request.
1772 if (asoc && SCTP_EVENT_T_CHUNK == event_type && chunk) {
1773 if (chunk->end_of_packet || chunk->singleton)
1774 sctp_outq_uncork(&asoc->outqueue, gfp);
1775 } else if (local_cork)
1776 sctp_outq_uncork(&asoc->outqueue, gfp);
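/* data_ready_signalled is set once sk_data_ready() has been called while
* this packet was being processed, so the wakeup is not repeated for every
* chunk; clear it here to re-arm the notification for the next packet.
*/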
1778 if (sp->data_ready_signalled)
1779 sp->data_ready_signalled = 0;