// SPDX-License-Identifier: GPL-2.0-or-later
/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * conventional data structures.
 *
 * Please send any bug reports or fixes you make to the
 * lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Ardelle Fan           <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* 1st Level Abstractions. */

/* Initialize a new transport from provided memory. */
static struct sctp_transport *sctp_transport_init(struct net *net,
                                                  struct sctp_transport *peer,
                                                  const union sctp_addr *addr,
                                                  gfp_t gfp)
{
        /* Copy in the address. */
        peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
        memcpy(&peer->ipaddr, addr, peer->af_specific->sockaddr_len);
        memset(&peer->saddr, 0, sizeof(union sctp_addr));

        peer->sack_generation = 0;

        /* From 6.3.1 RTO Calculation:
         *
         * C1) Until an RTT measurement has been made for a packet sent to the
         *     given destination transport address, set RTO to the protocol
         *     parameter 'RTO.Initial'.
         */
        peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
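        /* Illustrative note (default settings assumed): RFC 4960 suggests an
         * RTO.Initial of 3 seconds, and net.sctp.rto_initial defaults to
         * 3000 ms, so a new transport normally starts with a 3 s RTO until
         * the first RTT sample updates it via sctp_transport_update_rto().
         */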

        peer->last_time_heard = 0;
        peer->last_time_ecne_reduced = jiffies;

        peer->param_flags = SPP_HB_DISABLE |
                            SPP_PMTUD_ENABLE |
                            SPP_SACKDELAY_ENABLE;

        /* Initialize the default path max_retrans. */
        peer->pathmaxrxt = net->sctp.max_retrans_path;
        peer->pf_retrans = net->sctp.pf_retrans;

        INIT_LIST_HEAD(&peer->transmitted);
        INIT_LIST_HEAD(&peer->send_ready);
        INIT_LIST_HEAD(&peer->transports);

        timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0);
        timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0);
        timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0);
        timer_setup(&peer->probe_timer, sctp_generate_probe_event, 0);
        timer_setup(&peer->proto_unreach_timer,
                    sctp_generate_proto_unreach_event, 0);

        /* Initialize the 64-bit random nonce sent with heartbeat. */
        get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

        refcount_set(&peer->refcnt, 1);

        return peer;
}

/* Allocate and initialize a new transport. */
struct sctp_transport *sctp_transport_new(struct net *net,
                                          const union sctp_addr *addr,
                                          gfp_t gfp)
{
        struct sctp_transport *transport;

        transport = kzalloc(sizeof(*transport), gfp);
        if (!transport)
                goto fail;

        if (!sctp_transport_init(net, transport, addr, gfp))
                goto fail_init;

        SCTP_DBG_OBJCNT_INC(transport);
        return transport;

fail_init:
        kfree(transport);
fail:
        return NULL;
}

/* This transport is no longer needed.  Free it up if possible, or
 * delay until all references to it have been dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
        /* Try to delete the heartbeat timer. */
        if (del_timer(&transport->hb_timer))
                sctp_transport_put(transport);

        /* Delete the T3_rtx timer if it's active.
         * There is no point in not doing this now and letting the
         * structure hang around in memory since we know
         * the transport is going away.
         */
        if (del_timer(&transport->T3_rtx_timer))
                sctp_transport_put(transport);

        if (del_timer(&transport->reconf_timer))
                sctp_transport_put(transport);

        if (del_timer(&transport->probe_timer))
                sctp_transport_put(transport);

        /* Delete the ICMP proto unreachable timer if it's active. */
        if (del_timer(&transport->proto_unreach_timer))
                sctp_transport_put(transport);

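        /* Each del_timer() above that returns nonzero releases the reference
         * the armed timer was holding; the final put below drops the initial
         * reference set up in sctp_transport_init().
         */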
        sctp_transport_put(transport);
}

static void sctp_transport_destroy_rcu(struct rcu_head *head)
{
        struct sctp_transport *transport;

        transport = container_of(head, struct sctp_transport, rcu);

        dst_release(transport->dst);
        kfree(transport);
        SCTP_DBG_OBJCNT_DEC(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
        if (unlikely(refcount_read(&transport->refcnt))) {
                WARN(1, "Attempt to destroy undead transport %p!\n", transport);
                return;
        }

        sctp_packet_free(&transport->packet);

        if (transport->asoc)
                sctp_association_put(transport->asoc);

        call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}

/* Start the T3_rtx timer if it is not already running, and update the
 * heartbeat timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
{
        /* RFC 2960 6.3.2 Retransmission Timer Rules
         *
         * R1) Every time a DATA chunk is sent to any address (including a
         * retransmission), if the T3-rtx timer of that address is not running,
         * start it running so that it will expire after the RTO of that
         * address.
         */

        if (!timer_pending(&transport->T3_rtx_timer))
                if (!mod_timer(&transport->T3_rtx_timer,
                               jiffies + transport->rto))
                        sctp_transport_hold(transport);
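        /* mod_timer() returns 0 when the timer was not already pending, so
         * together with the timer_pending() check above the extra reference
         * is taken exactly once for the newly armed timer; it is dropped
         * again when the timer fires or is deleted in sctp_transport_free().
         */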
}

void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
{
        unsigned long expires;

        /* When a data chunk is sent, reset the heartbeat interval. */
        expires = jiffies + sctp_transport_timeout(transport);
        if ((time_before(transport->hb_timer.expires, expires) ||
             !timer_pending(&transport->hb_timer)) &&
            !mod_timer(&transport->hb_timer,
                       expires + prandom_u32_max(transport->rto)))
                sctp_transport_hold(transport);
}

void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
{
        if (!timer_pending(&transport->reconf_timer))
                if (!mod_timer(&transport->reconf_timer,
                               jiffies + transport->rto))
                        sctp_transport_hold(transport);
}

void sctp_transport_reset_probe_timer(struct sctp_transport *transport)
{
        if (timer_pending(&transport->probe_timer))
                return;
        if (!mod_timer(&transport->probe_timer,
                       jiffies + transport->probe_interval))
                sctp_transport_hold(transport);
}

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
                              struct sctp_association *asoc)
{
        transport->asoc = asoc;
        sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
        /* If we don't have a fresh route, look one up */
        if (!transport->dst || transport->dst->obsolete) {
                sctp_transport_dst_release(transport);
                transport->af_specific->get_dst(transport, &transport->saddr,
                                                &transport->fl, sk);
        }

        if (transport->param_flags & SPP_PMTUD_DISABLE) {
                struct sctp_association *asoc = transport->asoc;

                if (!transport->pathmtu && asoc && asoc->pathmtu)
                        transport->pathmtu = asoc->pathmtu;
                if (transport->pathmtu)
                        return;
        }

        if (transport->dst)
                transport->pathmtu = sctp_dst_mtu(transport->dst);
        else
                transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;

        sctp_transport_pl_update(transport);
}

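/* Illustrative summary of the RFC 8899 (DPLPMTUD) state machine driven by
 * the probe helpers below, assuming the kernel's 1200-byte base PLPMTU:
 *
 *   Base     -> Search   once the base-size probe is acknowledged
 *   Search   -> Complete when probe_size reaches the discovered ceiling
 *   Search   -> Base     on black hole detection (the current pmtu is lost)
 *   Base     -> Error    when even the base PLPMTU cannot be confirmed
 *   Complete -> Search   again after 30 probe intervals, to re-probe for
 *                        a larger path MTU
 */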
bool sctp_transport_pl_send(struct sctp_transport *t)
{
        if (t->pl.probe_count < SCTP_MAX_PROBES)
                goto out;

        t->pl.probe_count = 0;
        if (t->pl.state == SCTP_PL_BASE) {
                if (t->pl.probe_size == SCTP_BASE_PLPMTU) { /* BASE_PLPMTU Confirmation Failed */
                        t->pl.state = SCTP_PL_ERROR; /* Base -> Error */

                        t->pl.pmtu = SCTP_MIN_PLPMTU;
                        t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
                        sctp_assoc_sync_pmtu(t->asoc);
                }
        } else if (t->pl.state == SCTP_PL_SEARCH) {
                if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
                        t->pl.state = SCTP_PL_BASE;  /* Search -> Base */
                        t->pl.probe_size = SCTP_BASE_PLPMTU;
                        t->pl.probe_high = 0;

                        t->pl.pmtu = SCTP_BASE_PLPMTU;
                        t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
                        sctp_assoc_sync_pmtu(t->asoc);
                } else { /* Normal probe failure. */
                        t->pl.probe_high = t->pl.probe_size;
                        t->pl.probe_size = t->pl.pmtu;
                }
        } else if (t->pl.state == SCTP_PL_COMPLETE) {
                if (t->pl.pmtu == t->pl.probe_size) { /* Black Hole Detected */
                        t->pl.state = SCTP_PL_BASE;  /* Search Complete -> Base */
                        t->pl.probe_size = SCTP_BASE_PLPMTU;

                        t->pl.pmtu = SCTP_BASE_PLPMTU;
                        t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
                        sctp_assoc_sync_pmtu(t->asoc);
                }
        }

out:
        if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count < 30 &&
            !t->pl.probe_count)
                t->pl.raise_count++;

        pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
                 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);
}

bool sctp_transport_pl_recv(struct sctp_transport *t)
{
        pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, high: %d\n",
                 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, t->pl.probe_high);

        t->pl.pmtu = t->pl.probe_size;
        t->pl.probe_count = 0;
        if (t->pl.state == SCTP_PL_BASE) {
                t->pl.state = SCTP_PL_SEARCH; /* Base -> Search */
                t->pl.probe_size += SCTP_PL_BIG_STEP;
        } else if (t->pl.state == SCTP_PL_ERROR) {
                t->pl.state = SCTP_PL_SEARCH; /* Error -> Search */

                t->pl.pmtu = t->pl.probe_size;
                t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
                sctp_assoc_sync_pmtu(t->asoc);
                t->pl.probe_size += SCTP_PL_BIG_STEP;
        } else if (t->pl.state == SCTP_PL_SEARCH) {
                if (!t->pl.probe_high) {
                        t->pl.probe_size = min(t->pl.probe_size + SCTP_PL_BIG_STEP,
                                               SCTP_MAX_PLPMTU);
                        return false;
                }
                t->pl.probe_size += SCTP_PL_MIN_STEP;
                if (t->pl.probe_size >= t->pl.probe_high) {
                        t->pl.probe_high = 0;
                        t->pl.raise_count = 0;
                        t->pl.state = SCTP_PL_COMPLETE; /* Search -> Search Complete */

                        t->pl.probe_size = t->pl.pmtu;
                        t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
                        sctp_assoc_sync_pmtu(t->asoc);
                }
        } else if (t->pl.state == SCTP_PL_COMPLETE && t->pl.raise_count == 30) {
                /* Raise probe_size again after 30 * interval in Search Complete */
                t->pl.state = SCTP_PL_SEARCH; /* Search Complete -> Search */
                t->pl.probe_size += SCTP_PL_MIN_STEP;
        }

        return t->pl.state == SCTP_PL_COMPLETE;
}

static bool sctp_transport_pl_toobig(struct sctp_transport *t, u32 pmtu)
{
        pr_debug("%s: PLPMTUD: transport: %p, state: %d, pmtu: %d, size: %d, ptb: %d\n",
                 __func__, t, t->pl.state, t->pl.pmtu, t->pl.probe_size, pmtu);

        if (pmtu < SCTP_MIN_PLPMTU || pmtu >= t->pl.probe_size)
                return false;

        if (t->pl.state == SCTP_PL_BASE) {
                if (pmtu >= SCTP_MIN_PLPMTU && pmtu < SCTP_BASE_PLPMTU) {
                        t->pl.state = SCTP_PL_ERROR; /* Base -> Error */

                        t->pl.pmtu = SCTP_MIN_PLPMTU;
                        t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
                }
        } else if (t->pl.state == SCTP_PL_SEARCH) {
                if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
                        t->pl.state = SCTP_PL_BASE;  /* Search -> Base */
                        t->pl.probe_size = SCTP_BASE_PLPMTU;
                        t->pl.probe_count = 0;

                        t->pl.probe_high = 0;
                        t->pl.pmtu = SCTP_BASE_PLPMTU;
                        t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
                } else if (pmtu > t->pl.pmtu && pmtu < t->pl.probe_size) {
                        t->pl.probe_size = pmtu;
                        t->pl.probe_count = 0;
                }
        } else if (t->pl.state == SCTP_PL_COMPLETE) {
                if (pmtu >= SCTP_BASE_PLPMTU && pmtu < t->pl.pmtu) {
                        t->pl.state = SCTP_PL_BASE;  /* Complete -> Base */
                        t->pl.probe_size = SCTP_BASE_PLPMTU;
                        t->pl.probe_count = 0;

                        t->pl.probe_high = 0;
                        t->pl.pmtu = SCTP_BASE_PLPMTU;
                        t->pathmtu = t->pl.pmtu + sctp_transport_pl_hlen(t);
                }
        }

        return true;
}

bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
        struct sock *sk = t->asoc->base.sk;
        struct dst_entry *dst;
        bool change = true;

        if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
                pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
                                    __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
                /* Use default minimum segment instead */
                pmtu = SCTP_DEFAULT_MINSEGMENT;
        }
        pmtu = SCTP_TRUNC4(pmtu);

        if (sctp_transport_pl_enabled(t))
                return sctp_transport_pl_toobig(t, pmtu - sctp_transport_pl_hlen(t));

        dst = sctp_transport_dst_check(t);
        if (dst) {
                struct sctp_pf *pf = sctp_get_pf_specific(dst->ops->family);
                union sctp_addr addr;

                pf->af->from_sk(&addr, sk);
                pf->to_sk_daddr(&t->ipaddr, sk);
                dst->ops->update_pmtu(dst, sk, NULL, pmtu, true);
                pf->to_sk_daddr(&addr, sk);

                dst = sctp_transport_dst_check(t);
        }

        if (!dst) {
                t->af_specific->get_dst(t, &t->saddr, &t->fl, sk);
                dst = t->dst;
        }

        if (dst) {
                /* Re-fetch, as under layers may have a higher minimum size */
                pmtu = sctp_dst_mtu(dst);
                change = t->pathmtu != pmtu;
        }
        t->pathmtu = pmtu;

        return change;
}

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
                          union sctp_addr *saddr, struct sctp_sock *opt)
{
        struct sctp_association *asoc = transport->asoc;
        struct sctp_af *af = transport->af_specific;

        sctp_transport_dst_release(transport);
        af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));

        if (saddr)
                memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
        else
                af->get_saddr(opt, transport, &transport->fl);

        sctp_transport_pmtu(transport, sctp_opt2sk(opt));

        /* Initialize sk->sk_rcv_saddr, if the transport is the
         * association's active path for getsockname().
         */
        if (transport->dst && asoc &&
            (!asoc->peer.primary_path || transport == asoc->peer.active_path))
                opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk);
}

/* Hold a reference to a transport. */
int sctp_transport_hold(struct sctp_transport *transport)
{
        return refcount_inc_not_zero(&transport->refcnt);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
        if (refcount_dec_and_test(&transport->refcnt))
                sctp_transport_destroy(transport);
}

/* Update the transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
        if (unlikely(!tp->rto_pending))
                /* We should not be doing any RTO updates unless rto_pending is set. */
                pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);

        if (tp->rttvar || tp->srtt) {
                struct net *net = tp->asoc->base.net;
                /* 6.3.1 C3) When a new RTT measurement R' is made, set
                 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
                 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
                 */

                /* Note: The above algorithm has been rewritten to
                 * express rto_beta and rto_alpha as inverse powers
                 * of two to avoid floating point operations.
                 * For example, assuming the default value of RTO.Alpha of
                 * 1/8, rto_alpha would be expressed as 3.
                 */
                tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
                        + (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
                tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
                        + (rtt >> net->sctp.rto_alpha);
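                /* Worked example (illustrative, assuming the default
                 * rto_alpha = 3 and rto_beta = 2, i.e. Alpha = 1/8 and
                 * Beta = 1/4): with srtt = 800, rttvar = 200 and a new
                 * rtt = 600 (all in jiffies),
                 *   rttvar = 200 - 200/4 + |800 - 600|/4 = 200
                 *   srtt   = 800 - 800/8 + 600/8         = 775
                 * so the RTO computed below is 775 + 4 * 200 = 1575 jiffies.
                 */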
        } else {
                /* 6.3.1 C2) When the first RTT measurement R is made, set
                 * SRTT <- R, RTTVAR <- R/2.
                 */
                tp->srtt = rtt;
                tp->rttvar = rtt >> 1;
        }

        /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
         * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
         */
        if (tp->rttvar == 0)
                tp->rttvar = SCTP_CLOCK_GRANULARITY;

        /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
        tp->rto = tp->srtt + (tp->rttvar << 2);

        /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
         * seconds then it is rounded up to RTO.Min seconds.
         */
        if (tp->rto < tp->asoc->rto_min)
                tp->rto = tp->asoc->rto_min;

        /* 6.3.1 C7) A maximum value may be placed on RTO provided it is
         * at least RTO.max seconds.
         */
        if (tp->rto > tp->asoc->rto_max)
                tp->rto = tp->asoc->rto_max;

        sctp_max_rto(tp->asoc, tp);
        tp->rtt = rtt;

        /* Reset rto_pending so that a new RTT measurement is started when a
         * new data chunk is sent.
         */
        tp->rto_pending = 0;

        pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
                 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
                               __u32 sack_ctsn, __u32 bytes_acked)
{
        struct sctp_association *asoc = transport->asoc;
        __u32 cwnd, ssthresh, flight_size, pba, pmtu;

        cwnd = transport->cwnd;
        flight_size = transport->flight_size;

        /* See if we need to exit Fast Recovery first */
        if (asoc->fast_recovery &&
            TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
                asoc->fast_recovery = 0;

        ssthresh = transport->ssthresh;
        pba = transport->partial_bytes_acked;
        pmtu = transport->asoc->pathmtu;

        if (cwnd <= ssthresh) {
                /* RFC 4960 7.2.1
                 * o  When cwnd is less than or equal to ssthresh, an SCTP
                 *    endpoint MUST use the slow-start algorithm to increase
                 *    cwnd only if the current congestion window is being fully
                 *    utilized, an incoming SACK advances the Cumulative TSN
                 *    Ack Point, and the data sender is not in Fast Recovery.
                 *    Only when these three conditions are met can the cwnd be
                 *    increased; otherwise, the cwnd MUST not be increased.
                 *    If these conditions are met, then cwnd MUST be increased
                 *    by, at most, the lesser of 1) the total size of the
                 *    previously outstanding DATA chunk(s) acknowledged, and
                 *    2) the destination's path MTU.  This upper bound protects
                 *    against the ACK-Splitting attack outlined in [SAVAGE99].
                 */
                if (asoc->fast_recovery)
                        return;

                /* The appropriate cwnd increase algorithm is performed
                 * if, and only if, the congestion window is being fully
                 * utilized.  Note that RFC 4960 Errata 3.22 removed the
                 * other condition on ctsn moving.
                 */
                if (flight_size < cwnd)
                        return;

                if (bytes_acked > pmtu)
                        cwnd += pmtu;
                else
                        cwnd += bytes_acked;

                pr_debug("%s: slow start: transport:%p, bytes_acked:%d, "
                         "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n",
                         __func__, transport, bytes_acked, cwnd, ssthresh,
                         flight_size, pba);
        } else {
                /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
                 * upon each SACK arrival, increase partial_bytes_acked
                 * by the total number of bytes of all new chunks
                 * acknowledged in that SACK including chunks
                 * acknowledged by the new Cumulative TSN Ack and by Gap
                 * Ack Blocks. (updated by RFC 4960 Errata 3.22)
                 *
                 * When partial_bytes_acked is greater than cwnd and
                 * before the arrival of the SACK the sender had less
                 * bytes of data outstanding than cwnd (i.e., before
                 * arrival of the SACK, flightsize was less than cwnd),
                 * reset partial_bytes_acked to cwnd. (RFC 4960 Errata
                 * 3.26)
                 *
                 * When partial_bytes_acked is equal to or greater than
                 * cwnd and before the arrival of the SACK the sender
                 * had cwnd or more bytes of data outstanding (i.e.,
                 * before arrival of the SACK, flightsize was greater
                 * than or equal to cwnd), partial_bytes_acked is reset
                 * to (partial_bytes_acked - cwnd). Next, cwnd is
                 * increased by MTU. (RFC 4960 Errata 3.12)
                 */
                if (pba > cwnd && flight_size < cwnd)
                        pba = cwnd;
                if (pba >= cwnd && flight_size >= cwnd) {
                        pba = pba - cwnd;
                        cwnd += pmtu;
                }

                pr_debug("%s: congestion avoidance: transport:%p, "
                         "bytes_acked:%d, cwnd:%d, ssthresh:%d, "
                         "flight_size:%d, pba:%d\n", __func__,
                         transport, bytes_acked, cwnd, ssthresh,
                         flight_size, pba);
        }

        transport->cwnd = cwnd;
        transport->partial_bytes_acked = pba;
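        /* Illustrative numbers (not from the spec): with a 1500-byte path
         * MTU, cwnd = 3000 <= ssthresh and a fully utilized window, a SACK
         * that newly acks 4500 bytes grows cwnd by min(4500, 1500) = 1500 in
         * slow start.  In congestion avoidance the same SACK only adds to
         * partial_bytes_acked, and cwnd grows by one MTU each time pba
         * reaches a full cwnd's worth of acked data.
         */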
}

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
                               enum sctp_lower_cwnd reason)
{
        struct sctp_association *asoc = transport->asoc;

        switch (reason) {
        case SCTP_LOWER_CWND_T3_RTX:
                /* RFC 2960 Section 7.2.3, sctpimpguide
                 * When the T3-rtx timer expires on an address, SCTP should
                 * perform slow start by:
                 *      ssthresh = max(cwnd/2, 4*MTU)
                 *      cwnd = 1*MTU
                 *      partial_bytes_acked = 0
                 */
                transport->ssthresh = max(transport->cwnd/2,
                                          4*asoc->pathmtu);
                transport->cwnd = asoc->pathmtu;

                /* T3-rtx also clears fast recovery */
                asoc->fast_recovery = 0;
                break;

        case SCTP_LOWER_CWND_FAST_RTX:
                /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
                 * destination address(es) to which the missing DATA chunks
                 * were last sent, according to the formula described in
                 * Section 7.2.3.
                 *
                 * RFC 2960 7.2.3, sctpimpguide: Upon detection of packet
                 * losses from SACK (see Section 7.2.4), an endpoint
                 * should do the following:
                 *      ssthresh = max(cwnd/2, 4*MTU)
                 *      cwnd = ssthresh
                 *      partial_bytes_acked = 0
                 */
                if (asoc->fast_recovery)
                        return;

                /* Mark Fast recovery */
                asoc->fast_recovery = 1;
                asoc->fast_recovery_exit = asoc->next_tsn - 1;

                transport->ssthresh = max(transport->cwnd/2,
                                          4*asoc->pathmtu);
                transport->cwnd = transport->ssthresh;
                break;

        case SCTP_LOWER_CWND_ECNE:
                /* RFC 2481 Section 6.1.2.
                 * If the sender receives an ECN-Echo ACK packet
                 * then the sender knows that congestion was encountered in the
                 * network on the path from the sender to the receiver. The
                 * indication of congestion should be treated just as a
                 * congestion loss in non-ECN Capable TCP. That is, the TCP
                 * source halves the congestion window "cwnd" and reduces the
                 * slow start threshold "ssthresh".
                 * A critical condition is that TCP does not react to
                 * congestion indications more than once every window of
                 * data (or more loosely more than once every round-trip time).
                 */
                if (time_after(jiffies, transport->last_time_ecne_reduced +
                                        transport->rtt)) {
                        transport->ssthresh = max(transport->cwnd/2,
                                                  4*asoc->pathmtu);
                        transport->cwnd = transport->ssthresh;
                        transport->last_time_ecne_reduced = jiffies;
                }
                break;

        case SCTP_LOWER_CWND_INACTIVE:
                /* RFC 2960 Section 7.2.1, sctpimpguide
                 * When the endpoint does not transmit data on a given
                 * transport address, the cwnd of the transport address
                 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
                 * NOTE: Although the draft recommends that this check needs
                 * to be done every RTO interval, we do it every heartbeat
                 * interval.
                 */
                transport->cwnd = max(transport->cwnd/2,
                                      4*asoc->pathmtu);
                /* RFC 4960 Errata 3.27.2: also adjust ssthresh */
                transport->ssthresh = transport->cwnd;
                break;
        }

        transport->partial_bytes_acked = 0;

        pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
                 __func__, transport, reason, transport->cwnd,
                 transport->ssthresh);
}

/* Apply the Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 *      if ((flightsize + Max.Burst * MTU) < cwnd)
 *              cwnd = flightsize + Max.Burst * MTU
 */
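/* For example (illustrative, assuming the default Max.Burst of 4): with a
 * 1500-byte path MTU and 2000 bytes in flight, sctp_transport_burst_limited()
 * temporarily clamps cwnd to 2000 + 4 * 1500 = 8000 bytes if that is below
 * the current cwnd; sctp_transport_burst_reset() restores the old value
 * afterwards.
 */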
void sctp_transport_burst_limited(struct sctp_transport *t)
{
        struct sctp_association *asoc = t->asoc;
        u32 old_cwnd = t->cwnd;
        u32 max_burst_bytes;

        if (t->burst_limited || asoc->max_burst == 0)
                return;

        max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
        if (max_burst_bytes < old_cwnd) {
                t->cwnd = max_burst_bytes;
                t->burst_limited = old_cwnd;
        }
}

/* Restore the old cwnd congestion window, after the burst had its
 * desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
        if (t->burst_limited) {
                t->cwnd = t->burst_limited;
                t->burst_limited = 0;
        }
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *trans)
{
        /* RTO + timer slack +/- 50% of RTO */
        unsigned long timeout = trans->rto >> 1;

        if (trans->state != SCTP_UNCONFIRMED &&
            trans->state != SCTP_PF)
                timeout += trans->hbinterval;
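        /* e.g. with a 3 s RTO and the default 30 s HB.Interval this yields
         * roughly 31.5 s; sctp_transport_reset_hb_timer() then adds up to one
         * RTO of random jitter on top (illustrative, default settings).
         */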

        return max_t(unsigned long, timeout, HZ / 5);
}

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
        struct sctp_association *asoc = t->asoc;

        /* RFC 2960 (bis), Section 5.2.4
         * All the congestion control parameters (e.g., cwnd, ssthresh)
         * related to this peer MUST be reset to their initial values
         * (see Section 6.2.1)
         */
        t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
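        /* Initial cwnd per RFC 4960 7.2.1; e.g. with a 1500-byte path MTU
         * this is min(6000, max(3000, 4380)) = 4380 bytes (illustrative).
         */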
        t->burst_limited = 0;
        t->ssthresh = asoc->peer.i.a_rwnd;
        t->rto = asoc->rto_initial;
        sctp_max_rto(asoc, t);
        t->rtt = 0;
        t->srtt = 0;
        t->rttvar = 0;

        /* Reset these additional variables so that we have a clean slate. */
        t->partial_bytes_acked = 0;
        t->flight_size = 0;
        t->error_count = 0;
        t->rto_pending = 0;
        t->hb_sent = 0;

        /* Initialize the state information for SFR-CACC */
        t->cacc.changeover_active = 0;
        t->cacc.cycling_changeover = 0;
        t->cacc.next_tsn_at_change = 0;
        t->cacc.cacc_saw_newack = 0;
}

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
        /* Stop the pending T3_rtx_timer */
        if (del_timer(&t->T3_rtx_timer))
                sctp_transport_put(t);

        sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
        if (!timer_pending(&t->T3_rtx_timer)) {
                if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
                        sctp_transport_hold(t);
        }
}

void sctp_transport_dst_release(struct sctp_transport *t)
{
        dst_release(t->dst);
        t->dst = NULL;
        t->dst_pending_confirm = 0;
}

/* Schedule neighbour confirm */
void sctp_transport_dst_confirm(struct sctp_transport *t)
{
        t->dst_pending_confirm = 1;
}