net/sctp/output.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* SCTP kernel implementation
3  * (C) Copyright IBM Corp. 2001, 2004
4  * Copyright (c) 1999-2000 Cisco, Inc.
5  * Copyright (c) 1999-2001 Motorola, Inc.
6  *
7  * This file is part of the SCTP kernel implementation
8  *
9  * These functions handle output processing.
10  *
11  * Please send any bug reports or fixes you make to the
12  * email address(es):
13  *    lksctp developers <linux-sctp@vger.kernel.org>
14  *
15  * Written or modified by:
16  *    La Monte H.P. Yarroll <piggy@acm.org>
17  *    Karl Knutson          <karl@athena.chicago.il.us>
18  *    Jon Grimm             <jgrimm@austin.ibm.com>
19  *    Sridhar Samudrala     <sri@us.ibm.com>
20  */
21
22 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/wait.h>
27 #include <linux/time.h>
28 #include <linux/ip.h>
29 #include <linux/ipv6.h>
30 #include <linux/init.h>
31 #include <linux/slab.h>
32 #include <net/inet_ecn.h>
33 #include <net/ip.h>
34 #include <net/icmp.h>
35 #include <net/net_namespace.h>
36
37 #include <linux/socket.h> /* for sa_family_t */
38 #include <net/sock.h>
39
40 #include <net/sctp/sctp.h>
41 #include <net/sctp/sm.h>
42 #include <net/sctp/checksum.h>
43
44 /* Forward declarations for private helpers. */
45 static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
46                                                  struct sctp_chunk *chunk);
47 static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
48                                                   struct sctp_chunk *chunk);
49 static void sctp_packet_append_data(struct sctp_packet *packet,
50                                     struct sctp_chunk *chunk);
51 static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
52                                            struct sctp_chunk *chunk,
53                                            u16 chunk_len);
54
55 static void sctp_packet_reset(struct sctp_packet *packet)
56 {
57         /* sctp_packet_transmit() relies on this to reset size to the
58          * current overhead after sending packets.
59          */
60         packet->size = packet->overhead;
61
62         packet->has_cookie_echo = 0;
63         packet->has_sack = 0;
64         packet->has_data = 0;
65         packet->has_auth = 0;
66         packet->ipfragok = 0;
67         packet->auth = NULL;
68 }
69
70 /* Configure a packet.
71  * This is a follow-up set of per-flush initializations after sctp_packet_init().
72  */
73 void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
74                         int ecn_capable)
75 {
76         struct sctp_transport *tp = packet->transport;
77         struct sctp_association *asoc = tp->asoc;
78         struct sctp_sock *sp = NULL;
79         struct sock *sk;
80
81         pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
82         packet->vtag = vtag;
83
84         /* do the following jobs only once per flush schedule */
85         if (!sctp_packet_empty(packet))
86                 return;
87
88         /* set packet max_size with pathmtu, then calculate overhead */
89         packet->max_size = tp->pathmtu;
90
91         if (asoc) {
92                 sk = asoc->base.sk;
93                 sp = sctp_sk(sk);
94         }
95         packet->overhead = sctp_mtu_payload(sp, 0, 0);
96         packet->size = packet->overhead;
97
98         if (!asoc)
99                 return;
100
101         /* update dst or transport pathmtu if needed */
102         if (!sctp_transport_dst_check(tp)) {
103                 sctp_transport_route(tp, NULL, sp);
104                 if (asoc->param_flags & SPP_PMTUD_ENABLE)
105                         sctp_assoc_sync_pmtu(asoc);
106         } else if (!sctp_transport_pl_enabled(tp) &&
107                    asoc->param_flags & SPP_PMTUD_ENABLE) {
108                 if (!sctp_transport_pmtu_check(tp))
109                         sctp_assoc_sync_pmtu(asoc);
110         }
111
112         if (asoc->pmtu_pending) {
113                 if (asoc->param_flags & SPP_PMTUD_ENABLE)
114                         sctp_assoc_sync_pmtu(asoc);
115                 asoc->pmtu_pending = 0;
116         }
117
118         /* If there is a prepend chunk, stick it on the list before
119          * any other chunks get appended.
120          */
121         if (ecn_capable) {
122                 struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
123
124                 if (chunk)
125                         sctp_packet_append_chunk(packet, chunk);
126         }
127
128         if (!tp->dst)
129                 return;
130
131         /* set packet max_size with gso_max_size if gso is enabled */
132         rcu_read_lock();
133         if (__sk_dst_get(sk) != tp->dst) {
134                 dst_hold(tp->dst);
135                 sk_setup_caps(sk, tp->dst);
136         }
137         packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
138                                           : asoc->pathmtu;
139         rcu_read_unlock();
140 }
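
/* Illustrative sketch, not part of the original file: how the values set up
 * by sctp_packet_config() relate to each other.  Assuming plain IPv4 with no
 * UDP encapsulation, the overhead is roughly the IP header (20 bytes) plus
 * the common SCTP header (12 bytes), and whatever remains of max_size is
 * room for chunks.  The helper name is hypothetical and nothing calls it.
 */
static size_t __maybe_unused sctp_packet_room_example(const struct sctp_packet *packet)
{
        /* e.g. overhead = 32, max_size = 1500: about 1468 bytes of chunk
         * room remain while the packet is still empty (size == overhead).
         */
        return packet->max_size > packet->size ?
               packet->max_size - packet->size : 0;
}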
141
142 /* Initialize the packet structure. */
143 void sctp_packet_init(struct sctp_packet *packet,
144                       struct sctp_transport *transport,
145                       __u16 sport, __u16 dport)
146 {
147         pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);
148
149         packet->transport = transport;
150         packet->source_port = sport;
151         packet->destination_port = dport;
152         INIT_LIST_HEAD(&packet->chunk_list);
153         /* The overhead will be calculated by sctp_packet_config() */
154         packet->overhead = 0;
155         sctp_packet_reset(packet);
156         packet->vtag = 0;
157 }
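
/* Illustrative sketch, not part of the original file: the life cycle these
 * helpers are used in.  The real callers (sctp_outq_flush() and friends)
 * operate on the packet embedded in the transport; this simplified,
 * hypothetical function only shows the call order and skips error handling.
 */
static void __maybe_unused sctp_packet_lifecycle_example(struct sctp_transport *tp,
                                                         struct sctp_chunk *chunk,
                                                         __u16 sport, __u16 dport,
                                                         __u32 vtag)
{
        struct sctp_packet pkt;

        sctp_packet_init(&pkt, tp, sport, dport);
        sctp_packet_config(&pkt, vtag, 0);

        /* Append (and possibly bundle) the chunk; transmit whatever ended
         * up in the packet and release anything still queued on it.
         */
        if (sctp_packet_transmit_chunk(&pkt, chunk, 0, GFP_ATOMIC) == SCTP_XMIT_OK)
                sctp_packet_transmit(&pkt, GFP_ATOMIC);

        sctp_packet_free(&pkt);
}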
158
159 /* Free a packet.  */
160 void sctp_packet_free(struct sctp_packet *packet)
161 {
162         struct sctp_chunk *chunk, *tmp;
163
164         pr_debug("%s: packet:%p\n", __func__, packet);
165
166         list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
167                 list_del_init(&chunk->list);
168                 sctp_chunk_free(chunk);
169         }
170 }
171
172 /* This routine tries to append the chunk to the offered packet. If adding
173  * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO chunk
174  * is not present in the packet, it transmits the input packet.
175  * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
176  * as it can fit in the packet, but any more data that does not fit in this
177  * packet can be sent only after receiving the COOKIE_ACK.
178  */
179 enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
180                                           struct sctp_chunk *chunk,
181                                           int one_packet, gfp_t gfp)
182 {
183         enum sctp_xmit retval;
184
185         pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
186                  packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
187
188         switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
189         case SCTP_XMIT_PMTU_FULL:
190                 if (!packet->has_cookie_echo) {
191                         int error = 0;
192
193                         error = sctp_packet_transmit(packet, gfp);
194                         if (error < 0)
195                                 chunk->skb->sk->sk_err = -error;
196
197                         /* If we have an empty packet, then we can NOT ever
198                          * return PMTU_FULL.
199                          */
200                         if (!one_packet)
201                                 retval = sctp_packet_append_chunk(packet,
202                                                                   chunk);
203                 }
204                 break;
205
206         case SCTP_XMIT_RWND_FULL:
207         case SCTP_XMIT_OK:
208         case SCTP_XMIT_DELAY:
209                 break;
210         }
211
212         return retval;
213 }
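
/* Illustrative sketch, not part of the original file: how a caller typically
 * reacts to the enum sctp_xmit codes returned above.  The real policy lives
 * in the output queue code; this hypothetical handler only maps each code to
 * "chunk consumed or not".
 */
static bool __maybe_unused sctp_xmit_status_example(enum sctp_xmit status)
{
        switch (status) {
        case SCTP_XMIT_OK:
                return true;    /* chunk was bundled into the packet */
        case SCTP_XMIT_PMTU_FULL:
                return false;   /* packet had to be flushed; retry the chunk */
        case SCTP_XMIT_RWND_FULL:
                return false;   /* peer advertised no buffer space; requeue */
        case SCTP_XMIT_DELAY:
                return false;   /* Nagle-style delay; wait for more data */
        }
        return false;
}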
214
215 /* Try to bundle a pad chunk into a packet with a heartbeat chunk for a PLPMTUD probe */
216 static enum sctp_xmit sctp_packet_bundle_pad(struct sctp_packet *pkt, struct sctp_chunk *chunk)
217 {
218         struct sctp_transport *t = pkt->transport;
219         struct sctp_chunk *pad;
220         int overhead = 0;
221
222         if (!chunk->pmtu_probe)
223                 return SCTP_XMIT_OK;
224
225         /* calculate the Padding Data size for the pad chunk */
226         overhead += sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
227         overhead += sizeof(struct sctp_sender_hb_info) + sizeof(struct sctp_pad_chunk);
228         pad = sctp_make_pad(t->asoc, t->pl.probe_size - overhead);
229         if (!pad)
230                 return SCTP_XMIT_DELAY;
231
232         list_add_tail(&pad->list, &pkt->chunk_list);
233         pkt->size += SCTP_PAD4(ntohs(pad->chunk_hdr->length));
234         chunk->transport = t;
235
236         return SCTP_XMIT_OK;
237 }
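
/* Illustrative sketch, not part of the original file: the padding arithmetic
 * used above.  A PLPMTUD probe of probe_size bytes carries a HEARTBEAT chunk
 * plus a PAD chunk, so only what is left after the fixed headers becomes PAD
 * payload.  The helper name is hypothetical.
 */
static int __maybe_unused sctp_probe_pad_len_example(int probe_size)
{
        int overhead = sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr) +
                       sizeof(struct sctp_sender_hb_info) +
                       sizeof(struct sctp_pad_chunk);

        /* e.g. a 1200-byte probe carries (1200 - overhead) bytes of padding */
        return probe_size > overhead ? probe_size - overhead : 0;
}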
238
239 /* Try to bundle an auth chunk into the packet. */
240 static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
241                                               struct sctp_chunk *chunk)
242 {
243         struct sctp_association *asoc = pkt->transport->asoc;
244         enum sctp_xmit retval = SCTP_XMIT_OK;
245         struct sctp_chunk *auth;
246
247         /* if we don't have an association, we can't do authentication */
248         if (!asoc)
249                 return retval;
250
251         /* See if this is an auth chunk we are bundling or if
252          * auth is already bundled.
253          */
254         if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
255                 return retval;
256
257         /* if the peer did not request this chunk to be authenticated,
258          * don't do it
259          */
260         if (!chunk->auth)
261                 return retval;
262
263         auth = sctp_make_auth(asoc, chunk->shkey->key_id);
264         if (!auth)
265                 return retval;
266
267         auth->shkey = chunk->shkey;
268         sctp_auth_shkey_hold(auth->shkey);
269
270         retval = __sctp_packet_append_chunk(pkt, auth);
271
272         if (retval != SCTP_XMIT_OK)
273                 sctp_chunk_free(auth);
274
275         return retval;
276 }
277
278 /* Try to bundle a SACK with the packet. */
279 static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
280                                               struct sctp_chunk *chunk)
281 {
282         enum sctp_xmit retval = SCTP_XMIT_OK;
283
284         /* If sending DATA and we haven't already bundled a SACK, try to
285          * bundle one into the packet.
286          */
287         if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
288             !pkt->has_cookie_echo) {
289                 struct sctp_association *asoc;
290                 struct timer_list *timer;
291                 asoc = pkt->transport->asoc;
292                 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
293
294                 /* If the SACK timer is running, we have a pending SACK */
295                 if (timer_pending(timer)) {
296                         struct sctp_chunk *sack;
297
298                         if (pkt->transport->sack_generation !=
299                             pkt->transport->asoc->peer.sack_generation)
300                                 return retval;
301
302                         asoc->a_rwnd = asoc->rwnd;
303                         sack = sctp_make_sack(asoc);
304                         if (sack) {
305                                 retval = __sctp_packet_append_chunk(pkt, sack);
306                                 if (retval != SCTP_XMIT_OK) {
307                                         sctp_chunk_free(sack);
308                                         goto out;
309                                 }
310                                 SCTP_INC_STATS(asoc->base.net,
311                                                SCTP_MIB_OUTCTRLCHUNKS);
312                                 asoc->stats.octrlchunks++;
313                                 asoc->peer.sack_needed = 0;
314                                 if (del_timer(timer))
315                                         sctp_association_put(asoc);
316                         }
317                 }
318         }
319 out:
320         return retval;
321 }
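
/* Illustrative sketch, not part of the original file: the conditions under
 * which the function above will even consider piggy-backing a SACK onto an
 * outgoing packet.  Hypothetical helper; the sack_generation check and the
 * actual SACK construction are omitted.
 */
static bool __maybe_unused sctp_should_bundle_sack_example(struct sctp_packet *pkt,
                                                           struct sctp_chunk *chunk)
{
        struct sctp_association *asoc = pkt->transport->asoc;

        return sctp_chunk_is_data(chunk) && !pkt->has_sack &&
               !pkt->has_cookie_echo &&
               timer_pending(&asoc->timers[SCTP_EVENT_TIMEOUT_SACK]);
}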
322
323
324 /* Append a chunk to the offered packet reporting back any inability to do
325  * so.
326  */
327 static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
328                                                  struct sctp_chunk *chunk)
329 {
330         __u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
331         enum sctp_xmit retval = SCTP_XMIT_OK;
332
333         /* Check to see if this chunk will fit into the packet */
334         retval = sctp_packet_will_fit(packet, chunk, chunk_len);
335         if (retval != SCTP_XMIT_OK)
336                 goto finish;
337
338         /* We believe that this chunk is OK to add to the packet */
339         switch (chunk->chunk_hdr->type) {
340         case SCTP_CID_DATA:
341         case SCTP_CID_I_DATA:
342                 /* Account for the data being in the packet */
343                 sctp_packet_append_data(packet, chunk);
344                 /* Disallow SACK bundling after DATA. */
345                 packet->has_sack = 1;
346                 /* Disallow AUTH bundling after DATA */
347                 packet->has_auth = 1;
348                 /* Let it be known that the packet has DATA in it */
349                 packet->has_data = 1;
350                 /* timestamp the chunk for rtx purposes */
351                 chunk->sent_at = jiffies;
352                 /* Mainly used for prsctp RTX policy */
353                 chunk->sent_count++;
354                 break;
355         case SCTP_CID_COOKIE_ECHO:
356                 packet->has_cookie_echo = 1;
357                 break;
358
359         case SCTP_CID_SACK:
360                 packet->has_sack = 1;
361                 if (chunk->asoc)
362                         chunk->asoc->stats.osacks++;
363                 break;
364
365         case SCTP_CID_AUTH:
366                 packet->has_auth = 1;
367                 packet->auth = chunk;
368                 break;
369         }
370
371         /* It is OK to send this chunk.  */
372         list_add_tail(&chunk->list, &packet->chunk_list);
373         packet->size += chunk_len;
374         chunk->transport = packet->transport;
375 finish:
376         return retval;
377 }
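
/* Illustrative sketch, not part of the original file: why chunk_len above is
 * rounded with SCTP_PAD4().  Every chunk occupies a 4-byte-aligned slice of
 * the packet, so a chunk advertising 13 bytes still consumes 16 bytes of
 * packet->size.  Hypothetical helper.
 */
static __u16 __maybe_unused sctp_chunk_wire_len_example(__u16 advertised_len)
{
        return SCTP_PAD4(advertised_len);       /* e.g. 13 -> 16, 16 -> 16 */
}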
378
379 /* Append a chunk to the offered packet reporting back any inability to do
380  * so.
381  */
382 enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
383                                         struct sctp_chunk *chunk)
384 {
385         enum sctp_xmit retval = SCTP_XMIT_OK;
386
387         pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);
388
389         /* Data chunks are special.  Before seeing what else we can
390          * bundle into this packet, check to see if we are allowed to
391          * send this DATA.
392          */
393         if (sctp_chunk_is_data(chunk)) {
394                 retval = sctp_packet_can_append_data(packet, chunk);
395                 if (retval != SCTP_XMIT_OK)
396                         goto finish;
397         }
398
399         /* Try to bundle AUTH chunk */
400         retval = sctp_packet_bundle_auth(packet, chunk);
401         if (retval != SCTP_XMIT_OK)
402                 goto finish;
403
404         /* Try to bundle SACK chunk */
405         retval = sctp_packet_bundle_sack(packet, chunk);
406         if (retval != SCTP_XMIT_OK)
407                 goto finish;
408
409         retval = __sctp_packet_append_chunk(packet, chunk);
410         if (retval != SCTP_XMIT_OK)
411                 goto finish;
412
413         retval = sctp_packet_bundle_pad(packet, chunk);
414
415 finish:
416         return retval;
417 }
418
419 static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
420 {
421         if (SCTP_OUTPUT_CB(head)->last == head)
422                 skb_shinfo(head)->frag_list = skb;
423         else
424                 SCTP_OUTPUT_CB(head)->last->next = skb;
425         SCTP_OUTPUT_CB(head)->last = skb;
426
427         head->truesize += skb->truesize;
428         head->data_len += skb->len;
429         head->len += skb->len;
430         refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
431
432         __skb_header_release(skb);
433 }
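
/* Illustrative sketch, not part of the original file: after the chaining
 * above, every extra segment hangs off the head skb's frag_list and its
 * length is already accounted in head->len.  A hypothetical walk over the
 * chain therefore arrives at the same total.
 */
static unsigned int __maybe_unused sctp_gso_chain_len_example(const struct sk_buff *head)
{
        const struct sk_buff *seg;
        unsigned int len = skb_headlen(head);

        skb_walk_frags(head, seg)
                len += seg->len;

        return len;     /* matches head->len once all segments are appended */
}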
434
435 static int sctp_packet_pack(struct sctp_packet *packet,
436                             struct sk_buff *head, int gso, gfp_t gfp)
437 {
438         struct sctp_transport *tp = packet->transport;
439         struct sctp_auth_chunk *auth = NULL;
440         struct sctp_chunk *chunk, *tmp;
441         int pkt_count = 0, pkt_size;
442         struct sock *sk = head->sk;
443         struct sk_buff *nskb;
444         int auth_len = 0;
445
446         if (gso) {
447                 skb_shinfo(head)->gso_type = sk->sk_gso_type;
448                 SCTP_OUTPUT_CB(head)->last = head;
449         } else {
450                 nskb = head;
451                 pkt_size = packet->size;
452                 goto merge;
453         }
454
455         do {
456                 /* calculate the pkt_size and alloc nskb */
457                 pkt_size = packet->overhead;
458                 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
459                                          list) {
460                         int padded = SCTP_PAD4(chunk->skb->len);
461
462                         if (chunk == packet->auth)
463                                 auth_len = padded;
464                         else if (auth_len + padded + packet->overhead >
465                                  tp->pathmtu)
466                                 return 0;
467                         else if (pkt_size + padded > tp->pathmtu)
468                                 break;
469                         pkt_size += padded;
470                 }
471                 nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
472                 if (!nskb)
473                         return 0;
474                 skb_reserve(nskb, packet->overhead + MAX_HEADER);
475
476 merge:
477                 /* merge chunks into nskb and append nskb into head list */
478                 pkt_size -= packet->overhead;
479                 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
480                         int padding;
481
482                         list_del_init(&chunk->list);
483                         if (sctp_chunk_is_data(chunk)) {
484                                 if (!sctp_chunk_retransmitted(chunk) &&
485                                     !tp->rto_pending) {
486                                         chunk->rtt_in_progress = 1;
487                                         tp->rto_pending = 1;
488                                 }
489                         }
490
491                         padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
492                         if (padding)
493                                 skb_put_zero(chunk->skb, padding);
494
495                         if (chunk == packet->auth)
496                                 auth = (struct sctp_auth_chunk *)
497                                                         skb_tail_pointer(nskb);
498
499                         skb_put_data(nskb, chunk->skb->data, chunk->skb->len);
500
501                         pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
502                                  chunk,
503                                  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
504                                  chunk->has_tsn ? "TSN" : "No TSN",
505                                  chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
506                                  ntohs(chunk->chunk_hdr->length), chunk->skb->len,
507                                  chunk->rtt_in_progress);
508
509                         pkt_size -= SCTP_PAD4(chunk->skb->len);
510
511                         if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
512                                 sctp_chunk_free(chunk);
513
514                         if (!pkt_size)
515                                 break;
516                 }
517
518                 if (auth) {
519                         sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
520                                                  packet->auth->shkey, gfp);
521                         /* free auth if no more chunks, or add it back */
522                         if (list_empty(&packet->chunk_list))
523                                 sctp_chunk_free(packet->auth);
524                         else
525                                 list_add(&packet->auth->list,
526                                          &packet->chunk_list);
527                 }
528
529                 if (gso)
530                         sctp_packet_gso_append(head, nskb);
531
532                 pkt_count++;
533         } while (!list_empty(&packet->chunk_list));
534
535         if (gso) {
536                 memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
537                                         sizeof(struct inet6_skb_parm)));
538                 skb_shinfo(head)->gso_segs = pkt_count;
539                 skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
540                 goto chksum;
541         }
542
543         if (sctp_checksum_disable)
544                 return 1;
545
546         if (!(tp->dst->dev->features & NETIF_F_SCTP_CRC) ||
547             dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port) {
548                 struct sctphdr *sh =
549                         (struct sctphdr *)skb_transport_header(head);
550
551                 sh->checksum = sctp_compute_cksum(head, 0);
552         } else {
553 chksum:
554                 head->ip_summed = CHECKSUM_PARTIAL;
555                 head->csum_not_inet = 1;
556                 head->csum_start = skb_transport_header(head) - head->head;
557                 head->csum_offset = offsetof(struct sctphdr, checksum);
558         }
559
560         return pkt_count;
561 }
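
/* Illustrative sketch, not part of the original file: when the code above can
 * leave the CRC32c checksum to the NIC and when it must compute it in
 * software.  Hypothetical predicate mirroring the conditions used in
 * sctp_packet_pack() for the non-GSO case.
 */
static bool __maybe_unused sctp_csum_in_software_example(const struct sctp_packet *packet)
{
        struct sctp_transport *tp = packet->transport;

        if (sctp_checksum_disable)
                return false;   /* checksum skipped entirely */

        return !(tp->dst->dev->features & NETIF_F_SCTP_CRC) ||
               dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port;
}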
562
563 /* All packets are sent to the network through this function from
564  * sctp_outq_tail().
565  *
566  * The return value is always 0 for now.
567  */
568 int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
569 {
570         struct sctp_transport *tp = packet->transport;
571         struct sctp_association *asoc = tp->asoc;
572         struct sctp_chunk *chunk, *tmp;
573         int pkt_count, gso = 0;
574         struct sk_buff *head;
575         struct sctphdr *sh;
576         struct sock *sk;
577
578         pr_debug("%s: packet:%p\n", __func__, packet);
579         if (list_empty(&packet->chunk_list))
580                 return 0;
581         chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
582         sk = chunk->skb->sk;
583
584         /* check gso */
585         if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) {
586                 if (!sk_can_gso(sk)) {
587                         pr_err_once("Trying to GSO but underlying device doesn't support it.");
588                         goto out;
589                 }
590                 gso = 1;
591         }
592
593         /* alloc head skb */
594         head = alloc_skb((gso ? packet->overhead : packet->size) +
595                          MAX_HEADER, gfp);
596         if (!head)
597                 goto out;
598         skb_reserve(head, packet->overhead + MAX_HEADER);
599         skb_set_owner_w(head, sk);
600
601         /* set sctp header */
602         sh = skb_push(head, sizeof(struct sctphdr));
603         skb_reset_transport_header(head);
604         sh->source = htons(packet->source_port);
605         sh->dest = htons(packet->destination_port);
606         sh->vtag = htonl(packet->vtag);
607         sh->checksum = 0;
608
609         /* drop packet if no dst */
610         if (!tp->dst) {
611                 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
612                 kfree_skb(head);
613                 goto out;
614         }
615
616         /* pack up chunks */
617         pkt_count = sctp_packet_pack(packet, head, gso, gfp);
618         if (!pkt_count) {
619                 kfree_skb(head);
620                 goto out;
621         }
622         pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);
623
624         /* start autoclose timer */
625         if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
626             asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
627                 struct timer_list *timer =
628                         &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
629                 unsigned long timeout =
630                         asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
631
632                 if (!mod_timer(timer, jiffies + timeout))
633                         sctp_association_hold(asoc);
634         }
635
636         /* sctp xmit */
637         tp->af_specific->ecn_capable(sk);
638         if (asoc) {
639                 asoc->stats.opackets += pkt_count;
640                 if (asoc->peer.last_sent_to != tp)
641                         asoc->peer.last_sent_to = tp;
642         }
643         head->ignore_df = packet->ipfragok;
644         if (tp->dst_pending_confirm)
645                 skb_set_dst_pending_confirm(head, 1);
646         /* neighbour should be confirmed on successful transmission or
647          * positive error
648          */
649         if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
650             tp->dst_pending_confirm)
651                 tp->dst_pending_confirm = 0;
652
653 out:
654         list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
655                 list_del_init(&chunk->list);
656                 if (!sctp_chunk_is_data(chunk))
657                         sctp_chunk_free(chunk);
658         }
659         sctp_packet_reset(packet);
660         return 0;
661 }
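
/* Illustrative sketch, not part of the original file: the test used at the
 * top of sctp_packet_transmit() to decide whether the bundled packet has to
 * go out as a GSO super-packet.  Hypothetical helper; 'first' is the first
 * chunk on the packet's chunk list.
 */
static bool __maybe_unused sctp_packet_needs_gso_example(const struct sctp_packet *packet,
                                                         const struct sctp_chunk *first)
{
        return packet->size > packet->transport->pathmtu &&
               !packet->ipfragok && !first->pmtu_probe;
}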
662
663 /********************************************************************
664  * 2nd Level Abstractions
665  ********************************************************************/
666
667 /* This private function checks whether a DATA chunk can be added. */
668 static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
669                                                   struct sctp_chunk *chunk)
670 {
671         size_t datasize, rwnd, inflight, flight_size;
672         struct sctp_transport *transport = packet->transport;
673         struct sctp_association *asoc = transport->asoc;
674         struct sctp_outq *q = &asoc->outqueue;
675
676         /* RFC 2960 6.1  Transmission of DATA Chunks
677          *
678          * A) At any given time, the data sender MUST NOT transmit new data to
679          * any destination transport address if its peer's rwnd indicates
680          * that the peer has no buffer space (i.e. rwnd is 0, see Section
681          * 6.2.1).  However, regardless of the value of rwnd (including if it
682          * is 0), the data sender can always have one DATA chunk in flight to
683          * the receiver if allowed by cwnd (see rule B below).  This rule
684          * allows the sender to probe for a change in rwnd that the sender
685          * missed due to the SACK having been lost in transit from the data
686          * receiver to the data sender.
687          */
688
689         rwnd = asoc->peer.rwnd;
690         inflight = q->outstanding_bytes;
691         flight_size = transport->flight_size;
692
693         datasize = sctp_data_size(chunk);
694
695         if (datasize > rwnd && inflight > 0)
696                 /* We have (at least) one data chunk in flight,
697                  * so we can't fall back to rule 6.1 B).
698                  */
699                 return SCTP_XMIT_RWND_FULL;
700
701         /* RFC 2960 6.1  Transmission of DATA Chunks
702          *
703          * B) At any given time, the sender MUST NOT transmit new data
704          * to a given transport address if it has cwnd or more bytes
705          * of data outstanding to that transport address.
706          */
707         /* RFC 2960 7.2.4 & the Implementers Guide 2.8.
708          *
709          * 3) ...
710          *    When a Fast Retransmit is being performed the sender SHOULD
711          *    ignore the value of cwnd and SHOULD NOT delay retransmission.
712          */
713         if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
714             flight_size >= transport->cwnd)
715                 return SCTP_XMIT_RWND_FULL;
716
717         /* Nagle's algorithm to solve small-packet problem:
718          * Inhibit the sending of new chunks when new outgoing data arrives
719          * if any previously transmitted data on the connection remains
720          * unacknowledged.
721          */
722
723         if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
724             !asoc->force_delay)
725                 /* Nothing unacked */
726                 return SCTP_XMIT_OK;
727
728         if (!sctp_packet_empty(packet))
729                 /* Append to packet */
730                 return SCTP_XMIT_OK;
731
732         if (!sctp_state(asoc, ESTABLISHED))
733                 return SCTP_XMIT_OK;
734
735         /* Check whether this chunk and all the rest of the pending data will
736          * fit; if not, delay in hopes of bundling a full-sized packet.
737          */
738         if (chunk->skb->len + q->out_qlen > transport->pathmtu -
739             packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
740                 /* Enough data queued to fill a packet */
741                 return SCTP_XMIT_OK;
742
743         /* Don't delay large message writes that may have been fragmented */
744         if (!chunk->msg->can_delay)
745                 return SCTP_XMIT_OK;
746
747         /* Defer until all data acked or packet full */
748         return SCTP_XMIT_DELAY;
749 }
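
/* Illustrative sketch, not part of the original file: the RFC 2960 6.1 rules
 * applied above, restated over plain sizes with concrete numbers in the
 * comments.  Hypothetical helper; the fast-retransmit exemption and the
 * Nagle/bundling checks are left out.
 */
static enum sctp_xmit __maybe_unused sctp_data_window_example(size_t datasize,
                                                              size_t rwnd,
                                                              size_t inflight,
                                                              size_t flight_size,
                                                              size_t cwnd)
{
        /* Rule A: with rwnd = 0 and inflight = 4000, new data must wait, but
         * a lone chunk may still probe the window when nothing is in flight.
         */
        if (datasize > rwnd && inflight > 0)
                return SCTP_XMIT_RWND_FULL;

        /* Rule B: with flight_size = 12000 and cwnd = 12000, the congestion
         * window for this transport is exhausted.
         */
        if (flight_size >= cwnd)
                return SCTP_XMIT_RWND_FULL;

        return SCTP_XMIT_OK;
}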
750
751 /* This private function handles the accounting when a DATA chunk is added. */
752 static void sctp_packet_append_data(struct sctp_packet *packet,
753                                 struct sctp_chunk *chunk)
754 {
755         struct sctp_transport *transport = packet->transport;
756         size_t datasize = sctp_data_size(chunk);
757         struct sctp_association *asoc = transport->asoc;
758         u32 rwnd = asoc->peer.rwnd;
759
760         /* Keep track of how many bytes are in flight over this transport. */
761         transport->flight_size += datasize;
762
763         /* Keep track of how many bytes are in flight to the receiver. */
764         asoc->outqueue.outstanding_bytes += datasize;
765
766         /* Update our view of the receiver's rwnd. */
767         if (datasize < rwnd)
768                 rwnd -= datasize;
769         else
770                 rwnd = 0;
771
772         asoc->peer.rwnd = rwnd;
773         sctp_chunk_assign_tsn(chunk);
774         asoc->stream.si->assign_number(chunk);
775 }
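
/* Illustrative sketch, not part of the original file: the rwnd bookkeeping
 * above clamps at zero instead of wrapping the unsigned counter.
 * Hypothetical helper.
 */
static u32 __maybe_unused sctp_rwnd_after_send_example(u32 rwnd, size_t datasize)
{
        /* e.g. rwnd = 1000, datasize = 1452 -> new rwnd = 0, not ~4 GB */
        return datasize < rwnd ? rwnd - datasize : 0;
}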
776
777 static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
778                                            struct sctp_chunk *chunk,
779                                            u16 chunk_len)
780 {
781         enum sctp_xmit retval = SCTP_XMIT_OK;
782         size_t psize, pmtu, maxsize;
783
784         /* Don't bundle in this packet if this chunk's auth key doesn't
785          * match other chunks already enqueued on this packet. Also,
786          * don't bundle the chunk with auth key if other chunks in this
787          * packet don't have auth key.
788          */
789         if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
790             (!packet->auth && chunk->shkey &&
791              chunk->chunk_hdr->type != SCTP_CID_AUTH))
792                 return SCTP_XMIT_PMTU_FULL;
793
794         psize = packet->size;
795         if (packet->transport->asoc)
796                 pmtu = packet->transport->asoc->pathmtu;
797         else
798                 pmtu = packet->transport->pathmtu;
799
800         /* Decide if we need to fragment or resubmit later. */
801         if (psize + chunk_len > pmtu) {
802                 /* It's OK to fragment at IP level if any one of the following
803                  * is true:
804                  *      1. The packet is empty (meaning this chunk is greater
805                  *         than the MTU)
806                  *      2. The packet doesn't have any data in it yet and data
807                  *         requires authentication.
808                  */
809                 if (sctp_packet_empty(packet) ||
810                     (!packet->has_data && chunk->auth)) {
811                         /* We no longer do re-fragmentation.
812                          * Just fragment at the IP layer, if we
813                          * actually hit this condition
814                          */
815                         packet->ipfragok = 1;
816                         goto out;
817                 }
818
819                 /* Similarly, if this chunk was built before a PMTU
820                  * reduction, we have to fragment it at IP level now. So
821                  * if the packet already contains something, we need to
822                  * flush.
823                  */
824                 maxsize = pmtu - packet->overhead;
825                 if (packet->auth)
826                         maxsize -= SCTP_PAD4(packet->auth->skb->len);
827                 if (chunk_len > maxsize)
828                         retval = SCTP_XMIT_PMTU_FULL;
829
830                 /* It is also okay to fragment if the chunk we are
831                  * adding is a control chunk, but only if the current packet
832                  * is not a GSO one, as otherwise it causes fragmentation of
833                  * a large frame. So in this case we allow the
834                  * fragmentation by forcing it to be in a new packet.
835                  */
836                 if (!sctp_chunk_is_data(chunk) && packet->has_data)
837                         retval = SCTP_XMIT_PMTU_FULL;
838
839                 if (psize + chunk_len > packet->max_size)
840                         /* Hit GSO/PMTU limit, gotta flush */
841                         retval = SCTP_XMIT_PMTU_FULL;
842
843                 if (!packet->transport->burst_limited &&
844                     psize + chunk_len > (packet->transport->cwnd >> 1))
845                         /* Do not allow a single GSO packet to use more
846                          * than half of cwnd.
847                          */
848                         retval = SCTP_XMIT_PMTU_FULL;
849
850                 if (packet->transport->burst_limited &&
851                     psize + chunk_len > (packet->transport->burst_limited >> 1))
852                         /* Do not allow a single GSO packet to use more
853                          * than half of original cwnd.
854                          */
855                         retval = SCTP_XMIT_PMTU_FULL;
856                 /* Otherwise it will fit in the GSO packet */
857         }
858
859 out:
860         return retval;
861 }
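
/* Illustrative sketch, not part of the original file: the basic "will it fit"
 * decision above with concrete numbers.  Hypothetical helper covering only
 * the empty-packet case; the AUTH, GSO and cwnd-burst refinements are left
 * out.
 */
static enum sctp_xmit __maybe_unused sctp_will_fit_example(size_t psize, u16 chunk_len,
                                                           size_t pmtu, bool packet_empty)
{
        if (psize + chunk_len <= pmtu)
                return SCTP_XMIT_OK;            /* fits as-is */

        /* e.g. a 4000-byte chunk against a 1500-byte PMTU: an empty packet
         * lets the IP layer fragment it, a non-empty one must be flushed
         * before the chunk is tried again.
         */
        return packet_empty ? SCTP_XMIT_OK : SCTP_XMIT_PMTU_FULL;
}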