// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>
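
/* Walk the list of generated segments and transfer the software TX
 * timestamp request from the original GSO skb to the single segment
 * whose sequence range covers the timestamped byte (ts_seq).
 */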
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}
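
/* IPv4-specific GSO entry point: sanity-check the GSO type, make sure the
 * TCP header is in the linear area, and rebuild the pseudo-header checksum
 * for packets that arrive without CHECKSUM_PARTIAL before deferring to the
 * generic tcp_gso_segment().
 */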
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}
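
/* Protocol-independent TCP segmentation.  The large skb is split into
 * MSS-sized segments by skb_segment(); instead of recomputing every TCP
 * checksum from scratch, the original checksum is patched with the folded
 * 1's-complement difference between the old length and the new per-segment
 * length (oldlen/delta), while sequence numbers, FIN/PSH/CWR flags and the
 * socket destructor (for TCP Small Queues accounting) are fixed up as the
 * segment list is walked.
 */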
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* Following permits TCP Small Queues to work well with GSO:
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta,
							   &skb->sk->sk_wmem_alloc));
	}

	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}
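
/* GRO receive for TCP: try to coalesce the incoming skb with an already
 * held packet of the same flow (same source/destination ports, matching
 * TCP options, in-order sequence number).  Any mismatch, special flag
 * (CWR/URG/RST/SYN/FIN) or undersized segment sets "flush" so the held
 * packet is pushed up the stack instead of being merged further.
 */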
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the inner most IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on if we
	 * continue this flow as an atomic flow with a fixed ID or if we use
	 * an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}
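
/* Finalize a coalesced GRO packet before it is handed to the stack: set
 * csum_start/csum_offset and CHECKSUM_PARTIAL so the aggregate looks like
 * a locally generated GSO packet, record the number of coalesced segments
 * in gso_segs, and propagate ECN (CWR) and encapsulation state.
 */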
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
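
/* IPv4 GRO receive hook: validate the TCP checksum against the IPv4
 * pseudo-header (skipped when the packet is already marked for flush),
 * then let the generic tcp_gro_receive() attempt coalescing.
 */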
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}
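
/* IPv4 GRO complete hook: recompute the pseudo-header checksum for the
 * merged packet's new length, tag it as SKB_GSO_TCPV4 (and FIXEDID when
 * the IP IDs were constant), then finish via tcp_gro_complete().
 */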
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}
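
/* Offload callbacks registered for IPPROTO_TCP on the IPv4 path; the GSO
 * and GRO handlers above are invoked through this table by the inet
 * offload infrastructure.
 */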
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}