net/ipv4/udp_offload.c
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>

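/* Segment a UDP tunnel skb: strip the outer headers, segment the inner
 * packet via the caller-supplied gso_inner_segment callback, then restore
 * the outer headers on every resulting segment and fix up the outer UDP
 * checksum, either in software or via hardware offload where available.
 */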
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;
	bool need_ipsec;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);
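	/* partial now holds the outer pseudo-header sum with the old length
	 * removed; adding a segment's own length back and folding yields
	 * that segment's outer UDP checksum seed.
	 */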

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  !need_ipsec &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	if (remcsum) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum || offload_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
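	/* Walk the segment list, restoring the outer headers on each
	 * segment and setting up the outer UDP checksum as decided above.
	 */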
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial && skb_is_gso(skb)) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}

		if (!need_csum)
			continue;

		uh->check = ~csum_fold(csum_add(partial,
				       (__force __wsum)htonl(len)));

		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));
out:
	return segs;
}

struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

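	/* Pick the inner segmentation callback from the encapsulation type:
	 * Ethernet frames go through the generic MAC-layer GSO path, while
	 * raw IP protocols are dispatched via the per-protocol offload
	 * tables.
	 */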
	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);

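/* Segment a SKB_GSO_UDP_L4 skb into MSS-sized UDP datagrams: split the
 * payload with skb_segment(), then rewrite each segment's UDP length and
 * checksum. Every segment carries gso_size bytes of payload except
 * possibly the last, which may be shorter.
 */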
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features)
{
	struct sock *sk = gso_skb->sk;
	unsigned int sum_truesize = 0;
	struct sk_buff *segs, *seg;
	struct udphdr *uh;
	unsigned int mss;
	bool copy_dtor;
	__sum16 check;
	__be16 newlen;

	mss = skb_shinfo(gso_skb)->gso_size;
	if (gso_skb->len <= sizeof(*uh) + mss)
		return ERR_PTR(-EINVAL);

	skb_pull(gso_skb, sizeof(*uh));

	/* clear destructor to avoid skb_segment assigning it to tail */
	copy_dtor = gso_skb->destructor == sock_wfree;
	if (copy_dtor)
		gso_skb->destructor = NULL;

	segs = skb_segment(gso_skb, features);
	if (unlikely(IS_ERR_OR_NULL(segs))) {
		if (copy_dtor)
			gso_skb->destructor = sock_wfree;
		return segs;
	}

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	seg = segs;
	uh = udp_hdr(seg);

	/* compute checksum adjustment based on old length versus new */
	newlen = htons(sizeof(*uh) + mss);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);
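	/* The two helpers above work in 16-bit one's-complement arithmetic:
	 * csum16_sub() removes the old UDP length from the checksum field
	 * and csum16_add() folds in the new per-segment length, avoiding a
	 * full recompute over the payload.
	 */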

	for (;;) {
		if (copy_dtor) {
			seg->destructor = sock_wfree;
			seg->sk = sk;
			sum_truesize += seg->truesize;
		}

		if (!seg->next)
			break;

		uh->len = newlen;
		uh->check = check;

		if (seg->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(seg, ~check);
		else
			uh->check = gso_make_checksum(seg, ~check) ? :
				    CSUM_MANGLED_0;

		seg = seg->next;
		uh = udp_hdr(seg);
	}

	/* last packet can be partial gso_size, account for that in checksum */
	newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
		       seg->data_len);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	uh->len = newlen;
	uh->check = check;

	if (seg->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(seg, ~check);
	else
		uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;

	/* update refcount for the packet */
	if (copy_dtor) {
		int delta = sum_truesize - gso_skb->truesize;

		/* In some pathological cases, delta can be negative.
		 * We need to use either refcount_add() or
		 * refcount_sub_and_test().
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	}
	return segs;
}
EXPORT_SYMBOL_GPL(__udp_gso_segment);

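/* GSO dispatch for UDPv4: tunnel skbs go through the tunnel segmenter,
 * SKB_GSO_UDP_L4 skbs are split into MSS-sized datagrams, and legacy UFO
 * skbs get a software checksum before being fragmented.
 */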
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		return __udp_gso_segment(skb, features);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */

	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* If there is no outer header we can fake a checksum offload
	 * due to the fact that we have already done the checksum in
	 * software prior to segmenting the frame.
	 */
	if (!skb->encap_hdr_csum)
		features |= NETIF_F_HW_CSUM;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

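/* Cap how many segments may be merged into a single GRO packet; under a
 * small-packet flood an unbounded count would inflate truesize badly.
 */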
#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
					       struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);
	struct sk_buff *pp = NULL;
	struct udphdr *uh2;
	struct sk_buff *p;
	unsigned int ulen;

	/* Require a non-zero checksum, for symmetry with GSO */
	if (!uh->check) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	/* Do not deal with padded or malicious packets, sorry! */
	ulen = ntohs(uh->len);
	if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}
	/* pull encapsulating udp header */
	skb_gro_pull(skb, sizeof(struct udphdr));
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = udp_hdr(p);

		/* Match ports only, as csum is always non-zero */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		/* Terminate the flow on len mismatch or if it grows "too much".
		 * Under a small-packet flood the GRO count could otherwise grow
		 * a lot, leading to excessive truesize values.
		 * On len mismatch merge the first packet shorter than gso_size,
		 * otherwise complete the GRO packet.
		 */
		if (ulen > ntohs(uh2->len) || skb_gro_receive(p, skb) ||
		    ulen != ntohs(uh2->len) ||
		    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
			pp = p;

		return pp;
	}

	/* mismatch, but we never need to flush */
	return NULL;
}

INDIRECT_CALLABLE_DECLARE(struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
							   __be16 sport, __be16 dport));
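/* GRO receive for UDP: look up the destination socket, then either merge
 * plain UDP_L4 segments (when the socket has GRO enabled) or hand the skb
 * to the socket's tunnel gro_receive callback.
 */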
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, udp_lookup_t lookup)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;
	struct sock *sk;

	rcu_read_lock();
	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
	if (!sk)
		goto out_unlock;

	if (udp_sk(sk)->gro_enabled) {
		pp = call_gro_receive(udp_gro_receive_segment, head, skb);
		rcu_read_unlock();
		return pp;
	}

	if (NAPI_GRO_CB(skb)->encap_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid) ||
	    !udp_sk(sk)->gro_receive)
		goto out_unlock;

	/* mark that this skb passed once through the tunnel gro layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	flush = 0;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports, and require that the checksums are either
		 * both zero or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);

out_unlock:
	rcu_read_unlock();
	skb_gro_flush_final(skb, pp, flush);
	return pp;
}
EXPORT_SYMBOL(udp_gro_receive);

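/* IPv4 entry point for UDP GRO: validate (or convert) the UDP checksum
 * against the IPv4 pseudo-header, then defer to udp_gro_receive().
 */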
INDIRECT_CALLABLE_SCOPE
struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh) || !static_branch_unlikely(&udp_encap_needed_key))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);
skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

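/* Turn a merged GRO skb back into a SKB_GSO_UDP_L4 packet: record the
 * segment count and point csum_start/csum_offset at the UDP checksum so
 * later segmentation can redo it per segment.
 */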
static int udp_gro_complete_segment(struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);

	skb->csum_start = (unsigned char *)uh - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
	return 0;
}

int udp_gro_complete(struct sk_buff *skb, int nhoff,
		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;
	struct sock *sk;

	uh->len = newlen;

	rcu_read_lock();
	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_enabled) {
		err = udp_gro_complete_segment(skb);
	} else if (sk && udp_sk(sk)->gro_complete) {
		skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
					: SKB_GSO_UDP_TUNNEL;

		/* Set encapsulation before calling into inner gro_complete()
		 * functions to make them set up the inner offsets.
		 */
		skb->encapsulation = 1;
		err = udp_sk(sk)->gro_complete(sk, skb,
				nhoff + sizeof(struct udphdr));
	}
	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	return err;
}
EXPORT_SYMBOL(udp_gro_complete);

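/* IPv4 completion hook: rewrite the UDP checksum as a pseudo-header seed
 * for the new aggregate length, then let udp_gro_complete() finish up.
 */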
INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	if (uh->check)
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);

	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}

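/* Registration glue: hook the UDPv4 GSO/GRO callbacks above into the inet
 * offload table for IPPROTO_UDP via inet_add_offload().
 */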
static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}