/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_IPV6_GRO_H
#define _NET_IPV6_GRO_H

#include <linux/indirect_call_wrapper.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;
	/* Length of frag0. */
	unsigned int frag0_len;
	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;
	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;
	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;
	/* Number of segments aggregated. */
	u16	count;
	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;
	/* jiffies when first packet was created/queued */
	unsigned long age;

/* Used in napi_gro_cb::free */
#define NAPI_GRO_FREE		  1
#define NAPI_GRO_FREE_STOLEN_HEAD 2

	/* portion of the cb set to zero at every gro iteration */
	struct_group(zeroed,
		/* Start offset for remote checksum offload */
		u16	gro_remcsum_start;
		/* This is non-zero if the packet may be of the same flow. */
		u8	same_flow:1;
		/* Used in tunnel GRO receive */
		u8	encap_mark:1;
		/* GRO checksum is valid */
		u8	csum_valid:1;
		/* Number of checksums via CHECKSUM_UNNECESSARY */
		u8	csum_cnt:3;
		/* Free the skb? */
		u8	free:2;
		/* Used in foo-over-udp, set in udp[46]_gro_receive */
		u8	is_ipv6:1;
		/* Used in GRE, set in fou/gue_gro_receive */
		u8	is_fou:1;
		/* Used to determine if flush_id can be ignored */
		u8	is_atomic:1;
		/* Number of gro_receive callbacks this packet already went through */
		u8	recursion_counter:4;
		/* GRO is done by frag_list pointer chaining. */
		u8	is_flist:1;
	);

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;
	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}

typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}

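/* Illustrative sketch (not taken from this header): an encapsulation
 * gro_receive handler typically dispatches to the inner protocol's offload
 * callback through call_gro_receive() so the recursion limit above is
 * enforced, roughly:
 *
 *	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 *
 * where "ptype" is the packet_offload looked up for the inner protocol.
 */
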
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}

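/* Illustrative sketch (hypothetical names "hdr", "off" and label "out"): a
 * gro_receive handler usually tries the frag0 fast path first and only falls
 * back to the linearizing slow path when the header is not fully there:
 *
 *	hdr = skb_gro_header_fast(skb, off);
 *	if (skb_gro_header_hard(skb, off + sizeof(*hdr))) {
 *		hdr = skb_gro_header_slow(skb, off + sizeof(*hdr), off);
 *		if (!hdr)
 *			goto out;
 *	}
 */
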
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					  const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
						wsum_negate(NAPI_GRO_CB(skb)->csum)));
}

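/* Illustrative sketch (hypothetical "hdr"/"hdr_len"): after parsing an
 * encapsulation header, handlers advance the GRO cursor and then fix up the
 * running CHECKSUM_COMPLETE value for the bytes they just consumed:
 *
 *	skb_gro_pull(skb, hdr_len);
 *	skb_gro_postpull_rcsum(skb, hdr, hdr_len);
 */
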
/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}

static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fallback to normal path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,	\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)

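/* Illustrative sketch (not part of this header): a transport gro_receive
 * handler would typically validate the checksum against the pseudo header
 * before attempting aggregation, roughly:
 *
 *	if (skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 */
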
static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}
	ptr = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, off + plen)) {
		ptr = skb_gro_header_slow(skb, off + plen, off);
		if (!ptr)
			return NULL;
	}
	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);
	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);
	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;
	ptr = skb_gro_header_fast(skb, grc->offset);
	if (skb_gro_header_hard(skb, grc->offset + sizeof(u16))) {
		ptr = skb_gro_header_slow(skb, plen, grc->offset);
		if (!ptr)
			return;
	}
	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}

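/* Illustrative sketch (not from this header): a remote-checksum-offload aware
 * UDP tunnel handler would typically declare a struct gro_remcsum on the
 * stack, initialize it with skb_gro_remcsum_init(), rewrite the inner
 * checksum via skb_gro_remcsum_process() while parsing, and finally undo the
 * rewrite on the flush path with skb_gro_flush_final_remcsum() below.
 */
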
#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif

INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							    struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));

#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_INET(cb, f2, f1, head, skb);	\
})

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen))
		uh = skb_gro_header_slow(skb, hlen, off);

	return uh;
}

static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
					    skb_gro_len(skb), proto, 0));
}

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);

/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
 * pass the whole batch up to the stack.
 */
static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
	list_add_tail(&skb->list, &napi->rx_list);
	napi->rx_count += segs;
	if (napi->rx_count >= gro_normal_batch)
		gro_normal_list(napi);
}

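/* Illustrative sketch (not from this header): the napi completion paths feed
 * skbs into the batch with gro_normal_one(); a plain skb counts as one
 * segment, while a GRO-merged skb passes its aggregated segment count, e.g.:
 *
 *	gro_normal_one(napi, skb, 1);
 *	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
 */
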
#endif /* _NET_IPV6_GRO_H */