// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
 *
 * RMNET Data MAP protocol
 */

#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/bitfield.h>
#include "rmnet_config.h"
#include "rmnet_map.h"
#include "rmnet_private.h"
#include "rmnet_vnd.h"

#define RMNET_MAP_DEAGGR_SPACING  64
#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)

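/* Return a pointer to the transport checksum field within the TCP or
 * UDP header at @txporthdr, or NULL for any other protocol (checksum
 * offload is supported for TCP and UDP only).
 */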
static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
					 const void *txporthdr)
{
	if (protocol == IPPROTO_TCP)
		return &((struct tcphdr *)txporthdr)->check;

	if (protocol == IPPROTO_UDP)
		return &((struct udphdr *)txporthdr)->check;

	return NULL;
}

static int
rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	struct iphdr *ip4h = (struct iphdr *)skb->data;
	void *txporthdr = skb->data + ip4h->ihl * 4;
	__sum16 *csum_field, pseudo_csum;
	__sum16 ip_payload_csum;

	/* Computing the checksum over just the IPv4 header--including its
	 * checksum field--should yield 0.  If it doesn't, the IP header
	 * is bad, so return an error and let the IP layer drop it.
	 */
	if (ip_fast_csum(ip4h, ip4h->ihl)) {
		priv->stats.csum_ip4_header_bad++;
		return -EINVAL;
	}

	/* We don't support checksum offload on IPv4 fragments */
	if (ip_is_fragment(ip4h)) {
		priv->stats.csum_fragmented_pkt++;
		return -EOPNOTSUPP;
	}

	/* Checksum offload is only supported for UDP and TCP protocols */
	csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* RFC 768: UDP checksum is optional for IPv4, and is 0 if unused */
	if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
		priv->stats.csum_skipped++;
		return 0;
	}

	/* The checksum value in the trailer is computed over the entire
	 * IP packet, including the IP header and payload.  To derive the
	 * transport checksum from this, we first subtract the contribution
	 * of the IP header from the trailer checksum.  We then add the
	 * checksum computed over the pseudo header.
	 *
	 * We verified above that the IP header contributes zero to the
	 * trailer checksum.  Therefore the checksum in the trailer is
	 * just the checksum computed over the IP payload.
	 *
	 * If the IP payload arrives intact, adding the pseudo header
	 * checksum to the IP payload checksum will yield 0xffff (negative
	 * zero).  This means the trailer checksum and the pseudo checksum
	 * are additive inverses of each other.  Put another way, the
	 * message passes the checksum test if the trailer checksum value
	 * is the negated pseudo header checksum.
	 *
	 * Knowing this, we don't even need to examine the transport
	 * header checksum value; it is already accounted for in the
	 * checksum value found in the trailer.
	 */
	ip_payload_csum = csum_trailer->csum_value;

	pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
					ntohs(ip4h->tot_len) - ip4h->ihl * 4,
					ip4h->protocol, 0);

	/* The cast is required to ensure only the low 16 bits are examined */
	if (ip_payload_csum != (__sum16)~pseudo_csum) {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}

	priv->stats.csum_ok++;
	return 0;
}
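
/* Worked example of the check above (illustrative values only): if the
 * pseudo header checksum folds to 0x1234, then for an intact payload the
 * trailer carries csum_value == (__sum16)~0x1234 == 0xedcb, the equality
 * holds, and the packet is accepted without ever reading the transport
 * header's own checksum field.
 */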

#if IS_ENABLED(CONFIG_IPV6)
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
	struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
	void *txporthdr = skb->data + sizeof(*ip6h);
	__sum16 *csum_field, pseudo_csum;
	__sum16 ip6_payload_csum;
	__be16 ip_header_csum;

	/* Checksum offload is only supported for UDP and TCP protocols;
	 * the packet cannot include any IPv6 extension headers
	 */
	csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
	if (!csum_field) {
		priv->stats.csum_err_invalid_transport++;
		return -EPROTONOSUPPORT;
	}

	/* The checksum value in the trailer is computed over the entire
	 * IP packet, including the IP header and payload.  To derive the
	 * transport checksum from this, we first subtract the contribution
	 * of the IP header from the trailer checksum.  We then add the
	 * checksum computed over the pseudo header.
	 */
	ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
	ip6_payload_csum = csum16_sub(csum_trailer->csum_value, ip_header_csum);

	pseudo_csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				      ntohs(ip6h->payload_len),
				      ip6h->nexthdr, 0);

	/* It's sufficient to compare the IP payload checksum with the
	 * negated pseudo checksum to determine whether the packet
	 * checksum was good.  (See further explanation in comments
	 * in rmnet_map_ipv4_dl_csum_trailer()).
	 *
	 * The cast is required to ensure only the low 16 bits are
	 * examined.
	 */
	if (ip6_payload_csum != (__sum16)~pseudo_csum) {
		priv->stats.csum_validation_failed++;
		return -EINVAL;
	}

	priv->stats.csum_ok++;
	return 0;
}
#else
static int
rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
			       struct rmnet_map_dl_csum_trailer *csum_trailer,
			       struct rmnet_priv *priv)
{
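	/* Stub for CONFIG_IPV6=n builds; never reached, since callers
	 * check IS_ENABLED(CONFIG_IPV6) before calling this function.
	 */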
	return 0;
}
#endif

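/* For CHECKSUM_PARTIAL packets the stack stores the pseudo header
 * checksum in the transport header's checksum field; the MAP uplink
 * checksum engine evidently wants the complement of that value, so the
 * helpers below flip the field in place before handing off the packet.
 */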
static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
{
	void *txphdr;
	u16 *csum;

	txphdr = (void *)ip4h + ip4h->ihl * 4;

	if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	u16 val;

	val = MAP_CSUM_UL_ENABLED_FLAG;
	if (iphdr->protocol == IPPROTO_UDP)
		val |= MAP_CSUM_UL_UDP_FLAG;
	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;

	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
	ul_header->csum_info = htons(val);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
}
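
/* For example, for UDP over options-less IPv4 this yields
 * csum_start_offset == htons(20) (the transport header starts 20 bytes
 * into the packet) and a csum_info offset of 6, since the UDP checksum
 * field sits 6 bytes into the UDP header (16 for TCP).
 */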

#if IS_ENABLED(CONFIG_IPV6)
static void
rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
{
	void *txphdr;
	u16 *csum;

	txphdr = ip6h + 1;

	if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
		csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
		*csum = ~(*csum);
	}
}

static void
rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
	u16 val;

	val = MAP_CSUM_UL_ENABLED_FLAG;
	if (ipv6hdr->nexthdr == IPPROTO_UDP)
		val |= MAP_CSUM_UL_UDP_FLAG;
	val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;

	ul_header->csum_start_offset = htons(skb_network_header_len(skb));
	ul_header->csum_info = htons(val);

	skb->ip_summed = CHECKSUM_NONE;

	rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
}
#else
static void
rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
			      struct rmnet_map_ul_csum_header *ul_header,
			      struct sk_buff *skb)
{
}
#endif

static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
						struct rmnet_port *port,
						struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_v5_csum_header *ul_header;

	ul_header = skb_push(skb, sizeof(*ul_header));
	memset(ul_header, 0, sizeof(*ul_header));
	ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
						MAPV5_HDRINFO_HDR_TYPE_FMASK);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		void *iph = ip_hdr(skb);
		__sum16 *check;
		void *trans;
		u8 proto;

		if (skb->protocol == htons(ETH_P_IP)) {
			u16 ip_len = ((struct iphdr *)iph)->ihl * 4;

			proto = ((struct iphdr *)iph)->protocol;
			trans = iph + ip_len;
		} else if (IS_ENABLED(CONFIG_IPV6) &&
			   skb->protocol == htons(ETH_P_IPV6)) {
			u16 ip_len = sizeof(struct ipv6hdr);

			proto = ((struct ipv6hdr *)iph)->nexthdr;
			trans = iph + ip_len;
		} else {
			priv->stats.csum_err_invalid_ip_version++;
			goto sw_csum;
		}

		check = rmnet_map_get_csum_field(proto, trans);
		if (check) {
			skb->ip_summed = CHECKSUM_NONE;
			/* Ask for checksum offloading */
			ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
			priv->stats.csum_hw++;
			return;
		}
	}

sw_csum:
	priv->stats.csum_sw++;
}
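
/* Note the contrast with MAPv4 uplink offload: the MAPv5 header carries
 * no start/offset information at all, just a "valid" flag telling the
 * hardware to compute and insert the transport checksum itself, so no
 * checksum field manipulation is needed here.
 */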

/* Adds MAP header to front of skb->data
 * Padding is calculated and set appropriately in MAP header. Mux ID is
 * initialized to 0.
 */
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
						  int hdrlen,
						  struct rmnet_port *port,
						  int pad)
{
	struct rmnet_map_header *map_header;
	u32 padding, map_datalen;

	map_datalen = skb->len - hdrlen;
	map_header = (struct rmnet_map_header *)
			skb_push(skb, sizeof(struct rmnet_map_header));
	memset(map_header, 0, sizeof(struct rmnet_map_header));

	/* Set next_hdr bit for csum offload packets */
	if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
		map_header->flags |= MAP_NEXT_HEADER_FLAG;

	if (pad == RMNET_MAP_NO_PAD_BYTES) {
		map_header->pkt_len = htons(map_datalen);
		return map_header;
	}

	BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3);
	padding = ALIGN(map_datalen, 4) - map_datalen;

	if (padding == 0)
		goto done;

	if (skb_tailroom(skb) < padding)
		return NULL;

	skb_put_zero(skb, padding);

done:
	map_header->pkt_len = htons(map_datalen + padding);
	/* This is a data packet, so the CMD bit is 0.  Use |= rather than
	 * plain assignment so the next_hdr bit set above is preserved.
	 */
	map_header->flags |= padding & MAP_PAD_LEN_MASK;

	return map_header;
}
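
/* Example: with map_datalen == 1499 and padding requested, one pad byte
 * is appended (ALIGN(1499, 4) == 1500), pkt_len becomes htons(1500),
 * and the pad length 1 is recorded in the header's flags field.
 */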

/* Deaggregates a single packet
 * A whole new buffer is allocated for each portion of an aggregated frame.
 * Caller should keep calling deaggregate() on the source skb until NULL is
 * returned, indicating that there are no more packets to deaggregate. Caller
 * is responsible for freeing the original skb.
 */
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
				      struct rmnet_port *port)
{
	struct rmnet_map_v5_csum_header *next_hdr = NULL;
	struct rmnet_map_header *maph;
	void *data = skb->data;
	struct sk_buff *skbn;
	u8 nexthdr_type;
	u32 packet_len;

	if (skb->len == 0)
		return NULL;

	maph = (struct rmnet_map_header *)skb->data;
	packet_len = ntohs(maph->pkt_len) + sizeof(*maph);

	if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
		packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
	} else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
		if (!(maph->flags & MAP_CMD_FLAG)) {
			packet_len += sizeof(*next_hdr);
			if (maph->flags & MAP_NEXT_HEADER_FLAG)
				next_hdr = data + sizeof(*maph);
			else
				/* MAPv5 data pkt without csum hdr is invalid */
				return NULL;
		}
	}

	if (skb->len < packet_len)
		return NULL;

	/* Some hardware can send us empty frames. Catch them */
	if (!maph->pkt_len)
		return NULL;

	if (next_hdr) {
		nexthdr_type = u8_get_bits(next_hdr->header_info,
					   MAPV5_HDRINFO_HDR_TYPE_FMASK);
		if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
			return NULL;
	}

	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
	if (!skbn)
		return NULL;

	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
	skb_put(skbn, packet_len);
	memcpy(skbn->data, skb->data, packet_len);
	skb_pull(skb, packet_len);

	return skbn;
}
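
/* Caller's loop, as a sketch (cf. rmnet_rx_handler()):
 *
 *	while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
 *		__rmnet_map_ingress_handler(skbn, port);
 *	consume_skb(skb);
 */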

/* Validates packet checksums. Function takes a pointer to
 * the beginning of a buffer which contains the IP payload +
 * padding + checksum trailer.
 * Only IPv4 and IPv6 are supported along with TCP & UDP.
 * Fragmented or tunneled packets are not supported.
 */
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_dl_csum_trailer *csum_trailer;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
		return -EOPNOTSUPP;
	}

	csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);

	if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) {
		priv->stats.csum_valid_unset++;
		return -EINVAL;
	}

	if (skb->protocol == htons(ETH_P_IP))
		return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);

	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
		return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);

	priv->stats.csum_err_invalid_ip_version++;

	return -EPROTONOSUPPORT;
}

static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
						struct net_device *orig_dev)
{
	struct rmnet_priv *priv = netdev_priv(orig_dev);
	struct rmnet_map_ul_csum_header *ul_header;
	void *iphdr;

	ul_header = (struct rmnet_map_ul_csum_header *)
		    skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));

	if (unlikely(!(orig_dev->features &
		     (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
		goto sw_csum;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto sw_csum;

	iphdr = (char *)ul_header +
		sizeof(struct rmnet_map_ul_csum_header);

	if (skb->protocol == htons(ETH_P_IP)) {
		rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
		priv->stats.csum_hw++;
		return;
	}

	if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
		rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
		priv->stats.csum_hw++;
		return;
	}

	priv->stats.csum_err_invalid_ip_version++;

sw_csum:
	memset(ul_header, 0, sizeof(*ul_header));

	priv->stats.csum_sw++;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct net_device *orig_dev,
				      int csum_type)
{
	switch (csum_type) {
	case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
		break;
	case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
		rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
		break;
	default:
		break;
	}
}
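
/* Caller sketch (cf. rmnet_map_egress_handler()): csum_type is the
 * egress checksum flag taken from port->data_format, i.e. one of
 * RMNET_FLAGS_EGRESS_MAP_CKSUMV4 or _CKSUMV5; when neither flag is set
 * this function is not called at all.
 */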

/* Process a MAPv5 packet header */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);
	struct rmnet_map_v5_csum_header *next_hdr;
	u8 nexthdr_type;

	next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
			sizeof(struct rmnet_map_header));

	nexthdr_type = u8_get_bits(next_hdr->header_info,
				   MAPV5_HDRINFO_HDR_TYPE_FMASK);

	if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
		return -EINVAL;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		priv->stats.csum_sw++;
	} else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
		priv->stats.csum_ok++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		priv->stats.csum_valid_unset++;
	}

	/* Pull csum v5 header */
	skb_pull(skb, sizeof(*next_hdr));

	return 0;
}

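/* Uplink traffic arriving more than this many nanoseconds (10 ms) after
 * the previous packet is treated as sparse and bypasses aggregation;
 * see rmnet_map_tx_aggregate() below.
 */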
#define RMNET_AGG_BYPASS_TIME_NSEC 10000000L

static void reset_aggr_params(struct rmnet_port *port)
{
	port->skbagg_head = NULL;
	port->agg_count = 0;
	port->agg_state = 0;
	memset(&port->agg_time, 0, sizeof(struct timespec64));
}

static void rmnet_send_skb(struct rmnet_port *port, struct sk_buff *skb)
{
	if (skb_needs_linearize(skb, port->dev->features)) {
		if (unlikely(__skb_linearize(skb))) {
			struct rmnet_priv *priv;

			priv = netdev_priv(port->rmnet_dev);
			this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
			dev_kfree_skb_any(skb);
			return;
		}
	}

	dev_queue_xmit(skb);
}

static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct rmnet_port *port;

	port = container_of(work, struct rmnet_port, agg_wq);

	spin_lock_bh(&port->agg_lock);
	if (likely(port->agg_state == -EINPROGRESS)) {
		/* Buffer may have already been shipped out */
		if (likely(port->skbagg_head)) {
			skb = port->skbagg_head;
			reset_aggr_params(port);
		}
		port->agg_state = 0;
	}

	spin_unlock_bh(&port->agg_lock);
	if (skb)
		rmnet_send_skb(port, skb);
}

static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
{
	struct rmnet_port *port;

	port = container_of(t, struct rmnet_port, hrtimer);

	schedule_work(&port->agg_wq);

	return HRTIMER_NORESTART;
}
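
/* The hrtimer callback runs in hard interrupt context, where neither
 * taking the BH-disabling agg_lock nor calling dev_queue_xmit() is
 * safe, so the flush itself is deferred to the work item above.
 */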
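/* Queue an egress packet for aggregation.  The first packet is copied
 * into a buffer with enough tailroom for egress_agg_params.bytes;
 * subsequent packets are chained onto its frag_list.  The batch is
 * flushed once the byte, count, or time limit is reached, or when the
 * hrtimer fires.  Returns the number of bytes queued or sent, or 0 if
 * the packet could not be handled and the caller must drop it.
 */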
unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
				    struct net_device *orig_dev)
{
	struct timespec64 diff, last;
	unsigned int len = skb->len;
	struct sk_buff *agg_skb;
	int size;

	spin_lock_bh(&port->agg_lock);
	memcpy(&last, &port->agg_last, sizeof(struct timespec64));
	ktime_get_real_ts64(&port->agg_last);

	if (!port->skbagg_head) {
		/* Check to see if we should agg first. If the traffic is very
		 * sparse, don't aggregate.
		 */
new_packet:
		diff = timespec64_sub(port->agg_last, last);
		size = port->egress_agg_params.bytes - skb->len;

		if (size < 0) {
			/* dropped */
			spin_unlock_bh(&port->agg_lock);
			return 0;
		}

		if (diff.tv_sec > 0 || diff.tv_nsec > RMNET_AGG_BYPASS_TIME_NSEC ||
		    size == 0)
			goto no_aggr;

		port->skbagg_head = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
		if (!port->skbagg_head)
			goto no_aggr;

		dev_kfree_skb_any(skb);
		port->skbagg_head->protocol = htons(ETH_P_MAP);
		port->agg_count = 1;
		ktime_get_real_ts64(&port->agg_time);
		skb_frag_list_init(port->skbagg_head);
		goto schedule;
	}
	diff = timespec64_sub(port->agg_last, port->agg_time);
	size = port->egress_agg_params.bytes - port->skbagg_head->len;

	if (skb->len > size) {
		agg_skb = port->skbagg_head;
		reset_aggr_params(port);
		spin_unlock_bh(&port->agg_lock);
		hrtimer_cancel(&port->hrtimer);
		rmnet_send_skb(port, agg_skb);
		spin_lock_bh(&port->agg_lock);
		goto new_packet;
	}

	if (skb_has_frag_list(port->skbagg_head))
		port->skbagg_tail->next = skb;
	else
		skb_shinfo(port->skbagg_head)->frag_list = skb;

	port->skbagg_head->len += skb->len;
	port->skbagg_head->data_len += skb->len;
	port->skbagg_head->truesize += skb->truesize;
	port->skbagg_tail = skb;
	port->agg_count++;

	if (diff.tv_sec > 0 || diff.tv_nsec > port->egress_agg_params.time_nsec ||
	    port->agg_count >= port->egress_agg_params.count ||
	    port->skbagg_head->len == port->egress_agg_params.bytes) {
		agg_skb = port->skbagg_head;
		reset_aggr_params(port);
		spin_unlock_bh(&port->agg_lock);
		hrtimer_cancel(&port->hrtimer);
		rmnet_send_skb(port, agg_skb);
		return len;
	}

schedule:
	if (!hrtimer_active(&port->hrtimer) && port->agg_state != -EINPROGRESS) {
		port->agg_state = -EINPROGRESS;
		hrtimer_start(&port->hrtimer,
			      ns_to_ktime(port->egress_agg_params.time_nsec),
			      HRTIMER_MODE_REL);
	}
	spin_unlock_bh(&port->agg_lock);

	return len;

no_aggr:
	spin_unlock_bh(&port->agg_lock);
	skb->protocol = htons(ETH_P_MAP);
	dev_queue_xmit(skb);

	return len;
}

void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
				    u32 count, u32 time)
{
	spin_lock_bh(&port->agg_lock);
	port->egress_agg_params.bytes = size;
	WRITE_ONCE(port->egress_agg_params.count, count);
	port->egress_agg_params.time_nsec = time * NSEC_PER_USEC;
	spin_unlock_bh(&port->agg_lock);
}
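
/* count is written with WRITE_ONCE() because it is also read without
 * agg_lock held on the transmit path; the other parameters are only
 * accessed under the lock.
 */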

void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
	hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
	spin_lock_init(&port->agg_lock);
	rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
	INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
}

void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
{
	hrtimer_cancel(&port->hrtimer);
	cancel_work_sync(&port->agg_wq);

	spin_lock_bh(&port->agg_lock);
	if (port->agg_state == -EINPROGRESS) {
		if (port->skbagg_head) {
			dev_kfree_skb_any(port->skbagg_head);
			reset_aggr_params(port);
		}

		port->agg_state = 0;
	}
	spin_unlock_bh(&port->agg_lock);
}