/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 *  Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Meant to be used mostly for locally generated traffic:
 *  Fast classification depends on skb->sk being set before reaching us.
 *  If not (router workload), we use rxhash as a fallback, with a 32-bit wide hash.
 *  All packets belonging to a socket are considered a 'flow'.
 *
 *  Flows are dynamically allocated and stored in a hash table of RB trees.
 *  They are also part of one of two Round Robin 'queues' (new or old flows).
 *
 *  Burst avoidance (aka pacing) capability:
 *
 *  Transport (eg TCP) can set a rate in sk->sk_pacing_rate, enqueue a
 *  bunch of packets, and this packet scheduler adds delay between
 *  packets to respect the rate limitation.
 *
 *  enqueue():
 *   - lookup one RB tree (out of 1024 or more) to find the flow.
 *     If the flow does not exist, create it and add it to the tree.
 *     Add the skb to the per-flow list of skbs (FIFO).
 *   - Use a special FIFO for high prio packets
 *
 *  dequeue(): serves flows in Round Robin
 *  Note: When a flow becomes empty, we do not immediately remove it from
 *  RB trees, for performance reasons (it's expected to send additional packets,
 *  or the SLAB cache will reuse the socket for another flow)
 */
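
/*
 * Illustrative setup from userspace (assumes iproute2's tc and a device
 * named eth0; the values shown roughly match the defaults installed by
 * fq_init() for a 1500 byte MTU device and are examples only):
 *
 *   tc qdisc replace dev eth0 root fq limit 10000 flow_limit 100 \
 *              quantum 3028 initial_quantum 15140
 *
 * Each option corresponds to one of the TCA_FQ_* netlink attributes
 * handled in fq_change().
 */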

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/tcp.h>

/*
 * Per flow structure, dynamically allocated
 */
struct fq_flow {
        struct sk_buff  *head;          /* list of skbs for this flow : first skb */
        union {
                struct sk_buff *tail;   /* last skb in the list */
                unsigned long  age;     /* jiffies when flow was emptied, for gc */
        };
        struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        struct sock     *sk;
        int             qlen;           /* number of packets in flow queue */
        int             credit;
        u32             socket_hash;    /* sk_hash */
        struct fq_flow *next;           /* next pointer in RR lists, or &detached */

        struct rb_node  rate_node;      /* anchor in q->delayed tree */
        u64             time_next_packet;
};

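/* Head and tail of a singly linked list of flows: new_flows and old_flows
 * below are the two Round Robin lists served by fq_dequeue().
 */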
struct fq_flow_head {
        struct fq_flow *first;
        struct fq_flow *last;
};

struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root  delayed;        /* for rate limited flows */
        u64             time_next_delayed_flow;
        unsigned long   unthrottle_latency_ns;

        struct fq_flow  internal;       /* for non classified or high prio packets */
        u32             quantum;
        u32             initial_quantum;
        u32             flow_refill_delay;
        u32             flow_plimit;    /* max packets per flow */
        unsigned long   flow_max_rate;  /* optional max rate per flow */
        u64             ce_threshold;
        u32             orphan_mask;    /* mask for orphaned skb */
        u32             low_rate_threshold;
        struct rb_root  *fq_root;
        u8              rate_enable;
        u8              fq_trees_log;

        u32             flows;
        u32             inactive_flows;
        u32             throttled_flows;

        u64             stat_gc_flows;
        u64             stat_internal_packets;
        u64             stat_throttled;
        u64             stat_ce_mark;
        u64             stat_flows_plimit;
        u64             stat_pkts_too_long;
        u64             stat_allocation_errors;
        struct qdisc_watchdog watchdog;
};

/* special values to mark a detached flow (not on old/new list)
 * or a throttled flow (parked in the q->delayed rbtree)
 */
static struct fq_flow detached, throttled;

static void fq_flow_set_detached(struct fq_flow *f)
{
        f->next = &detached;
        f->age = jiffies;
}

static bool fq_flow_is_detached(const struct fq_flow *f)
{
        return f->next == &detached;
}

static bool fq_flow_is_throttled(const struct fq_flow *f)
{
        return f->next == &throttled;
}

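/* Append @flow at the tail of the Round Robin list @head */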
static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
{
        if (head->first)
                head->last->next = flow;
        else
                head->first = flow;
        head->last = flow;
        flow->next = NULL;
}

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        rb_erase(&f->rate_node, &q->delayed);
        q->throttled_flows--;
        fq_flow_add_tail(&q->old_flows, f);
}

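/* Park @f in the q->delayed rbtree, sorted by f->time_next_packet, and keep
 * q->time_next_delayed_flow pointing at the earliest pending wakeup.
 */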
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

        while (*p) {
                struct fq_flow *aux;

                parent = *p;
                aux = rb_entry(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;
        q->stat_throttled++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
}


static struct kmem_cache *fq_flow_cachep __read_mostly;


/* limit number of collected flows per round */
#define FQ_GC_MAX 8
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
{
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
}

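/* Opportunistic garbage collection: while walking the bucket @root toward
 * @sk, collect up to FQ_GC_MAX flows that have been idle (detached) longer
 * than FQ_GC_AGE and free them.
 */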
static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
                  struct sock *sk)
{
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;
        int fcnt = 0;

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk)
                        break;

                if (fq_gc_candidate(f)) {
                        tofree[fcnt++] = f;
                        if (fcnt == FQ_GC_MAX)
                                break;
                }

                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
        while (fcnt) {
                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
        }
}

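/* Map @skb to its flow: derive a key from the socket pointer (or from the
 * packet hash for orphaned/listener traffic), pick one of the fq_root[]
 * rbtrees, and look the flow up, allocating a new one on a miss.
 */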
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;
        struct fq_flow *f;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))
                return &q->internal;

        /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
         * or a listener (SYNCOOKIE mode)
         * 1) request sockets are not full blown,
         *    they do not contain sk_pacing_rate
         * 2) They are not part of a 'flow' yet
         * 3) We do not want to rate limit them (eg SYNFLOOD attack),
         *    especially if the listener set SO_MAX_PACING_RATE
         * 4) We pretend they are orphaned
         */
        if (!sk || sk_listener(sk)) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

                /* By forcing low order bit to 1, we make sure to not
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
                skb_orphan(skb);
        }

        root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)
                fq_gc(q, root, sk);

        p = &root->rb_node;
        parent = NULL;
        while (*p) {
                parent = *p;

                f = rb_entry(parent, struct fq_flow, fq_node);
                if (f->sk == sk) {
                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill credit with
                         * initial quantum
                         */
                        if (unlikely(skb->sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                if (fq_flow_is_throttled(f))
                                        fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;
                        }
                        return f;
                }
                if (f->sk > sk)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!f)) {
                q->stat_allocation_errors++;
                return &q->internal;
        }
        fq_flow_set_detached(f);
        f->sk = sk;
        if (skb->sk)
                f->socket_hash = sk->sk_hash;
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);

        q->flows++;
        q->inactive_flows++;
        return f;
}


/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
{
        struct sk_buff *skb = flow->head;

        if (skb) {
                flow->head = skb->next;
                skb_mark_not_on_list(skb);
                flow->qlen--;
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        }
        return skb;
}

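/* Add one skb at the tail of a flow queue. Writing flow->tail overwrites
 * flow->age, as both share a union.
 */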
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
{
        struct sk_buff *head = flow->head;

        skb->next = NULL;
        if (!head)
                flow->head = skb;
        else
                flow->tail->next = skb;

        flow->tail = skb;
}

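/* Qdisc enqueue: classify the skb into a flow, enforce the per-qdisc and
 * per-flow packet limits, and put an empty (detached) flow back on the
 * new_flows list.
 */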
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                      struct sk_buff **to_free)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow *f;

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch, to_free);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch, to_free);
        }

        f->qlen++;
        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
                struct sock *sk = skb->sk;

                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                if (sk && q->rate_enable) {
                        if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
                                     SK_PACING_FQ))
                                smp_store_release(&sk->sk_pacing_status,
                                                  SK_PACING_FQ);
                }
                q->inactive_flows--;
        }

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal))
                q->stat_internal_packets++;
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
}

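/* Move flows whose pacing deadline has passed from the delayed rbtree back
 * to the old_flows list, and track the next wakeup time.
 */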
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
        unsigned long sample;
        struct rb_node *p;

        if (q->time_next_delayed_flow > now)
                return;

        /* Update unthrottle latency EWMA.
         * This is cheap and can help diagnosing timer/latency problems.
         */
        sample = (unsigned long)(now - q->time_next_delayed_flow);
        q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
        q->unthrottle_latency_ns += sample >> 3;

        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;
                        break;
                }
                fq_flow_unset_throttled(q, f);
        }
}

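/* Qdisc dequeue: drain the internal (high prio) queue first, then serve
 * new_flows and old_flows in Round Robin, throttling any flow whose next
 * transmit time (EDT tstamp or computed pacing time) is in the future.
 */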
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
        unsigned long rate;
        u32 plen;
        u64 now;

        if (!sch->q.qlen)
                return NULL;

        skb = fq_dequeue_head(sch, &q->internal);
        if (skb)
                goto out;

        now = ktime_get_ns();
        fq_check_throttled(q, now);
begin:
        head = &q->new_flows;
        if (!head->first) {
                head = &q->old_flows;
                if (!head->first) {
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow);
                        return NULL;
                }
        }
        f = head->first;

        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);
                goto begin;
        }

        skb = f->head;
        if (skb) {
                u64 time_next_packet = max_t(u64, ktime_to_ns(skb->tstamp),
                                             f->time_next_packet);

                if (now < time_next_packet) {
                        head->first = f->next;
                        f->time_next_packet = time_next_packet;
                        fq_flow_set_throttled(q, f);
                        goto begin;
                }
                if (time_next_packet &&
                    (s64)(now - time_next_packet - q->ce_threshold) > 0) {
                        INET_ECN_set_ce(skb);
                        q->stat_ce_mark++;
                }
        }

        skb = fq_dequeue_head(sch, f);
        if (!skb) {
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                } else {
                        fq_flow_set_detached(f);
                        q->inactive_flows++;
                }
                goto begin;
        }
        prefetch(&skb->end);
        plen = qdisc_pkt_len(skb);
        f->credit -= plen;

        if (!q->rate_enable)
                goto out;

        rate = q->flow_max_rate;

        /* If EDT time was provided for this skb, we need to
         * update f->time_next_packet only if this qdisc enforces
         * a flow max rate.
         */
        if (!skb->tstamp) {
                if (skb->sk)
                        rate = min(skb->sk->sk_pacing_rate, rate);

                if (rate <= q->low_rate_threshold) {
                        f->credit = 0;
                } else {
                        plen = max(plen, q->quantum);
                        if (f->credit > 0)
                                goto out;
                }
        }
        if (rate != ~0UL) {
                u64 len = (u64)plen * NSEC_PER_SEC;

                if (likely(rate))
                        len = div64_ul(len, rate);
                /* Since socket rate can change later,
                 * clamp the delay to 1 second.
                 * Really, providers of too big packets should be fixed!
                 */
                if (unlikely(len > NSEC_PER_SEC)) {
                        len = NSEC_PER_SEC;
                        q->stat_pkts_too_long++;
                }
                /* Account for schedule/timers drifts.
                 * f->time_next_packet was set when prior packet was sent,
                 * and current time (@now) can be too late by tens of us.
                 */
                if (f->time_next_packet)
                        len -= min(len/2, now - f->time_next_packet);
                f->time_next_packet = now + len;
        }
out:
        qdisc_bstats_update(sch, skb);
        return skb;
}

static void fq_flow_purge(struct fq_flow *flow)
{
        rtnl_kfree_skbs(flow->head, flow->tail);
        flow->head = NULL;
        flow->qlen = 0;
}

static void fq_reset(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;
        struct rb_node *p;
        struct fq_flow *f;
        unsigned int idx;

        sch->q.qlen = 0;
        sch->qstats.backlog = 0;

        fq_flow_purge(&q->internal);

        if (!q->fq_root)
                return;

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = rb_entry(p, struct fq_flow, fq_node);
                        rb_erase(p, root);

                        fq_flow_purge(f);

                        kmem_cache_free(fq_flow_cachep, f);
                }
        }
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->flows                = 0;
        q->inactive_flows       = 0;
        q->throttled_flows      = 0;
}

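/* Move every still-active flow from the old hash array to the new one,
 * freeing gc candidates along the way. Runs under the qdisc tree lock,
 * called from fq_resize().
 */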
static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
{
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;
        int fcnt = 0;
        u32 idx;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {
                        rb_erase(op, oroot);
                        of = rb_entry(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {
                                fcnt++;
                                kmem_cache_free(fq_flow_cachep, of);
                                continue;
                        }
                        nroot = &new_array[hash_ptr(of->sk, new_log)];

                        np = &nroot->rb_node;
                        parent = NULL;
                        while (*np) {
                                parent = *np;

                                nf = rb_entry(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                if (nf->sk > of->sk)
                                        np = &parent->rb_right;
                                else
                                        np = &parent->rb_left;
                        }

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);
                }
        }
        q->flows -= fcnt;
        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
}

static void fq_free(void *addr)
{
        kvfree(addr);
}

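/* Resize the fq_root[] hash array to 1 << log buckets and rehash existing
 * flows. No-op if the current array already has that size.
 */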
static int fq_resize(struct Qdisc *sch, u32 log)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *array;
        void *old_fq_root;
        u32 idx;

        if (q->fq_root && log == q->fq_trees_log)
                return 0;

        /* If XPS was setup, we can allocate memory on right NUMA node */
        array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
                              netdev_queue_numa_node_read(sch->dev_queue));
        if (!array)
                return -ENOMEM;

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        sch_tree_lock(sch);

        old_fq_root = q->fq_root;
        if (old_fq_root)
                fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

        q->fq_root = array;
        q->fq_trees_log = log;

        sch_tree_unlock(sch);

        fq_free(old_fq_root);

        return 0;
}

static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT]                 = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT]            = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM]                = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM]        = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE]      = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
        [TCA_FQ_ORPHAN_MASK]            = { .type = NLA_U32 },
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
        [TCA_FQ_CE_THRESHOLD]           = { .type = NLA_U32 },
};

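/* Apply a netlink configuration change. An invalid value sets the error
 * code but does not undo attributes already applied; the queue is then
 * trimmed down to the (possibly new) sch->limit.
 */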
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        unsigned int drop_len = 0;
        u32 fq_log;

        if (!opt)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy, NULL);
        if (err < 0)
                return err;

        sch_tree_lock(sch);

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);

                if (nval >= 1 && nval <= ilog2(256*1024))
                        fq_log = nval;
                else
                        err = -EINVAL;
        }
        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM]) {
                u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

                if (quantum > 0)
                        q->quantum = quantum;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE]) {
                u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

                q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
        }
        if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
                q->low_rate_threshold =
                        nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                if (enable <= 1)
                        q->rate_enable = enable;
                else
                        err = -EINVAL;
        }

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
        }

        if (tb[TCA_FQ_ORPHAN_MASK])
                q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

        if (tb[TCA_FQ_CE_THRESHOLD])
                q->ce_threshold = (u64)NSEC_PER_USEC *
                                  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);

        if (!err) {
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);
                sch_tree_lock(sch);
        }
        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                if (!skb)
                        break;
                drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);
                drop_count++;
        }
        qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

        sch_tree_unlock(sch);
        return err;
}

static void fq_destroy(struct Qdisc *sch)
{
        struct fq_sched_data *q = qdisc_priv(sch);

        fq_reset(sch);
        fq_free(q->fq_root);
        qdisc_watchdog_cancel(&q->watchdog);
}

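/* Set the default parameters (10000 packet limit, 100 packets per flow,
 * quantum of two MTUs, 1024 hash buckets, pacing enabled), then apply any
 * netlink options given at qdisc creation time.
 */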
static int fq_init(struct Qdisc *sch, struct nlattr *opt,
                   struct netlink_ext_ack *extack)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        int err;

        sch->limit              = 10000;
        q->flow_plimit          = 100;
        q->quantum              = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum      = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay    = msecs_to_jiffies(40);
        q->flow_max_rate        = ~0UL;
        q->time_next_delayed_flow = ~0ULL;
        q->rate_enable          = 1;
        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;
        q->fq_root              = NULL;
        q->fq_trees_log         = ilog2(1024);
        q->orphan_mask          = 1024 - 1;
        q->low_rate_threshold   = 550000 / 8;

        /* Default ce_threshold of 4294 seconds */
        q->ce_threshold         = (u64)NSEC_PER_USEC * ~0U;

        qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

        if (opt)
                err = fq_change(sch, opt, extack);
        else
                err = fq_resize(sch, q->fq_trees_log);

        return err;
}

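/* Dump the current configuration as TCA_FQ_* attributes. ce_threshold is
 * converted back to microseconds and flow_max_rate is clamped to fit the
 * 32-bit attribute.
 */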
static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 ce_threshold = q->ce_threshold;
        struct nlattr *opts;

        opts = nla_nest_start(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        do_div(ce_threshold, NSEC_PER_USEC);

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
                        min_t(unsigned long, q->flow_max_rate, ~0U)) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
            nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
                        q->low_rate_threshold) ||
            nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);

nla_put_failure:
        return -1;
}

static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct fq_sched_data *q = qdisc_priv(sch);
        struct tc_fq_qd_stats st;

        sch_tree_lock(sch);

        st.gc_flows               = q->stat_gc_flows;
        st.highprio_packets       = q->stat_internal_packets;
        st.tcp_retrans            = 0;
        st.throttled              = q->stat_throttled;
        st.flows_plimit           = q->stat_flows_plimit;
        st.pkts_too_long          = q->stat_pkts_too_long;
        st.allocation_errors      = q->stat_allocation_errors;
        st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
        st.flows                  = q->flows;
        st.inactive_flows         = q->inactive_flows;
        st.throttled_flows        = q->throttled_flows;
        st.unthrottle_latency_ns  = min_t(unsigned long,
                                          q->unthrottle_latency_ns, ~0U);
        st.ce_mark                = q->stat_ce_mark;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .id             =       "fq",
        .priv_size      =       sizeof(struct fq_sched_data),

        .enqueue        =       fq_enqueue,
        .dequeue        =       fq_dequeue,
        .peek           =       qdisc_peek_dequeued,
        .init           =       fq_init,
        .reset          =       fq_reset,
        .destroy        =       fq_destroy,
        .change         =       fq_change,
        .dump           =       fq_dump,
        .dump_stats     =       fq_dump_stats,
        .owner          =       THIS_MODULE,
};

static int __init fq_module_init(void)
{
        int ret;

        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),
                                           0, 0, NULL);
        if (!fq_flow_cachep)
                return -ENOMEM;

        ret = register_qdisc(&fq_qdisc_ops);
        if (ret)
                kmem_cache_destroy(fq_flow_cachep);
        return ret;
}

static void __exit fq_module_exit(void)
{
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);
}

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");