/*
 * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
 *
 * Copyright (C) 2013-2015 Eric Dumazet <edumazet@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Meant to be mostly used for locally generated traffic :
 * Fast classification depends on skb->sk being set before reaching us.
 * If not (router workload), we use rxhash as fallback, with a 32-bit wide hash.
 * All packets belonging to a socket are considered as a 'flow'.
 *
 * Flows are dynamically allocated and stored in a hash table of RB trees.
 * They are also part of one of the Round Robin 'queues' (new or old flows).
 *
 * Burst avoidance (aka pacing) capability :
 *
 * Transport (eg TCP) can set in sk->sk_pacing_rate a rate, enqueue a
 * bunch of packets, and this packet scheduler adds delay between
 * packets to respect rate limitation. See the worked example below.
 *
 * enqueue() :
 * - lookup one RB tree (out of 1024 or more) to find the flow.
 *   If the flow does not exist, create it and add it to the tree.
 *   Add skb to the per flow list of skbs (fifo).
 * - Use a special fifo for high prio packets
 *
 * dequeue() : serves flows in Round Robin
 * Note : When a flow becomes empty, we do not immediately remove it from
 * rb trees, for performance reasons (it's expected to send additional packets,
 * or the SLAB cache will reuse the socket for another flow)
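 *
 * Worked example (illustrative numbers) : the pacing delay applied between
 * two packets of a flow is roughly
 *   delay_ns = packet_len * NSEC_PER_SEC / pacing_rate
 * so a flow paced at 1.25 MBytes/sec sending 1250 byte packets gets about
 * 1 ms between consecutive packets (see the computation in fq_dequeue()).
 */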
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/hash.h>
#include <linux/prefetch.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/tcp_states.h>
static inline struct fq_skb_cb *fq_skb_cb(struct sk_buff *skb)
        qdisc_cb_private_validate(skb, sizeof(struct fq_skb_cb));
        return (struct fq_skb_cb *)qdisc_skb_cb(skb)->data;
/*
 * Per flow structure, dynamically allocated.
 * If packets have monotonically increasing time_to_send, they are placed in O(1)
 * in a linear list (head, tail), otherwise they are placed in an rbtree (t_root).
 */
        struct rb_root  t_root;
        struct sk_buff  *head;          /* list of skbs for this flow : first skb */
        struct sk_buff  *tail;          /* last skb in the list */
        unsigned long   age;            /* jiffies when flow was emptied, for gc */
        struct rb_node  fq_node;        /* anchor in fq_root[] trees */
        int             qlen;           /* number of packets in flow queue */
        u32             socket_hash;    /* sk_hash */
        struct fq_flow  *next;          /* next pointer in RR lists, or &detached */
        struct rb_node  rate_node;      /* anchor in q->delayed tree */

        struct fq_flow  *first;
struct fq_sched_data {
        struct fq_flow_head new_flows;

        struct fq_flow_head old_flows;

        struct rb_root  delayed;        /* for rate limited flows */
        u64             time_next_delayed_flow;
        unsigned long   unthrottle_latency_ns;

        struct fq_flow  internal;       /* for non-classified or high prio packets */
        u32             flow_refill_delay;
        u32             flow_plimit;    /* max packets per flow */
        unsigned long   flow_max_rate;  /* optional max rate per flow */
        u32             orphan_mask;    /* mask for orphaned skb */
        u32             low_rate_threshold;
        struct rb_root  *fq_root;

        u64             stat_internal_packets;
        u64             stat_flows_plimit;
        u64             stat_pkts_too_long;
        u64             stat_allocation_errors;

        struct qdisc_watchdog watchdog;

/* special value to mark a detached flow (not on old/new list) */
static struct fq_flow detached, throttled;
static void fq_flow_set_detached(struct fq_flow *f)

static bool fq_flow_is_detached(const struct fq_flow *f)
        return f->next == &detached;

static bool fq_flow_is_throttled(const struct fq_flow *f)
        return f->next == &throttled;

static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
        head->last->next = flow;

static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
        rb_erase(&f->rate_node, &q->delayed);
        q->throttled_flows--;
        fq_flow_add_tail(&q->old_flows, f);

static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
        struct rb_node **p = &q->delayed.rb_node, *parent = NULL;

                aux = rb_entry(parent, struct fq_flow, rate_node);
                if (f->time_next_packet >= aux->time_next_packet)
                        p = &parent->rb_right;
                        p = &parent->rb_left;

        rb_link_node(&f->rate_node, parent, p);
        rb_insert_color(&f->rate_node, &q->delayed);
        q->throttled_flows++;

        f->next = &throttled;
        if (q->time_next_delayed_flow > f->time_next_packet)
                q->time_next_delayed_flow = f->time_next_packet;
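/* The delayed tree above is ordered by time_next_packet, so
 * fq_check_throttled() can release throttled flows in earliest-deadline
 * order by repeatedly taking rb_first().
 */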
static struct kmem_cache *fq_flow_cachep __read_mostly;

/* limit number of collected flows per round */
#define FQ_GC_AGE (3*HZ)

static bool fq_gc_candidate(const struct fq_flow *f)
        return fq_flow_is_detached(f) &&
               time_after(jiffies, f->age + FQ_GC_AGE);
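/* In other words, a flow becomes a GC candidate once it is detached (empty and
 * on no round-robin list) and has been idle for FQ_GC_AGE, i.e. about 3 seconds.
 */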
static void fq_gc(struct fq_sched_data *q,
                  struct rb_root *root,
        struct fq_flow *f, *tofree[FQ_GC_MAX];
        struct rb_node **p, *parent;

                f = rb_entry(parent, struct fq_flow, fq_node);

                if (fq_gc_candidate(f)) {
                        if (fcnt == FQ_GC_MAX)

                        p = &parent->rb_right;
                        p = &parent->rb_left;

        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;

                struct fq_flow *f = tofree[--fcnt];

                rb_erase(&f->fq_node, root);
                kmem_cache_free(fq_flow_cachep, f);
static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
        struct rb_node **p, *parent;
        struct sock *sk = skb->sk;
        struct rb_root *root;

        /* warning: no starvation prevention... */
        if (unlikely((skb->priority & TC_PRIO_MAX) == TC_PRIO_CONTROL))

        /* SYNACK messages are attached to a TCP_NEW_SYN_RECV request socket
         * or a listener (SYNCOOKIE mode)
         * 1) request sockets are not full blown,
         *    they do not contain sk_pacing_rate
         * 2) They are not part of a 'flow' yet
         * 3) We do not want to rate limit them (eg SYNFLOOD attack),
         *    especially if the listener set SO_MAX_PACING_RATE
         * 4) We pretend they are orphaned
         */
        if (!sk || sk_listener(sk)) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;

                /* By forcing the low order bit to 1, we make sure to not
                 * collide with a local flow (socket pointers are word aligned)
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
        } else if (sk->sk_state == TCP_CLOSE) {
                unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
                /*
                 * Sockets in TCP_CLOSE are not connected.
                 * Typical use case is UDP sockets: they can send packets
                 * with sendto() to many different destinations.
                 * We probably could use a generic bit advertising
                 * non-connected sockets, instead of sk_state == TCP_CLOSE,
                 */
                sk = (struct sock *)((hash << 1) | 1UL);
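        /* Illustrative example (assumed values): if skb_get_hash() & orphan_mask
         * yields 0x2a, the pseudo socket pointer becomes (0x2a << 1) | 1 = 0x55,
         * an odd value that can never equal a real, word-aligned struct sock
         * pointer, so orphaned flows never collide with local socket flows.
         */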
        root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];

        if (q->flows >= (2U << q->fq_trees_log) &&
            q->inactive_flows > q->flows/2)

                f = rb_entry(parent, struct fq_flow, fq_node);

                        /* socket might have been reallocated, so check
                         * if its sk_hash is the same.
                         * If not, we need to refill credit with
                        if (unlikely(skb->sk == sk &&
                                     f->socket_hash != sk->sk_hash)) {
                                f->credit = q->initial_quantum;
                                f->socket_hash = sk->sk_hash;
                                if (fq_flow_is_throttled(f))
                                        fq_flow_unset_throttled(q, f);
                                f->time_next_packet = 0ULL;

                        p = &parent->rb_right;
                        p = &parent->rb_left;

        f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
                q->stat_allocation_errors++;

        /* f->t_root is already zeroed after kmem_cache_zalloc() */

        fq_flow_set_detached(f);
        f->socket_hash = sk->sk_hash;
        f->credit = q->initial_quantum;

        rb_link_node(&f->fq_node, parent, p);
        rb_insert_color(&f->fq_node, root);
static struct sk_buff *fq_peek(struct fq_flow *flow)
        struct sk_buff *skb = skb_rb_first(&flow->t_root);
        struct sk_buff *head = flow->head;

        if (fq_skb_cb(skb)->time_to_send < fq_skb_cb(head)->time_to_send)

static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
        if (skb == flow->head) {
                flow->head = skb->next;
                rb_erase(&skb->rbnode, &flow->t_root);
                skb->dev = qdisc_dev(sch);

/* remove one skb from head of flow queue */
static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
        struct sk_buff *skb = fq_peek(flow);

        fq_erase_head(sch, flow, skb);
        skb_mark_not_on_list(skb);
        qdisc_qstats_backlog_dec(sch, skb);
static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
        struct rb_node **p, *parent;
        struct sk_buff *head, *aux;

        fq_skb_cb(skb)->time_to_send = skb->tstamp ?: ktime_get_ns();

            fq_skb_cb(skb)->time_to_send >= fq_skb_cb(flow->tail)->time_to_send) {
                        flow->tail->next = skb;

        p = &flow->t_root.rb_node;

                aux = rb_to_skb(parent);
                if (fq_skb_cb(skb)->time_to_send >= fq_skb_cb(aux)->time_to_send)
                        p = &parent->rb_right;
                        p = &parent->rb_left;

        rb_link_node(&skb->rbnode, parent, p);
        rb_insert_color(&skb->rbnode, &flow->t_root);
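/* Summary of the insertion policy above: when a packet's time_to_send is not
 * older than the current tail's, it is appended to the (head, tail) linear list
 * in O(1); an out-of-order packet is instead inserted into the t_root rbtree,
 * keyed by time_to_send. fq_peek() then returns whichever of the two heads is
 * due first.
 */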
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                      struct sk_buff **to_free)
        struct fq_sched_data *q = qdisc_priv(sch);

        if (unlikely(sch->q.qlen >= sch->limit))
                return qdisc_drop(skb, sch, to_free);

        f = fq_classify(skb, q);
        if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
                q->stat_flows_plimit++;
                return qdisc_drop(skb, sch, to_free);

        qdisc_qstats_backlog_inc(sch, skb);
        if (fq_flow_is_detached(f)) {
                struct sock *sk = skb->sk;

                fq_flow_add_tail(&q->new_flows, f);
                if (time_after(jiffies, f->age + q->flow_refill_delay))
                        f->credit = max_t(u32, f->credit, q->quantum);
                if (sk && q->rate_enable) {
                        if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
                                smp_store_release(&sk->sk_pacing_status,

        /* Note: this overwrites f->age */
        flow_queue_add(f, skb);

        if (unlikely(f == &q->internal)) {
                q->stat_internal_packets++;

        return NET_XMIT_SUCCESS;
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
        unsigned long sample;

        if (q->time_next_delayed_flow > now)

        /* Update unthrottle latency EWMA.
         * This is cheap and can help diagnosing timer/latency problems.
         */
        sample = (unsigned long)(now - q->time_next_delayed_flow);
        q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
        q->unthrottle_latency_ns += sample >> 3;
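        /* The two lines above implement an exponentially weighted moving average
         * with weight 1/8:
         *   unthrottle_latency_ns = 7/8 * unthrottle_latency_ns + 1/8 * sample
         */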
        q->time_next_delayed_flow = ~0ULL;
        while ((p = rb_first(&q->delayed)) != NULL) {
                struct fq_flow *f = rb_entry(p, struct fq_flow, rate_node);

                if (f->time_next_packet > now) {
                        q->time_next_delayed_flow = f->time_next_packet;

                fq_flow_unset_throttled(q, f);
static struct sk_buff *fq_dequeue(struct Qdisc *sch)
        struct fq_sched_data *q = qdisc_priv(sch);
        struct fq_flow_head *head;

        skb = fq_dequeue_head(sch, &q->internal);

        now = ktime_get_ns();
        fq_check_throttled(q, now);

        head = &q->new_flows;
                head = &q->old_flows;
                        if (q->time_next_delayed_flow != ~0ULL)
                                qdisc_watchdog_schedule_ns(&q->watchdog,
                                                           q->time_next_delayed_flow);

        if (f->credit <= 0) {
                f->credit += q->quantum;
                head->first = f->next;
                fq_flow_add_tail(&q->old_flows, f);

                u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
                                              f->time_next_packet);

                if (now < time_next_packet) {
                        head->first = f->next;
                        f->time_next_packet = time_next_packet;
                        fq_flow_set_throttled(q, f);

                if (time_next_packet &&
                    (s64)(now - time_next_packet - q->ce_threshold) > 0) {
                        INET_ECN_set_ce(skb);

        skb = fq_dequeue_head(sch, f);
                head->first = f->next;
                /* force a pass through old_flows to prevent starvation */
                if ((head == &q->new_flows) && q->old_flows.first) {
                        fq_flow_add_tail(&q->old_flows, f);
                        fq_flow_set_detached(f);

        plen = qdisc_pkt_len(skb);
        rate = q->flow_max_rate;

        /* If EDT time was provided for this skb, we need to
         * update f->time_next_packet only if this qdisc enforces
                rate = min(skb->sk->sk_pacing_rate, rate);

        if (rate <= q->low_rate_threshold) {
                plen = max(plen, q->quantum);

                u64 len = (u64)plen * NSEC_PER_SEC;

                len = div64_ul(len, rate);
                /* Since socket rate can change later,
                 * clamp the delay to 1 second.
                 * Really, providers of too big packets should be fixed !
                 */
                if (unlikely(len > NSEC_PER_SEC)) {
                        q->stat_pkts_too_long++;

                /* Account for schedule/timer drifts.
                 * f->time_next_packet was set when the prior packet was sent,
                 * and the current time (@now) can be too late by tens of usec.
                 */
                if (f->time_next_packet)
                        len -= min(len/2, now - f->time_next_packet);
                f->time_next_packet = now + len;
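                /* Worked example (illustrative numbers): a 64KB GSO packet sent
                 * at 62500 bytes/sec (500 kbit/s) would need
                 *   65536 * NSEC_PER_SEC / 62500 ~= 1.05e9 ns,
                 * which exceeds the 1 second clamp above, so the delay is capped
                 * and stat_pkts_too_long is incremented.
                 */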
        qdisc_bstats_update(sch, skb);

static void fq_flow_purge(struct fq_flow *flow)
        struct rb_node *p = rb_first(&flow->t_root);

                struct sk_buff *skb = rb_to_skb(p);

                rb_erase(&skb->rbnode, &flow->t_root);
                rtnl_kfree_skbs(skb, skb);
        rtnl_kfree_skbs(flow->head, flow->tail);

static void fq_reset(struct Qdisc *sch)
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *root;

        sch->qstats.backlog = 0;

        fq_flow_purge(&q->internal);

        for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
                root = &q->fq_root[idx];
                while ((p = rb_first(root)) != NULL) {
                        f = rb_entry(p, struct fq_flow, fq_node);

                        kmem_cache_free(fq_flow_cachep, f);

        q->new_flows.first = NULL;
        q->old_flows.first = NULL;
        q->delayed = RB_ROOT;
        q->inactive_flows = 0;
        q->throttled_flows = 0;
static void fq_rehash(struct fq_sched_data *q,
                      struct rb_root *old_array, u32 old_log,
                      struct rb_root *new_array, u32 new_log)
        struct rb_node *op, **np, *parent;
        struct rb_root *oroot, *nroot;
        struct fq_flow *of, *nf;

        for (idx = 0; idx < (1U << old_log); idx++) {
                oroot = &old_array[idx];
                while ((op = rb_first(oroot)) != NULL) {

                        of = rb_entry(op, struct fq_flow, fq_node);
                        if (fq_gc_candidate(of)) {

                                kmem_cache_free(fq_flow_cachep, of);

                        nroot = &new_array[hash_ptr(of->sk, new_log)];

                        np = &nroot->rb_node;

                                nf = rb_entry(parent, struct fq_flow, fq_node);
                                BUG_ON(nf->sk == of->sk);

                                        np = &parent->rb_right;
                                        np = &parent->rb_left;

                        rb_link_node(&of->fq_node, parent, np);
                        rb_insert_color(&of->fq_node, nroot);

        q->inactive_flows -= fcnt;
        q->stat_gc_flows += fcnt;
static void fq_free(void *addr)

static int fq_resize(struct Qdisc *sch, u32 log)
        struct fq_sched_data *q = qdisc_priv(sch);
        struct rb_root *array;

        if (q->fq_root && log == q->fq_trees_log)

        /* If XPS was setup, we can allocate memory on the right NUMA node */
        array = kvmalloc_node(sizeof(struct rb_root) << log, GFP_KERNEL | __GFP_RETRY_MAYFAIL,
                              netdev_queue_numa_node_read(sch->dev_queue));

        for (idx = 0; idx < (1U << log); idx++)
                array[idx] = RB_ROOT;

        old_fq_root = q->fq_root;
                fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);

        q->fq_trees_log = log;

        sch_tree_unlock(sch);

        fq_free(old_fq_root);
static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
        [TCA_FQ_PLIMIT]                 = { .type = NLA_U32 },
        [TCA_FQ_FLOW_PLIMIT]            = { .type = NLA_U32 },
        [TCA_FQ_QUANTUM]                = { .type = NLA_U32 },
        [TCA_FQ_INITIAL_QUANTUM]        = { .type = NLA_U32 },
        [TCA_FQ_RATE_ENABLE]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_DEFAULT_RATE]      = { .type = NLA_U32 },
        [TCA_FQ_FLOW_MAX_RATE]          = { .type = NLA_U32 },
        [TCA_FQ_BUCKETS_LOG]            = { .type = NLA_U32 },
        [TCA_FQ_FLOW_REFILL_DELAY]      = { .type = NLA_U32 },
        [TCA_FQ_LOW_RATE_THRESHOLD]     = { .type = NLA_U32 },
        [TCA_FQ_CE_THRESHOLD]           = { .type = NLA_U32 },
static int fq_change(struct Qdisc *sch, struct nlattr *opt,
                     struct netlink_ext_ack *extack)
        struct fq_sched_data *q = qdisc_priv(sch);
        struct nlattr *tb[TCA_FQ_MAX + 1];
        int err, drop_count = 0;
        unsigned drop_len = 0;

        err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,

        fq_log = q->fq_trees_log;

        if (tb[TCA_FQ_BUCKETS_LOG]) {
                u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);
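                /* Range check below: ilog2(256*1024) == 18, so at most
                 * 2^18 = 256K hash buckets can be configured.
                 */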
                if (nval >= 1 && nval <= ilog2(256*1024))

        if (tb[TCA_FQ_PLIMIT])
                sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);

        if (tb[TCA_FQ_FLOW_PLIMIT])
                q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);

        if (tb[TCA_FQ_QUANTUM]) {
                u32 quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);

                        q->quantum = quantum;

        if (tb[TCA_FQ_INITIAL_QUANTUM])
                q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);

        if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
                pr_warn_ratelimited("sch_fq: defrate %u ignored.\n",
                                    nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]));

        if (tb[TCA_FQ_FLOW_MAX_RATE]) {
                u32 rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);

                q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;

        if (tb[TCA_FQ_LOW_RATE_THRESHOLD])
                q->low_rate_threshold =
                        nla_get_u32(tb[TCA_FQ_LOW_RATE_THRESHOLD]);

        if (tb[TCA_FQ_RATE_ENABLE]) {
                u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);

                        q->rate_enable = enable;

        if (tb[TCA_FQ_FLOW_REFILL_DELAY]) {
                u32 usecs_delay = nla_get_u32(tb[TCA_FQ_FLOW_REFILL_DELAY]);

                q->flow_refill_delay = usecs_to_jiffies(usecs_delay);

        if (tb[TCA_FQ_ORPHAN_MASK])
                q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);

        if (tb[TCA_FQ_CE_THRESHOLD])
                q->ce_threshold = (u64)NSEC_PER_USEC *
                                  nla_get_u32(tb[TCA_FQ_CE_THRESHOLD]);
                sch_tree_unlock(sch);
                err = fq_resize(sch, fq_log);

        while (sch->q.qlen > sch->limit) {
                struct sk_buff *skb = fq_dequeue(sch);

                drop_len += qdisc_pkt_len(skb);
                rtnl_kfree_skbs(skb, skb);

        qdisc_tree_reduce_backlog(sch, drop_count, drop_len);

        sch_tree_unlock(sch);

static void fq_destroy(struct Qdisc *sch)
        struct fq_sched_data *q = qdisc_priv(sch);

        qdisc_watchdog_cancel(&q->watchdog);
static int fq_init(struct Qdisc *sch, struct nlattr *opt,
                   struct netlink_ext_ack *extack)
        struct fq_sched_data *q = qdisc_priv(sch);

        q->flow_plimit          = 100;
        q->quantum              = 2 * psched_mtu(qdisc_dev(sch));
        q->initial_quantum      = 10 * psched_mtu(qdisc_dev(sch));
        q->flow_refill_delay    = msecs_to_jiffies(40);
        q->flow_max_rate        = ~0UL;
        q->time_next_delayed_flow = ~0ULL;

        q->new_flows.first      = NULL;
        q->old_flows.first      = NULL;
        q->delayed              = RB_ROOT;

        q->fq_trees_log         = ilog2(1024);
        q->orphan_mask          = 1024 - 1;
        q->low_rate_threshold   = 550000 / 8;
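        /* i.e. a default low_rate_threshold of 550000 / 8 = 68750 bytes/sec,
         * which is 550 kbit/s.
         */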
        /* Default ce_threshold of 4294 seconds */
        q->ce_threshold         = (u64)NSEC_PER_USEC * ~0U;
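        /* ~0U usec is 4294967295 us ~= 4294.97 seconds expressed in nanoseconds,
         * so CE marking is effectively disabled unless a lower threshold is set.
         */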
        qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);

                err = fq_change(sch, opt, extack);
                err = fq_resize(sch, q->fq_trees_log);

static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
        struct fq_sched_data *q = qdisc_priv(sch);
        u64 ce_threshold = q->ce_threshold;

        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
                goto nla_put_failure;

        /* TCA_FQ_FLOW_DEFAULT_RATE is not used anymore */

        do_div(ce_threshold, NSEC_PER_USEC);

        if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
            nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
            nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
            nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
            nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
            nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE,
                        min_t(unsigned long, q->flow_max_rate, ~0U)) ||
            nla_put_u32(skb, TCA_FQ_FLOW_REFILL_DELAY,
                        jiffies_to_usecs(q->flow_refill_delay)) ||
            nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
            nla_put_u32(skb, TCA_FQ_LOW_RATE_THRESHOLD,
                        q->low_rate_threshold) ||
            nla_put_u32(skb, TCA_FQ_CE_THRESHOLD, (u32)ce_threshold) ||
            nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
                goto nla_put_failure;

        return nla_nest_end(skb, opts);
static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
        struct fq_sched_data *q = qdisc_priv(sch);
        struct tc_fq_qd_stats st;

        st.gc_flows               = q->stat_gc_flows;
        st.highprio_packets       = q->stat_internal_packets;
        st.throttled              = q->stat_throttled;
        st.flows_plimit           = q->stat_flows_plimit;
        st.pkts_too_long          = q->stat_pkts_too_long;
        st.allocation_errors      = q->stat_allocation_errors;
        st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
        st.inactive_flows         = q->inactive_flows;
        st.throttled_flows        = q->throttled_flows;
        st.unthrottle_latency_ns  = min_t(unsigned long,
                                          q->unthrottle_latency_ns, ~0U);
        st.ce_mark                = q->stat_ce_mark;
        sch_tree_unlock(sch);

        return gnet_stats_copy_app(d, &st, sizeof(st));
static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
        .priv_size      = sizeof(struct fq_sched_data),

        .enqueue        = fq_enqueue,
        .dequeue        = fq_dequeue,
        .peek           = qdisc_peek_dequeued,
        .destroy        = fq_destroy,
        .dump_stats     = fq_dump_stats,
        .owner          = THIS_MODULE,

static int __init fq_module_init(void)
        fq_flow_cachep = kmem_cache_create("fq_flow_cache",
                                           sizeof(struct fq_flow),

        ret = register_qdisc(&fq_qdisc_ops);
                kmem_cache_destroy(fq_flow_cachep);

static void __exit fq_module_exit(void)
        unregister_qdisc(&fq_qdisc_ops);
        kmem_cache_destroy(fq_flow_cachep);

module_init(fq_module_init)
module_exit(fq_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");