// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 *  Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/*	Fair Queue CoDel.
 *
 * Principles :
 * Packets are classified (internal classifier or external) on flows.
 * This is a Stochastic model (as we use a hash, several flows
 *			       might be hashed on the same slot)
 * Each flow has a CoDel managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * head drops only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow)
 */
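
/* A typical setup from user space goes through tc(8); the values below are
 * illustrative, not mandated by this file:
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		target 5ms interval 100ms ecn
 *
 * These options map onto the TCA_FQ_CODEL_* netlink attributes parsed in
 * fq_codel_change() below.
 */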

struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

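/* Select a flow for @skb: honour a direct skb->priority mapping first, then
 * the optional tcf classifier chain, and fall back to the flow hash.
 * Returns a 1-based flow index, or 0 if the packet must be dropped.
 */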
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions : might be changed when/if skb use a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

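/* Called when the qdisc is over its packet or memory limit: locate the flow
 * with the largest backlog and drop up to @max_packets packets (at most half
 * of that backlog in bytes) from its head. Returns the index of the flow
 * that was trimmed.
 */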
static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	flow->dropped += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}

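/* Enqueue path: classify the packet onto a flow, timestamp it for CoDel,
 * account backlog and memory usage, and if either sch->limit or the memory
 * limit is now exceeded, batch-drop from the fattest flow instead.
 */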
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog,
	 * with a limit of 64 packets so we don't add too big a CPU spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel, we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

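/* Dequeue path: deficit round robin over the new_flows and old_flows lists.
 * New flows are served first; a flow that exhausts its quantum is refilled
 * and rotated onto the old list, and each candidate packet goes through
 * codel_dequeue() so CoDel can drop or ECN-mark it first.
 */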
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

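/* Apply a netlink configuration update. Time attributes arrive in
 * microseconds and are converted to codel_time_t units; the flow count can
 * only be set before the flow table has been allocated. If the new limits
 * are smaller than the current queue, excess packets are dequeued and freed
 * under the tree lock.
 */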
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
					  fq_codel_policy, NULL);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	kvfree(q->backlogs);
	kvfree(q->flows);
}

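/* Initialise the qdisc. Defaults: a 10240 packet limit, 1024 flow buckets,
 * a 32 MByte memory limit, a drop batch of 64 packets and a quantum of one
 * MTU; any attributes supplied at creation time are applied through
 * fq_codel_change() before the flow table is allocated.
 */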
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		err = fq_codel_change(sch, opt, extack);
		if (err)
			goto init_failure;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	if (!q->flows) {
		q->flows = kvcalloc(q->flows_cnt,
				    sizeof(struct fq_codel_flow),
				    GFP_KERNEL);
		if (!q->flows) {
			err = -ENOMEM;
			goto init_failure;
		}
		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
		if (!q->backlogs) {
			err = -ENOMEM;
			goto alloc_failure;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;

alloc_failure:
	kvfree(q->flows);
	q->flows = NULL;
init_failure:
	q->flows_cnt = 0;
	return err;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type				= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage  = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
					    struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

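/* Class walker: flows are exposed as classes 1..flows_cnt, but only flows
 * currently linked on a list (i.e. backlogged) are reported.
 */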
static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.find		=	fq_codel_find,
	.tcf_block	=	fq_codel_tcf_block,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_unbind,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");