/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:	ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than ram size).
	Really, this limit will never be reached
	if RED works correctly.
 */

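/* Illustrative userspace configuration (a sketch only, not part of this
 * file; option names follow tc-red(8), thresholds are in bytes):
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 probability 0.02 bandwidth 10Mbit ecn adaptive
 *
 * "ecn" sets TC_RED_ECN and "adaptive" sets TC_RED_ADAPTATIVE, both of
 * which are handled below.
 */
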
struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

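/* Enqueue path: update the average queue length (an EWMA over the child's
 * backlog), then let red_action() decide whether to pass the packet through,
 * ECN-mark it, or drop it before it ever reaches the child qdisc.
 */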
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;
	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}
		q->stats.prob_mark++;
		break;
	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

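/* Dequeue from the child; when the child runs empty, note the start of an
 * idle period so the average queue length decays correctly while no traffic
 * flows.
 */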
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

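/* Ask the driver to install (TC_RED_REPLACE) or remove (TC_RED_DESTROY) a
 * hardware instance of this qdisc via ndo_setup_tc(). The thresholds are
 * converted back from the scaled fixed-point form used internally (they are
 * stored shifted left by Wlog).
 */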
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
};

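/* Parse a netlink configuration request. A non-zero limit gets a bfifo child
 * qdisc of that size; the RED parameters themselves are validated by
 * red_check_params() before anything is committed under the tree lock.
 */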
static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ / 2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;
}

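/* Adaptive RED: every 500 ms re-tune max_P according to where the average
 * queue length sits between qth_min and qth_max (red_adaptative_algo() lives
 * in net/red.h), holding the qdisc root lock while doing so.
 */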
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ / 2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
	return red_change(sch, opt, extack);
}

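/* Dump path: if the qdisc is offloaded, pull the hardware counters into the
 * qdisc's stats before reporting the configuration back to userspace.
 */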
static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.other = q->stats.other;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

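/* RED is classful only in a formal sense: it exposes a single pseudo-class
 * (minor 1) holding the child qdisc, so the child stays visible to userspace
 * and can be grafted. The callbacks below exist to satisfy that API.
 */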
static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");