/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *		991129: - Bug fix with grio mode
 *			- A better single AvgQ mode with Grio (WRED)
 *			- A finer grained VQ dequeue based on a suggestion
 *			  from Ren Liu
 *			- More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */
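
/*
 * Rough usage sketch, assuming iproute2's gred syntax (option spelling
 * may differ between iproute2 versions): the VQ table is created first,
 * then individual DPs are parameterized, e.g.:
 *
 *	tc qdisc add dev eth0 root gred setup DPs 4 default 0 grio
 *	tc qdisc change dev eth0 root gred limit 60KB min 15KB max 45KB \
 *		burst 20 avpkt 1000 bandwidth 10Mbit DP 1 probability 0.02 prio 2
 */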

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far */
	u32		packetsin;	/* packets seen on virtualQ so far */
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

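/* Detect whether any two configured VQs share the same prio; if so,
 * the callers switch the qdisc into WRED mode (a single shared average
 * queue across those VQs).
 */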
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

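/* In WRED mode the RED math runs on the backlog of the whole qdisc;
 * otherwise each VQ is measured on its own backlog.
 */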
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

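/* The low bits of skb->tc_index, set earlier during classification,
 * select the virtual queue (DP) a packet belongs to.
 */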
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

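/* In WRED mode all VQs share one set of RED averaging state, kept in
 * table->wred_set; it is copied into the active VQ before use and
 * stored back afterwards.
 */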
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}

static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Per-VQ flags can only have been set while the global flags were 0 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}

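/* Enqueue: map the skb to its VQ (falling back to the default DP),
 * update the RED average, then mark, drop, or tail-enqueue the packet
 * according to the RED verdict and the per-VQ limit.
 */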
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
					sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* Fix tc_index? This could be controversial, but it is
		 * needed for requeueing.
		 */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* Sum up the qavg of all prios lower than ours to get the new qavg */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

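/* Dequeue from the shared queue and charge the packet back to its VQ;
 * when a queue drains completely, start the RED idle period on the
 * matching set of RED state.
 */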
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

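/* Mirror the software configuration to the device, if it can offload
 * GRED: a TC_GRED_REPLACE command carries the full VQ table.
 */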
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload opt = {
		.command	= command,
		.handle		= sch->handle,
		.parent		= sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt.set.grio_on = gred_rio_mode(table);
		opt.set.wred_on = gred_wred_mode(table);
		opt.set.dp_cnt = table->DPs;
		opt.set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt.set.tab[i].present = true;
			opt.set.tab[i].limit = q->limit;
			opt.set.tab[i].prio = q->prio;
			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt.set.tab[i].is_ecn = gred_use_ecn(q);
			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt.set.tab[i].probability = q->parms.max_P;
			opt.set.tab[i].backlog = &q->backlog;
		}
		opt.set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}

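/* Pull hardware counters and fold them into the software stats ahead
 * of a dump.
 */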
static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if the driver returns a failure, adjust the stats - in case
	 * offload ended but the driver still wants to adjust the values.
	 */
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		_bstats_update(&sch->bstats,
			       hw_stats->stats.bstats[i].bytes,
			       hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}

	kfree(hw_stats);
	return ret;
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

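/* Apply a TCA_GRED_DPS request: (re)size the VQ table, set the default
 * DP, update the global RED flags, and recompute rio/wred mode.
 */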
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found, so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}

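/* Create or update a single VQ from a tc_gred_qopt.  The caller holds
 * the qdisc tree lock; *prealloc was allocated before it was taken.
 */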
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
};

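/* The attributes were already checked by gred_vqs_validate() before
 * the qdisc tree lock was taken, so parsing here cannot fail.
 */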
static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested(tb, TCA_GRED_VQ_MAX, entry, gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}

static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested(tb, TCA_GRED_VQ_MAX, entry, gred_vq_policy,
			       extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested(vqs, TCA_GRED_VQ_ENTRY_MAX,
				  gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}

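/* Handle a change request: either a table-level TCA_GRED_DPS update or
 * a per-VQ TCA_GRED_PARMS/TCA_GRED_STAB update, but never both at once.
 */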
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else {
			prio = ctl->prio;
		}
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}

static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy, extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len
			     * psched_mtu(qdisc_dev(sch));

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}

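/* Dump configuration and per-VQ statistics, first in the legacy
 * all-in-one TCA_GRED_PARMS table, then again as structured
 * TCA_GRED_VQ_LIST entries.
 */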
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* Hack: fix at some point with a proper message.
			 * This is how we indicate to tc that there is no
			 * VQ at this DP.
			 */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in a more structured way */
	vqs = nla_nest_start(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
	gred_offload(sch, TC_GRED_DESTROY);
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");