diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index e0da155..91a0dc4 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -116,7 +116,7 @@ struct cbq_class {
        long                    avgidle;
        long                    deficit;        /* Saved deficit for WRR */
        psched_time_t           penalized;
-       struct gnet_stats_basic_packed bstats;
+       struct gnet_stats_basic_sync bstats;
        struct gnet_stats_queue qstats;
        struct net_rate_estimator __rcu *rate_est;
        struct tc_cbq_xstats    xstats;
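
The open-coded packed counters are replaced by the seqcount-protected gnet_stats_basic_sync type. For reference, a rough sketch of that structure as defined in include/net/gen_stats.h on v5.16+ kernels (field details reproduced from memory, treat as approximate):

	struct gnet_stats_basic_sync {
		u64_stats_t bytes;		/* byte counter, written inside the syncp section */
		u64_stats_t packets;		/* packet counter, written inside the syncp section */
		struct u64_stats_sync syncp;	/* lets 32-bit readers fetch both 64-bit values consistently */
	} __aligned(2 * sizeof(u64));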
@@ -149,7 +149,6 @@ struct cbq_sched_data {
        psched_time_t           now;            /* Cached timestamp */
        unsigned int            pmask;
 
-       struct hrtimer          delay_timer;
        struct qdisc_watchdog   watchdog;       /* Watchdog timer,
                                                   started when CBQ has
                                                   backlog, but cannot
@@ -441,81 +440,6 @@ static void cbq_overlimit(struct cbq_class *cl)
        }
 }
 
-static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
-                                      psched_time_t now)
-{
-       struct cbq_class *cl;
-       struct cbq_class *cl_prev = q->active[prio];
-       psched_time_t sched = now;
-
-       if (cl_prev == NULL)
-               return 0;
-
-       do {
-               cl = cl_prev->next_alive;
-               if (now - cl->penalized > 0) {
-                       cl_prev->next_alive = cl->next_alive;
-                       cl->next_alive = NULL;
-                       cl->cpriority = cl->priority;
-                       cl->delayed = 0;
-                       cbq_activate_class(cl);
-
-                       if (cl == q->active[prio]) {
-                               q->active[prio] = cl_prev;
-                               if (cl == q->active[prio]) {
-                                       q->active[prio] = NULL;
-                                       return 0;
-                               }
-                       }
-
-                       cl = cl_prev->next_alive;
-               } else if (sched - cl->penalized > 0)
-                       sched = cl->penalized;
-       } while ((cl_prev = cl) != q->active[prio]);
-
-       return sched - now;
-}
-
-static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
-{
-       struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
-                                               delay_timer);
-       struct Qdisc *sch = q->watchdog.qdisc;
-       psched_time_t now;
-       psched_tdiff_t delay = 0;
-       unsigned int pmask;
-
-       now = psched_get_time();
-
-       pmask = q->pmask;
-       q->pmask = 0;
-
-       while (pmask) {
-               int prio = ffz(~pmask);
-               psched_tdiff_t tmp;
-
-               pmask &= ~(1<<prio);
-
-               tmp = cbq_undelay_prio(q, prio, now);
-               if (tmp > 0) {
-                       q->pmask |= 1<<prio;
-                       if (tmp < delay || delay == 0)
-                               delay = tmp;
-               }
-       }
-
-       if (delay) {
-               ktime_t time;
-
-               time = 0;
-               time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
-               hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
-       }
-
-       __netif_schedule(qdisc_root(sch));
-       return HRTIMER_NORESTART;
-}
-
 /*
  * It is mission critical procedure.
  *
@@ -565,8 +489,7 @@ cbq_update(struct cbq_sched_data *q)
                long avgidle = cl->avgidle;
                long idle;
 
-               cl->bstats.packets++;
-               cl->bstats.bytes += len;
+               _bstats_update(&cl->bstats, len, 1);
 
                /*
                 * (now - last) is total time between packet right edges.
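
The two per-packet increments collapse into _bstats_update(), which also opens and closes the u64_stats write section. A minimal sketch of what that helper does, assuming the v5.16+ version in include/net/sch_generic.h (exact details may differ):

	static inline void _bstats_update(struct gnet_stats_basic_sync *bstats,
					  __u64 bytes, __u32 packets)
	{
		u64_stats_update_begin(&bstats->syncp);	/* mark the counters as being written */
		u64_stats_add(&bstats->bytes, bytes);
		u64_stats_add(&bstats->packets, packets);
		u64_stats_update_end(&bstats->syncp);
	}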
@@ -1035,7 +958,6 @@ cbq_reset(struct Qdisc *sch)
        q->tx_class = NULL;
        q->tx_borrowed = NULL;
        qdisc_watchdog_cancel(&q->watchdog);
-       hrtimer_cancel(&q->delay_timer);
        q->toplevel = TC_CBQ_MAXLEVEL;
        q->now = psched_get_time();
 
@@ -1057,7 +979,7 @@ cbq_reset(struct Qdisc *sch)
 }
 
 
-static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
+static void cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
 {
        if (lss->change & TCF_CBQ_LSS_FLAGS) {
                cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
@@ -1075,7 +997,6 @@ static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
        }
        if (lss->change & TCF_CBQ_LSS_OFFTIME)
                cl->offtime = lss->offtime;
-       return 0;
 }
 
 static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
@@ -1163,8 +1084,6 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
        int err;
 
        qdisc_watchdog_init(&q->watchdog, sch);
-       hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
-       q->delay_timer.function = cbq_undelay;
 
        err = cbq_opt_parse(tb, opt, extack);
        if (err < 0)
@@ -1384,8 +1303,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
        if (cl->undertime != PSCHED_PASTPERFECT)
                cl->xstats.undertime = cl->undertime - q->now;
 
-       if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
-                                 d, NULL, &cl->bstats) < 0 ||
+       if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
            gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
            gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
                return -1;
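
gnet_stats_copy_basic() no longer takes the root qdisc's running seqcount; callers pass a bool saying whether the counters may be updated concurrently, so the dump path knows to read them under the per-counter syncp. The prototype, as I recall it from include/net/gen_stats.h:

	int gnet_stats_copy_basic(struct gnet_dump *d,
				  struct gnet_stats_basic_sync __percpu *cpu,
				  struct gnet_stats_basic_sync *b,
				  bool running);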
@@ -1519,7 +1437,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
                        err = gen_replace_estimator(&cl->bstats, NULL,
                                                    &cl->rate_est,
                                                    NULL,
-                                                   qdisc_root_sleeping_running(sch),
+                                                   true,
                                                    tca[TCA_RATE]);
                        if (err) {
                                NL_SET_ERR_MSG(extack, "Failed to replace specified rate estimator");
@@ -1611,6 +1529,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        if (cl == NULL)
                goto failure;
 
+       gnet_stats_basic_sync_init(&cl->bstats);
        err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
        if (err) {
                kfree(cl);
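
Because the counters now sit behind a u64_stats_sync, the embedded syncp (a real seqcount on 32-bit SMP) has to be initialized before first use, which is what gnet_stats_basic_sync_init() does. Roughly, per the helper in net/core/gen_stats.c (sketch from memory):

	void gnet_stats_basic_sync_init(struct gnet_stats_basic_sync *b)
	{
		u64_stats_set(&b->bytes, 0);	/* zero the 64-bit counters */
		u64_stats_set(&b->packets, 0);
		u64_stats_init(&b->syncp);	/* initialize the seqcount used by readers */
	}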
@@ -1619,9 +1538,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
 
        if (tca[TCA_RATE]) {
                err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
-                                       NULL,
-                                       qdisc_root_sleeping_running(sch),
-                                       tca[TCA_RATE]);
+                                       NULL, true, tca[TCA_RATE]);
                if (err) {
                        NL_SET_ERR_MSG(extack, "Couldn't create new estimator");
                        tcf_block_put(cl->block);
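
gen_new_estimator() and gen_replace_estimator() follow the same pattern: the seqcount argument is dropped and a bool tells the estimator whether the supplied stats are updated while it runs. The new-estimator signature is roughly (from include/net/gen_stats.h, details from memory):

	int gen_new_estimator(struct gnet_stats_basic_sync *bstats,
			      struct gnet_stats_basic_sync __percpu *cpu_bstats,
			      struct net_rate_estimator __rcu **rate_est,
			      spinlock_t *lock,
			      bool running,
			      struct nlattr *opt);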