From 142537e419234c396890a22806b8644dce21b132 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Fri, 21 Sep 2018 08:51:48 -0700
Subject: [PATCH] net_sched: sch_fq: switch to CLOCK_TAI

TCP will soon provide per skb->tstamp with earliest departure time,
so that sch_fq does not have to determine departure time by looking
at socket sk_pacing_rate.

We chose in linux-4.19 CLOCK_TAI as the clock base for transports,
qdiscs, and NIC offloads.

Signed-off-by: Eric Dumazet
Signed-off-by: David S. Miller
---
 net/sched/sch_fq.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index b27ba36a269c..d5185c44e9a5 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -460,7 +460,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
-	u64 now = ktime_get_ns();
+	u64 now = ktime_get_tai_ns();
 	struct fq_flow_head *head;
 	struct sk_buff *skb;
 	struct fq_flow *f;
@@ -823,7 +823,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
 	q->fq_trees_log = ilog2(1024);
 	q->orphan_mask = 1024 - 1;
 	q->low_rate_threshold = 550000 / 8;
-	qdisc_watchdog_init(&q->watchdog, sch);
+	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_TAI);

 	if (opt)
 		err = fq_change(sch, opt, extack);
@@ -878,7 +878,7 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 	st.flows_plimit = q->stat_flows_plimit;
 	st.pkts_too_long = q->stat_pkts_too_long;
 	st.allocation_errors = q->stat_allocation_errors;
-	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
+	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_tai_ns();
 	st.flows = q->flows;
 	st.inactive_flows = q->inactive_flows;
 	st.throttled_flows = q->throttled_flows;
-- 
2.20.1
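
For context beyond the diff itself: the commit message says CLOCK_TAI was chosen
in linux-4.19 as the clock base for transports, qdiscs, and NIC offloads. A
minimal user-space sketch of that model, assuming the SO_TXTIME socket option
from the same release, might look as follows. The helper names enable_txtime()
and send_at(), and any departure-time value passed in, are illustrative only and
are not part of this patch.

/* Illustrative sketch, not part of the patch: request CLOCK_TAI-based
 * departure times via SO_TXTIME and attach one per packet as SCM_TXTIME.
 */
#include <linux/net_tstamp.h>	/* struct sock_txtime */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <time.h>

#ifndef SO_TXTIME
#define SO_TXTIME	61
#define SCM_TXTIME	SO_TXTIME
#endif

/* Opt the socket into departure times expressed in CLOCK_TAI,
 * the same clock base sch_fq now uses.
 */
static int enable_txtime(int fd)
{
	struct sock_txtime cfg = {
		.clockid = CLOCK_TAI,
		.flags   = 0,
	};

	return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
}

/* Send one packet with an earliest departure time, in CLOCK_TAI
 * nanoseconds, carried as an SCM_TXTIME control message.
 */
static ssize_t send_at(int fd, const void *buf, size_t len, uint64_t txtime_ns)
{
	char control[CMSG_SPACE(sizeof(txtime_ns))] = {};
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov        = &iov,
		.msg_iovlen     = 1,
		.msg_control    = control,
		.msg_controllen = sizeof(control),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type  = SCM_TXTIME;
	cm->cmsg_len   = CMSG_LEN(sizeof(txtime_ns));
	memcpy(CMSG_DATA(cm), &txtime_ns, sizeof(txtime_ns));

	return sendmsg(fd, &msg, 0);
}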