author		Eric Dumazet <edumazet@google.com>	2018-09-21 08:51:48 -0700
committer	David S. Miller <davem@davemloft.net>	2018-09-21 19:37:59 -0700
commit		142537e419234c396890a22806b8644dce21b132 (patch)
tree		372bace77661b8ba09e341d6ddbb4c5371aa42be /net/sched/sch_fq.c
parent		2fd66ffba50716fc5ab481c48db643af3bda2276 (diff)
download	linux-142537e419234c396890a22806b8644dce21b132.tar.gz
net_sched: sch_fq: switch to CLOCK_TAI
TCP will soon provide, via skb->tstamp, an earliest departure time for
each packet, so that sch_fq does not have to derive the departure time
from the socket's sk_pacing_rate.

In linux-4.19 we chose CLOCK_TAI as the clock base for transports,
qdiscs, and NIC offloads.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
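
For context (not part of this commit): departure times that userspace or transports attach to packets are interpreted on the clock the qdisc runs on, which is why the clock base matters beyond sch_fq itself. Below is a minimal, hedged userspace sketch of stamping a packet with a CLOCK_TAI departure time through the existing SO_TXTIME/SCM_TXTIME socket API; the setup_txtime()/send_at() helpers, destination, and payload are illustrative placeholders, and the headers must be recent enough (>= 4.19) to define SO_TXTIME.

/* Hedged sketch, not from this patch: hand the kernel a CLOCK_TAI departure
 * time via SO_TXTIME/SCM_TXTIME. Helper names and the destination are
 * hypothetical; requires kernel headers that define SO_TXTIME and SCM_TXTIME.
 */
#include <linux/net_tstamp.h>	/* struct sock_txtime */
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>		/* SO_TXTIME, SCM_TXTIME */
#include <sys/uio.h>
#include <time.h>		/* CLOCK_TAI */

/* Opt the socket into user-supplied transmit times, expressed on CLOCK_TAI,
 * the same clock base sch_fq switches to in this patch. */
static int setup_txtime(int fd)
{
	struct sock_txtime cfg = { .clockid = CLOCK_TAI, .flags = 0 };

	return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
}

/* Send one datagram carrying an absolute CLOCK_TAI departure time (in ns)
 * in an SCM_TXTIME control message. */
static ssize_t send_at(int fd, const struct sockaddr_in *dst,
		       const void *buf, size_t len, uint64_t txtime_ns)
{
	char control[CMSG_SPACE(sizeof(txtime_ns))] = {};
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= (void *)dst,
		.msg_namelen	= sizeof(*dst),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= control,
		.msg_controllen	= sizeof(control),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type  = SCM_TXTIME;
	cm->cmsg_len   = CMSG_LEN(sizeof(txtime_ns));
	memcpy(CMSG_DATA(cm), &txtime_ns, sizeof(txtime_ns));

	return sendmsg(fd, &msg, 0);
}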
Diffstat (limited to 'net/sched/sch_fq.c')
-rw-r--r--	net/sched/sch_fq.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index b27ba36a269c..d5185c44e9a5 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -460,7 +460,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 {
 	struct fq_sched_data *q = qdisc_priv(sch);
-	u64 now = ktime_get_ns();
+	u64 now = ktime_get_tai_ns();
 	struct fq_flow_head *head;
 	struct sk_buff *skb;
 	struct fq_flow *f;
@@ -823,7 +823,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
 	q->fq_trees_log		= ilog2(1024);
 	q->orphan_mask		= 1024 - 1;
 	q->low_rate_threshold	= 550000 / 8;
-	qdisc_watchdog_init(&q->watchdog, sch);
+	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_TAI);
 
 	if (opt)
 		err = fq_change(sch, opt, extack);
@@ -878,7 +878,7 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 	st.flows_plimit		  = q->stat_flows_plimit;
 	st.pkts_too_long	  = q->stat_pkts_too_long;
 	st.allocation_errors	  = q->stat_allocation_errors;
-	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
+	st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_tai_ns();
 	st.flows		  = q->flows;
 	st.inactive_flows	  = q->inactive_flows;
 	st.throttled_flows	  = q->throttled_flows;
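
Taken together, the three hunks keep the time source read at dequeue, the clock the watchdog hrtimer is armed on, and the delta exported to userspace on the same clock (CLOCK_TAI). A minimal, hedged sketch of that pattern follows; struct my_sched_data and the my_*() functions are hypothetical stand-ins for fq's real state and handlers, using only the existing qdisc_watchdog_* helpers that appear in the diff above.

/* Hedged sketch of the invariant this patch preserves: the qdisc's notion of
 * "now", its watchdog deadline, and any time delta reported to userspace must
 * all come from the same clock. Names prefixed my_ are hypothetical.
 */
#include <linux/ktime.h>
#include <net/pkt_sched.h>

struct my_sched_data {
	struct qdisc_watchdog	watchdog;
	u64			time_next_delayed_flow;	/* CLOCK_TAI ns */
};

static int my_init(struct Qdisc *sch)
{
	struct my_sched_data *q = qdisc_priv(sch);

	/* Arm the watchdog hrtimer on CLOCK_TAI so its expirations line up
	 * with the ktime_get_tai_ns() readings taken at dequeue time. */
	qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_TAI);
	return 0;
}

static struct sk_buff *my_dequeue(struct Qdisc *sch)
{
	struct my_sched_data *q = qdisc_priv(sch);
	u64 now = ktime_get_tai_ns();	/* same clock as the watchdog */

	if (q->time_next_delayed_flow > now) {
		/* Nothing eligible yet: schedule a wakeup at a CLOCK_TAI
		 * deadline, matching the clockid passed at init time. */
		qdisc_watchdog_schedule_ns(&q->watchdog,
					   q->time_next_delayed_flow);
		return NULL;
	}

	/* Real packet selection elided; fq_dequeue() does the actual work. */
	return NULL;
}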