TCP will soon provide an earliest departure time in each skb->tstamp,
so that sch_fq no longer has to derive the departure time from the
socket sk_pacing_rate.

In linux-4.19 we chose CLOCK_TAI as the clock base for transports,
qdiscs, and NIC offloads, so switch sch_fq to that clock as well.
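
For illustration only (not part of the diff below), a minimal sketch of
what a dequeue path can do once skb->tstamp carries an earliest
departure time expressed in CLOCK_TAI nanoseconds; the helper name
skb_tstamp_ready() is hypothetical and not introduced by this patch:

#include <linux/skbuff.h>
#include <linux/timekeeping.h>

/* Hypothetical helper, illustration only: true once the EDT stored in
 * skb->tstamp (CLOCK_TAI nanoseconds, 0 means "send immediately") has
 * been reached, so no sk_pacing_rate computation is needed.
 */
static bool skb_tstamp_ready(const struct sk_buff *skb)
{
	u64 edt = ktime_to_ns(skb->tstamp);

	return !edt || edt <= ktime_get_tai_ns();
}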

Signed-off-by: Eric Dumazet <eduma...@google.com>
---
 net/sched/sch_fq.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index b27ba36a269cc72cd716da19dcfa27018ec01490..d5185c44e9a5f521ca99243b6e9b53ec05b84d49 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -460,7 +460,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
 static struct sk_buff *fq_dequeue(struct Qdisc *sch)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
-       u64 now = ktime_get_ns();
+       u64 now = ktime_get_tai_ns();
        struct fq_flow_head *head;
        struct sk_buff *skb;
        struct fq_flow *f;
@@ -823,7 +823,7 @@ static int fq_init(struct Qdisc *sch, struct nlattr *opt,
        q->fq_trees_log         = ilog2(1024);
        q->orphan_mask          = 1024 - 1;
        q->low_rate_threshold   = 550000 / 8;
-       qdisc_watchdog_init(&q->watchdog, sch);
+       qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_TAI);
 
        if (opt)
                err = fq_change(sch, opt, extack);
@@ -878,7 +878,7 @@ static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
        st.flows_plimit           = q->stat_flows_plimit;
        st.pkts_too_long          = q->stat_pkts_too_long;
        st.allocation_errors      = q->stat_allocation_errors;
-       st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
+       st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_tai_ns();
        st.flows                  = q->flows;
        st.inactive_flows         = q->inactive_flows;
        st.throttled_flows        = q->throttled_flows;
-- 
2.19.0.444.g18242da7ef-goog
