From: Rik van Riel <[email protected]>

When running a microbenchmark calling an invalid syscall number
in a loop, on a nohz_full CPU, we spend a full 9% of our CPU
time in __acct_update_integrals.

This function converts cputime_t to jiffies, to a timeval, only to
convert the timeval back to microseconds before discarding it.

This patch leaves __acct_update_integrals functionally equivalent,
but speeds things up by about 11%, with 10 million calls to an
invalid syscall number dropping from 3.7 to 3.3 seconds.

Signed-off-by: Rik van Riel <[email protected]>
---
 kernel/tsacct.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 975cb49e32bf..afb5cf8ecc5f 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -125,22 +125,22 @@ static void __acct_update_integrals(struct task_struct *tsk,
 {
        if (likely(tsk->mm)) {
                cputime_t time, dtime;
-               struct timeval value;
                unsigned long flags;
-               u64 delta;
+               u64 delta, usecs;
 
                local_irq_save(flags);
                time = stime + utime;
                dtime = time - tsk->acct_timexpd;
-               jiffies_to_timeval(cputime_to_jiffies(dtime), &value);
-               delta = value.tv_sec;
-               delta = delta * USEC_PER_SEC + value.tv_usec;
+               delta = cputime_to_jiffies(dtime);
 
                if (delta == 0)
                        goto out;
+
+               usecs = jiffies_to_usecs(delta);
+
                tsk->acct_timexpd = time;
-               tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
-               tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
+               tsk->acct_rss_mem1 += usecs * get_mm_rss(tsk->mm);
+               tsk->acct_vm_mem1 += usecs * tsk->mm->total_vm;
        out:
                local_irq_restore(flags);
        }
-- 
2.5.0

Reply via email to