Will be used by basic cgroup resource stat reporting later.
Signed-off-by: Tejun Heo <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Li Zefan <[email protected]>
Cc: Johannes Weiner <[email protected]>
---
include/linux/sched/cputime.h | 3 ++-
kernel/sched/cputime.c | 5 ++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index 4c5b973..9251044 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -53,7 +53,8 @@ static inline void task_cputime_scaled(struct task_struct *t,
 extern void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
 extern void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st);
-
+extern void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+			   u64 *ut, u64 *st);
/*
* Thread group CPU time accounting.
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 6e3ea4a..3a8bfcc 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -585,9 +585,8 @@ static u64 scale_stime(u64 stime, u64 rtime, u64 total)
*
* Assuming that rtime_i+1 >= rtime_i.
*/
-static void cputime_adjust(struct task_cputime *curr,
-			   struct prev_cputime *prev,
-			   u64 *ut, u64 *st)
+void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
+		    u64 *ut, u64 *st)
{
u64 rtime, stime, utime;
unsigned long flags;
--
2.9.3