Commit-ID:  09329d1c2024522308ca4de977fc6bba753bab1a
Gitweb:     https://git.kernel.org/tip/09329d1c2024522308ca4de977fc6bba753bab1a
Author:     Bart Van Assche <[email protected]>
AuthorDate: Thu, 14 Feb 2019 15:00:40 -0800
Committer:  Ingo Molnar <[email protected]>
CommitDate: Thu, 28 Feb 2019 07:55:40 +0100
locking/lockdep: Reorder struct lock_class members

This patch does not change any functionality but makes the patch that
frees lock classes that are no longer in use easier to read.

Signed-off-by: Bart Van Assche <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Johannes Berg <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul E. McKenney <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Waiman Long <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
 include/linux/lockdep.h | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c5335df2372f..0c38bade84b7 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -76,6 +76,13 @@ struct lock_class {
 	 */
 	struct list_head		lock_entry;
 
+	/*
+	 * These fields represent a directed graph of lock dependencies,
+	 * to every node we attach a list of "forward" and a list of
+	 * "backward" graph nodes.
+	 */
+	struct list_head		locks_after, locks_before;
+
 	struct lockdep_subclass_key	*key;
 	unsigned int			subclass;
 	unsigned int			dep_gen_id;
@@ -86,13 +93,6 @@ struct lock_class {
 	unsigned long			usage_mask;
 	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];
 
-	/*
-	 * These fields represent a directed graph of lock dependencies,
-	 * to every node we attach a list of "forward" and a list of
-	 * "backward" graph nodes.
-	 */
-	struct list_head		locks_after, locks_before;
-
 	/*
 	 * Generation counter, when doing certain classes of graph walking,
 	 * to ensure that we check one node only once:
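As context for the moved fields: each node on locks_after / locks_before is a
struct lock_list whose ->class member points at a neighbouring class in the
dependency graph. The sketch below is an illustrative reconstruction of a
one-step "forward" walk over those edges, not code from this patch (lockdep's
real graph traversals live in kernel/locking/lockdep.c, and the function name
print_forward_deps is hypothetical):

/*
 * Illustrative sketch (kernel context assumed): walk the "forward"
 * dependency edges of a lock class. Each entry on ->locks_after is a
 * struct lock_list whose ->class points at a class that was observed
 * to be acquired after this one.
 */
static void print_forward_deps(struct lock_class *class)
{
	struct lock_list *entry;

	/* ->locks_after holds the outgoing ("forward") graph edges. */
	list_for_each_entry(entry, &class->locks_after, entry)
		printk(KERN_DEBUG "forward dep on class %p\n", entry->class);
}

Grouping locks_after and locks_before next to lock_entry keeps the members
that link a class into global data structures together, which is what makes
the follow-up patch that zaps unused classes easier to read.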

