On Wed, Aug 02, 2006 at 05:02:11AM +0200, Andi Kleen wrote:
> > --- a/arch/x86_64/kernel/smp.c
> > +++ b/arch/x86_64/kernel/smp.c
> > @@ -203,7 +203,7 @@ int __cpuinit init_smp_flush(void)
> > {
> > int i;
> > for_each_cpu_mask(i, cpu_possible_map) {
> > - spin_lock_init(&per_cpu(flush_state.tlbstate_lock, i));
> --- a/arch/x86_64/kernel/smp.c
> +++ b/arch/x86_64/kernel/smp.c
> @@ -203,7 +203,7 @@ int __cpuinit init_smp_flush(void)
> {
> int i;
> for_each_cpu_mask(i, cpu_possible_map) {
> - spin_lock_init(&per_cpu(flush_state.tlbstate_lock, i));
> + spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);