---
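For reference, both macros translate the CPU's local APIC ID into a
kernel CPU number: fetch the ID, mask it with apic_id_mask, and index
cpu_id_lut.  Dropping the `l' suffix from the mnemonics lets the
operand size follow the register the caller passes; on x86_64 the
first mov loads `lapic', which holds a 64-bit address there, so a
32-bit register would truncate it.  Roughly, in C (the declarations
and the 0x20 offset below are illustrative assumptions, not lifted
from gnumach):

    #include <stdint.h>

    extern volatile uint32_t *lapic;    /* mapped local APIC base */
    extern uint32_t apic_id_mask;       /* valid APIC-ID bits */
    extern unsigned long cpu_id_lut[];  /* APIC ID -> CPU number; 8-byte
                                           entries to match CX()'s scale
                                           on x86_64 */

    static inline unsigned long cpu_number_sketch(void)
    {
        /* Architecturally the local APIC ID register sits at offset
           0x20 from the APIC base, with the ID in bits 31:24 (Intel
           SDM); the APIC_ID offset in the assembly plays this role. */
        uint32_t id = *(volatile uint32_t *)((uintptr_t)lapic + 0x20);
        id >>= 24;
        id &= apic_id_mask;
        return cpu_id_lut[id];
    }
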
 i386/i386/cpu_number.h | 26 ++++++++++++++------------
 x86_64/locore.S        | 10 +++++-----
 2 files changed, 19 insertions(+), 17 deletions(-)
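
CPU_NUMBER_NO_GS (which, per its name, must avoid %gs-based per-CPU
access) takes a different route: cpuid leaf 1 returns the initial
APIC ID in bits 31:24 of %ebx, which is then masked and looked up the
same way.  A rough C equivalent of that path, as a sketch under the
same illustrative declarations as above (gcc-style inline asm):

    static inline unsigned long cpu_number_no_gs_sketch(void)
    {
        uint32_t a = 1, b, c, d;
        /* cpuid leaf 1 clobbers eax/ebx/ecx/edx, hence the
           push/pop pairs in the macro. */
        __asm__ volatile ("cpuid"
                          : "+a" (a), "=b" (b), "=c" (c), "=d" (d));
        return cpu_id_lut[(b >> 24) & apic_id_mask];
    }

The x86_64 call sites in locore.S now pass 64-bit registers (%rcx,
%rdx, %r11) to match the widened macros.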

diff --git a/i386/i386/cpu_number.h b/i386/i386/cpu_number.h
index 0c0ec189..592592d8 100644
--- a/i386/i386/cpu_number.h
+++ b/i386/i386/cpu_number.h
@@ -41,13 +41,15 @@
 #define        CX(addr, reg)   addr(,reg,8)
 #endif
 
-/* Slow version, always works */
+/* Slow version, always works.
+ * Call with a 32-bit wide reg on i386,
+ * with a 64-bit wide reg on x86_64. */
 #define        CPU_NUMBER_NO_STACK(reg)        \
-       movl    %cs:lapic, reg          ;\
-       movl    %cs:APIC_ID(reg), reg   ;\
-       shrl    $24, reg                ;\
-       andl    %cs:apic_id_mask, reg   ;\
-       movl    %cs:CX(cpu_id_lut, reg), reg    ;\
+       mov     %cs:lapic, reg          ;\
+       mov     %cs:APIC_ID(reg), reg   ;\
+       shr     $24, reg                ;\
+       and     %cs:apic_id_mask, reg   ;\
+       mov     %cs:CX(cpu_id_lut, reg), reg    ;\
 
 /* Fast version, requires a stack */
 #ifdef __i386__
@@ -71,23 +73,23 @@
        popl    %esi
 #endif
 #ifdef __x86_64__
-/* Never call CPU_NUMBER_NO_GS(%esi) */
+/* Never call CPU_NUMBER_NO_GS(%rsi) */
 #define CPU_NUMBER_NO_GS(reg)          \
        pushq   %rsi            ;\
        pushq   %rax            ;\
        pushq   %rbx            ;\
        pushq   %rcx            ;\
        pushq   %rdx            ;\
-       movl    $1, %eax        ;\
+       movq    $1, %rax        ;\
        cpuid                   ;\
-       shrl    $24, %ebx       ;\
-       andl    %cs:apic_id_mask, %ebx  ;\
-       movl    %cs:CX(cpu_id_lut, %ebx), %esi  ;\
+       shrq    $24, %rbx       ;\
+       andq    %cs:apic_id_mask, %rbx  ;\
+       movq    %cs:CX(cpu_id_lut, %rbx), %rsi  ;\
        popq    %rdx            ;\
        popq    %rcx            ;\
        popq    %rbx            ;\
        popq    %rax            ;\
-       movl    %esi, reg       ;\
+       movq    %rsi, reg       ;\
        popq    %rsi
 #endif
 
diff --git a/x86_64/locore.S b/x86_64/locore.S
index 376f41c1..67d27fb1 100644
--- a/x86_64/locore.S
+++ b/x86_64/locore.S
@@ -832,7 +832,7 @@ ENTRY(all_intrs)
 
        PUSH_SEGMENTS_ISR(%rdx)         /* save segment registers */
 
-       CPU_NUMBER_NO_GS(%ecx)
+       CPU_NUMBER_NO_GS(%rcx)
        movq    %rsp,%rdx               /* on an interrupt stack? */
        and     $(~(INTSTACK_SIZE-1)),%rdx
        cmpq    %ss:CX(EXT(int_stack_base),%rcx),%rdx
@@ -893,7 +893,7 @@ LEXT(return_to_iret)                        /* to find the return from calling interrupt) */
        iretq                           /* return to caller */
 
 int_from_intstack:
-       CPU_NUMBER_NO_GS(%edx)
+       CPU_NUMBER_NO_GS(%rdx)
        cmpq    CX(EXT(int_stack_base),%rdx),%rsp /* seemingly looping? */
        jb      stack_overflowed        /* if not: */
        call    EXT(interrupt)          /* call interrupt routine */
@@ -1181,7 +1181,7 @@ syscall_entry_2:
        movq    %rdx,R_CS(%rsp)         /* fix cs */
        movq    %rbx,R_EFLAGS(%rsp)     /* fix eflags */
 
-       CPU_NUMBER_NO_STACK(%edx)
+       CPU_NUMBER_NO_STACK(%rdx)
        TIME_TRAP_SENTRY
 
        movq    CX(EXT(kernel_stack),%rdx),%rbx
@@ -1421,7 +1421,7 @@ ENTRY(syscall64)
        mov     %r10,%rcx               /* fix arg3 location according to C ABI */
 
        /* switch to kernel stack, then we can enable interrupts */
-       CPU_NUMBER_NO_STACK(%r11d)
+       CPU_NUMBER_NO_STACK(%r11)
        movq    CX(EXT(kernel_stack),%r11),%rsp
        sti
 
@@ -1464,7 +1464,7 @@ _syscall64_call:
 
 _syscall64_check_for_ast:
        /* Check for ast. */
-       CPU_NUMBER_NO_GS(%r11d)
+       CPU_NUMBER_NO_GS(%r11)
        cmpl    $0,CX(EXT(need_ast),%r11)
        jz      _syscall64_restore_state
 
-- 
2.45.2