TESTED:
 - Now compiles with: --enable-platform=xen --enable-ncpus=8
 - Does not break any other CI build

TODO:
 - Runtime test of xen smp kernel

---
 i386/Makefrag.am        |   6 +++
 i386/Makefrag_x86.am    |   2 -
 i386/i386/ast_check.c   |   4 ++
 i386/i386/cpu_number.h  |   9 ++++
 i386/i386/i386asm.sym   |  17 ++++---
 i386/i386/locore.S      |   2 +-
 i386/i386/mp_desc.c     | 100 +++++++++++++++++++++++++++++++++++++++-
 i386/i386/percpu.c      |  40 +++++++++++++---
 i386/i386/percpu.h      |   5 ++
 i386/i386/spl.S         |   4 +-
 i386/i386/xen.h         |  10 +++-
 i386/i386at/model_dep.h |   1 +
 i386/xen/xen.c          |   1 +
 i386/xen/xen_locore.S   |   8 ++--
 x86_64/Makefrag.am      |   6 +++
 x86_64/locore.S         |   2 +-
 x86_64/spl.S            |   4 +-
 x86_64/xen_locore.S     |   8 ++--
 18 files changed, 199 insertions(+), 30 deletions(-)

diff --git a/i386/Makefrag.am b/i386/Makefrag.am
index 85333d1e..f54efeee 100644
--- a/i386/Makefrag.am
+++ b/i386/Makefrag.am
@@ -112,6 +112,12 @@ libkernel_a_SOURCES += \
        i386/i386/pit.c \
        i386/i386/pit.h
 
+if enable_smp
+libkernel_a_SOURCES += \
+       i386/i386/smp.c \
+       i386/i386/smp.h
+endif
+
 if enable_apic
 libkernel_a_SOURCES += \
        i386/i386at/ioapic.c
diff --git a/i386/Makefrag_x86.am b/i386/Makefrag_x86.am
index a6c7a5c8..eb800705 100644
--- a/i386/Makefrag_x86.am
+++ b/i386/Makefrag_x86.am
@@ -62,8 +62,6 @@ libkernel_a_SOURCES += \
        i386/i386/sched_param.h \
        i386/i386/seg.h \
        i386/i386/setjmp.h \
-       i386/i386/smp.c \
-       i386/i386/smp.h \
        i386/i386/spl.h \
        i386/i386/strings.c \
        i386/i386/task.h \
diff --git a/i386/i386/ast_check.c b/i386/i386/ast_check.c
index 8bf69a68..c0d9f630 100644
--- a/i386/i386/ast_check.c
+++ b/i386/i386/ast_check.c
@@ -50,7 +50,11 @@ void init_ast_check(const processor_t processor)
  */
 void cause_ast_check(const processor_t processor)
 {
+#ifndef MACH_XEN
     smp_remote_ast(APIC_LOGICAL_ID(processor->slot_num));
+#else
+#warning cause_ast_check not implemented on xen?
+#endif
 }
 
 #endif /* NCPUS > 1 */
diff --git a/i386/i386/cpu_number.h b/i386/i386/cpu_number.h
index 4e894a00..af0b93cb 100644
--- a/i386/i386/cpu_number.h
+++ b/i386/i386/cpu_number.h
@@ -38,8 +38,13 @@
 #define        CX8(addr, reg)  addr(,reg,8)
 
 /* Fastest version, requires gs being set up */
+#ifdef MACH_XEN
+#define CPU_NUMBER(reg) \
+       movl    MY(VCPU_ID), reg;
+#else
 #define CPU_NUMBER(reg)        \
        movl    MY(CPU_ID), reg;
+#endif
 
 #ifndef __ASSEMBLER__
 #include <kern/cpu_number.h>
@@ -55,7 +60,11 @@ static inline int cpu_number_slow(void)
 
 static inline int cpu_number(void)
 {
+#ifdef MACH_XEN
+       return percpu_get(int, vcpu_id);
+#else
        return percpu_get(int, cpu_id);
+#endif
 }
 #endif
 
diff --git a/i386/i386/i386asm.sym b/i386/i386/i386asm.sym
index e1f5c6bb..deb3e45e 100644
--- a/i386/i386/i386asm.sym
+++ b/i386/i386/i386asm.sym
@@ -181,13 +181,18 @@ offset    thread                  th      user_timer
 #endif
 
 #ifdef MACH_XEN
-offset shared_info             si      vcpu_info[0].evtchn_upcall_mask CPU_CLI
-offset shared_info             si      vcpu_info[0].evtchn_upcall_pending      CPU_PENDING
-offset shared_info             si      vcpu_info[0].evtchn_pending_sel CPU_PENDING_SEL
-offset shared_info             si      evtchn_pending  PENDING
-offset shared_info             si      evtchn_mask     EVTMASK
+offset percpu                  pc      vcpu_id         PERCPU_VCPU_ID
+offset shared_info             si      vcpu_info[0].evtchn_upcall_mask         CPU0_CLI
+offset shared_info             si      vcpu_info[0].evtchn_upcall_pending      CPU0_PENDING
+offset shared_info             si      vcpu_info[0].evtchn_pending_sel         CPU0_PENDING_SEL
+offset vcpu_info               vi      evtchn_upcall_mask      CPUx_CLI
+offset vcpu_info               vi      evtchn_upcall_pending   CPUx_PENDING
+offset vcpu_info               vi      evtchn_pending_sel      CPUx_PENDING_SEL
+offset shared_info             si      evtchn_pending          PENDING
+offset shared_info             si      evtchn_mask             EVTMASK
 #ifdef MACH_PV_PAGETABLES
-offset shared_info             si      vcpu_info[0].arch.cr2   CR2
+offset shared_info             si      vcpu_info[0].arch.cr2   CPU0_CR2
+offset vcpu_info               vi      arch.cr2                CPUx_CR2
 #endif /* MACH_PV_PAGETABLES */
 #endif /* MACH_XEN */
 
diff --git a/i386/i386/locore.S b/i386/i386/locore.S
index 905e1af9..19e674c3 100644
--- a/i386/i386/locore.S
+++ b/i386/i386/locore.S
@@ -492,7 +492,7 @@ ENTRY(t_page_fault)
        pushl   $(T_PAGE_FAULT)         /* mark a page fault trap */
        pusha                           /* save the general registers */
 #ifdef MACH_PV_PAGETABLES
-       movl    %ss:hyp_shared_info+CR2,%eax
+       movl    %ss:hyp_shared_info+CPU0_CR2,%eax
 #else  /* MACH_PV_PAGETABLES */
        movl    %cr2,%eax               /* get the faulting address */
 #endif /* MACH_PV_PAGETABLES */
diff --git a/i386/i386/mp_desc.c b/i386/i386/mp_desc.c
index 1343861c..406cf2bf 100644
--- a/i386/i386/mp_desc.c
+++ b/i386/i386/mp_desc.c
@@ -95,9 +95,11 @@ interrupt_stack_alloc(void)
 }
 
 #if    NCPUS > 1
+#ifndef MACH_XEN
 phys_addr_t apboot_addr;
 extern void *apboot, *apbootend;
 extern volatile ApicLocalUnit* lapic;
+#endif
 
 /*
  * Multiprocessor i386/i486 systems use a separate copy of the
@@ -205,7 +207,9 @@ cpu_control(int cpu, const int *info, unsigned int count)
 void
 interrupt_processor(int cpu)
 {
+#ifndef MACH_XEN
        smp_pmap_update(APIC_LOGICAL_ID(cpu));
+#endif
 }
 
 static void
@@ -225,6 +229,7 @@ paging_enable(void)
 #endif  /* MACH_HYP */
 }
 
+#ifndef MACH_XEN
 static __attribute__((noreturn)) void
 cpu_setup(int cpu)
 {
@@ -341,4 +346,97 @@ start_other_cpus(void)
        /* Re-enable IOAPIC interrupts as per setup */
        lapic_enable();
 }
-#endif /* NCPUS > 1 */
+
+#else  /* !MACH_XEN */
+
+#include <xen/public/vcpu.h>
+
+void start_other_cpus(void) {
+       int vcpu;
+       int vcpus;
+       int err;
+       struct vcpu_register_vcpu_info info;
+       volatile struct vcpu_info *vcpui;
+
+       for (vcpus = 0; vcpus < NCPUS; vcpus++) {
+               if (hyp_vcpu_is_up(vcpus) < 0)
+                       break;
+        }
+       printf("Xen: Detected %d vCPUS\n", vcpus);
+
+       if (vcpus == 1)
+               return;
+
+       for (vcpu = 1; vcpu < vcpus; vcpu++) {
+               machine_slot[vcpu].running = FALSE;
+       }
+
+       for (vcpu = 1; vcpu < vcpus; vcpu++) {
+
+               init_percpu(vcpu);
+
+               err = hyp_vcpu_initialise(vcpu, &percpu_array[vcpu].vcpu_gc);
+               if (err) {
+                       printf("Cannot initialise xen vcpu=%d\n", vcpu);
+                       continue;
+               }
+
+               vcpui = &hyp_shared_info.vcpu_info[vcpu];
+               info.mfn = kv_to_mfn(vcpui);
+               info.offset = (uint32_t)((uintptr_t)vcpui & ~PAGE_MASK);
+
+               err = hyp_vcpu_register(vcpu, &info);
+               if (err) {
+                       printf("Cannot register xen vcpu=%d\n", vcpu);
+                       continue;
+               }
+               percpu_array[vcpu].vcpu_id = vcpu;
+
+               err = hyp_vcpu_up(vcpu);
+               if (err) {
+                       printf("Cannot bring up xen vcpu=%d\n", vcpu);
+                       percpu_array[vcpu].vcpu_id = 0;
+                       continue;
+               }
+       }
+
+       for (vcpu = 1; vcpu < vcpus; vcpu++) {
+               printf("Waiting for vCPU %d\n", vcpu);
+
+               do {
+                       cpu_pause();
+               } while (machine_slot[vcpu].running == FALSE);
+       }
+       printf("Xen vCPU bringup complete\n");
+}
+
+void cpu_ap_main(void) {
+       int vcpu = cpu_number();
+
+       assert (vcpu > 0);
+
+       mp_desc_init(vcpu);
+       printf("vCPU=(%u) mpdesc done\n", vcpu);
+
+       ap_gdt_init(vcpu);
+       printf("vCPU=(%u) gdt done\n", vcpu);
+
+       ap_idt_init(vcpu);
+       printf("vCPU=(%u) idt done\n", vcpu);
+
+       ap_ldt_init(vcpu);
+       printf("vCPU=(%u) ldt done\n", vcpu);
+
+       ap_ktss_init(vcpu);
+       printf("vCPU=(%u) ktss done\n", vcpu);
+
+       /* Initialize machine_slot fields with the cpu data */
+       machine_slot[vcpu].cpu_subtype = CPU_SUBTYPE_AT386;
+       machine_slot[vcpu].cpu_type = machine_slot[0].cpu_type;
+       machine_slot[vcpu].running = TRUE;
+
+       cpu_launch_first_thread(THREAD_NULL);
+}
+
+#endif /* !MACH_XEN */
+#endif /* NCPUS > 1 */
diff --git a/i386/i386/percpu.c b/i386/i386/percpu.c
index c6b728b6..aed798ad 100644
--- a/i386/i386/percpu.c
+++ b/i386/i386/percpu.c
@@ -14,20 +14,48 @@
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
-#include <i386/smp.h>
+#ifdef APIC
 #include <i386/apic.h>
+#endif
+#include <i386/gdt.h>
+#include <i386/ldt.h>
+#include <i386/smp.h>
 #include <kern/cpu_number.h>
 #include <i386/percpu.h>
+#include <i386at/model_dep.h>
+
+extern void cpu_ap_main(void);
+extern void hyp_callback(void);
+extern void hyp_failsafe_callback(void);
 
 struct percpu percpu_array[NCPUS] = {0};
 
-#ifndef MACH_XEN
 void init_percpu(int cpu)
 {
-    int apic_id = apic_get_current_cpu();
-
     percpu_array[cpu].self = &percpu_array[cpu];
-    percpu_array[cpu].apic_id = apic_id;
+#ifdef APIC
+    percpu_array[cpu].apic_id = apic_get_current_cpu();
+#endif
     percpu_array[cpu].cpu_id = cpu;
-}
+#if NCPUS > 1 && defined(MACH_XEN)
+    percpu_array[cpu].vcpu_id = cpu;
+    percpu_array[cpu].vcpu_gc.user_regs.eip = (unsigned long)cpu_ap_main;
+    percpu_array[cpu].vcpu_gc.flags = VGCF_IN_KERNEL;
+    percpu_array[cpu].vcpu_gc.user_regs.eflags = 0x1000; /* IOPL_RING1 */
+    percpu_array[cpu].vcpu_gc.user_regs.ds = USER_DS;
+    percpu_array[cpu].vcpu_gc.user_regs.es = USER_DS;
+    percpu_array[cpu].vcpu_gc.user_regs.ss = KERNEL_DS;
+    percpu_array[cpu].vcpu_gc.user_regs.cs = KERNEL_CS;
+       percpu_array[cpu].vcpu_gc.user_regs.esp = (unsigned long)int_stack_top[cpu];
+    //XXX gdt_{frames,ents} ?
+    //XXX trap_ctxt (idt)?
+    percpu_array[cpu].vcpu_gc.kernel_ss = KERNEL_DS;
+    percpu_array[cpu].vcpu_gc.kernel_sp = (unsigned long)int_stack_top[cpu];
+#ifdef __x86_64__
+    percpu_array[cpu].vcpu_gc.gs_base_kernel = (unsigned long)percpu_array[cpu].self;
 #endif
+    percpu_array[cpu].vcpu_gc.event_callback_eip = (unsigned long)hyp_callback;
+    percpu_array[cpu].vcpu_gc.failsafe_callback_eip = (unsigned long)hyp_failsafe_callback;
+    percpu_array[cpu].vcpu_gc.ctrlreg[3] = (unsigned long)kernel_page_dir;
+#endif
+}
diff --git a/i386/i386/percpu.h b/i386/i386/percpu.h
index 637d2ca6..4c036bcd 100644
--- a/i386/i386/percpu.h
+++ b/i386/i386/percpu.h
@@ -67,11 +67,16 @@ MACRO_END
 
 #include <kern/processor.h>
 #include <mach/mach_types.h>
+#include <i386/xen.h>
 
 struct percpu {
     struct percpu      *self;
     int                        apic_id;
     int                        cpu_id;
+#ifdef MACH_XEN
+    int                        vcpu_id;
+    struct vcpu_guest_context vcpu_gc;
+#endif
     struct processor   processor;
     thread_t           active_thread;
     vm_offset_t                active_stack;
diff --git a/i386/i386/spl.S b/i386/i386/spl.S
index 2f2c8e3a..3176d81c 100644
--- a/i386/i386/spl.S
+++ b/i386/i386/spl.S
@@ -42,8 +42,8 @@
        testl   hyp_shared_info+PENDING, %ebx;  \
        popl    %ebx;                           \
        jz      9f;                             /* Check whether there was some pending */ \
-lock   orl     $1,hyp_shared_info+CPU_PENDING_SEL; /* Yes, activate it */ \
-       movb    $1,hyp_shared_info+CPU_PENDING; \
+lock   orl     $1,hyp_shared_info+CPU0_PENDING_SEL; /* Yes, activate it */ \
+       movb    $1,hyp_shared_info+CPU0_PENDING; \
 9:
 
 ENTRY(spl0)
diff --git a/i386/i386/xen.h b/i386/i386/xen.h
index 2cd81be8..be5e20c3 100644
--- a/i386/i386/xen.h
+++ b/i386/i386/xen.h
@@ -360,6 +360,14 @@ _hypcall2(long, vm_assist, unsigned int, cmd, unsigned int, type);
 
 _hypcall0(long, iret);
 
+#include <xen/public/vcpu.h>
+_hypcall3(int, vcpu_op, int, cmd, int, vcpu, vm_offset_t /* void* */, arg)
+#define hyp_vcpu_initialise(vcpu, gc) hyp_vcpu_op(VCPUOP_initialise, vcpu, (vm_offset_t)gc)
+#define hyp_vcpu_register(vcpu, info) hyp_vcpu_op(VCPUOP_register_vcpu_info, vcpu, (vm_offset_t)info)
+#define hyp_vcpu_is_up(vcpu) hyp_vcpu_op(VCPUOP_is_up, vcpu, 0)
+#define hyp_vcpu_up(vcpu) hyp_vcpu_op(VCPUOP_up, vcpu, 0)
+#define hyp_vcpu_down(vcpu) hyp_vcpu_op(VCPUOP_down, vcpu, 0)
+
 #include <xen/public/sched.h>
 _hypcall2(long, sched_op, int, cmd, vm_offset_t /* void* */, arg)
 #define hyp_yield() hyp_sched_op(SCHEDOP_yield, 0)
@@ -403,7 +411,7 @@ static inline uint64_t hyp_cpu_clock(void) {
 
 #else  /* __ASSEMBLER__ */
 /* TODO: SMP */
-#define cli movb $0xff,hyp_shared_info+CPU_CLI
+#define cli movb $0xff,hyp_shared_info+CPU0_CLI
 #define sti call hyp_sti
 #define iretq jmp hyp_iretq
 #endif /* ASSEMBLER */
diff --git a/i386/i386at/model_dep.h b/i386/i386at/model_dep.h
index 3d5b6645..85a37e47 100644
--- a/i386/i386at/model_dep.h
+++ b/i386/i386at/model_dep.h
@@ -21,6 +21,7 @@
 
 #include <i386/vm_param.h>
 #include <mach/vm_prot.h>
+#include <sys/types.h>
 
 /*
  * Interrupt stack.
diff --git a/i386/xen/xen.c b/i386/xen/xen.c
index 1cc3fcab..8d7ae766 100644
--- a/i386/xen/xen.c
+++ b/i386/xen/xen.c
@@ -20,6 +20,7 @@
 #include <kern/debug.h>
 #include <kern/mach_clock.h>
 
+#include <mach/vm_param.h>
 #include <mach/machine/eflags.h>
 #include <machine/thread.h>
 #include <machine/ipl.h>
diff --git a/i386/xen/xen_locore.S b/i386/xen/xen_locore.S
index 1468ef80..6e20139c 100644
--- a/i386/xen/xen_locore.S
+++ b/i386/xen/xen_locore.S
@@ -56,7 +56,7 @@ ENTRY(hyp_sti)
        pushl   %ebp
        movl    %esp, %ebp
 _hyp_sti:
-       movb    $0,hyp_shared_info+CPU_CLI /* Enable interrupts */
+       movb    $0,hyp_shared_info+CPU0_CLI /* Enable interrupts */
        cmpl    $0,int_active           /* Check whether we were already checking pending interrupts */
        jz      0f
        popl    %ebp
@@ -64,12 +64,12 @@ _hyp_sti:
 0:
        /* Not active, check pending interrupts by hand */
        /* no memory barrier needed on x86 */
-       cmpb    $0,hyp_shared_info+CPU_PENDING
+       cmpb    $0,hyp_shared_info+CPU0_PENDING
        jne     0f
        popl    %ebp
        ret
 0:
-       movb    $0xff,hyp_shared_info+CPU_CLI
+       movb    $0xff,hyp_shared_info+CPU0_CLI
 1:
        pushl   %eax
        pushl   %ecx
@@ -86,7 +86,7 @@ _hyp_sti:
        popl    %ecx
        popl    %eax
        decl    int_active              /* stopped handling interrupts */
-       cmpb    $0,hyp_shared_info+CPU_PENDING
+       cmpb    $0,hyp_shared_info+CPU0_PENDING
        jne     1b
        jmp     _hyp_sti
 
diff --git a/x86_64/Makefrag.am b/x86_64/Makefrag.am
index 36b5fa38..bc82cec9 100644
--- a/x86_64/Makefrag.am
+++ b/x86_64/Makefrag.am
@@ -110,6 +110,12 @@ libkernel_a_SOURCES += \
        i386/i386/pit.c \
        i386/i386/pit.h
 
+if enable_smp
+libkernel_a_SOURCES += \
+       i386/i386/smp.c \
+       i386/i386/smp.h
+endif
+
 if enable_apic
 libkernel_a_SOURCES += \
        i386/i386at/ioapic.c
diff --git a/x86_64/locore.S b/x86_64/locore.S
index fad0d73c..672ae4b4 100644
--- a/x86_64/locore.S
+++ b/x86_64/locore.S
@@ -632,7 +632,7 @@ ENTRY(t_page_fault)
        pushq   $(T_PAGE_FAULT)         /* mark a page fault trap */
        pusha                           /* save the general registers */
 #ifdef MACH_XEN
-       movq    %ss:hyp_shared_info+CR2,%rax
+       movq    %ss:hyp_shared_info+CPU0_CR2,%rax
 #else  /* MACH_XEN */
        movq    %cr2,%rax               /* get the faulting address */
 #endif /* MACH_XEN */
diff --git a/x86_64/spl.S b/x86_64/spl.S
index 28a17e22..822262ab 100644
--- a/x86_64/spl.S
+++ b/x86_64/spl.S
@@ -41,8 +41,8 @@
        testl   hyp_shared_info+PENDING, %ebx;  \
        popq    %rbx;                           \
        jz      9f;                             /* Check whether there was some pending */ \
-lock   orl     $1,hyp_shared_info+CPU_PENDING_SEL; /* Yes, activate it */ \
-       movb    $1,hyp_shared_info+CPU_PENDING; \
+lock   orl     $1,hyp_shared_info+CPU0_PENDING_SEL; /* Yes, activate it */ \
+       movb    $1,hyp_shared_info+CPU0_PENDING; \
 9:
 
 ENTRY(spl0)
diff --git a/x86_64/xen_locore.S b/x86_64/xen_locore.S
index 967c8904..dfbf47c1 100644
--- a/x86_64/xen_locore.S
+++ b/x86_64/xen_locore.S
@@ -58,7 +58,7 @@ ENTRY(hyp_sti)
        pushq   %rbp
        movq    %rsp, %rbp
 _hyp_sti:
-       movb    $0,hyp_shared_info+CPU_CLI /* Enable interrupts */
+       movb    $0,hyp_shared_info+CPU0_CLI /* Enable interrupts */
        cmpl    $0,int_active           /* Check whether we were already checking pending interrupts */
        jz      0f
        popq    %rbp
@@ -66,12 +66,12 @@ _hyp_sti:
 0:
        /* Not active, check pending interrupts by hand */
        /* no memory barrier needed on x86 */
-       cmpb    $0,hyp_shared_info+CPU_PENDING
+       cmpb    $0,hyp_shared_info+CPU0_PENDING
        jne     0f
        popq    %rbp
        ret
 0:
-       movb    $0xff,hyp_shared_info+CPU_CLI
+       movb    $0xff,hyp_shared_info+CPU0_CLI
 1:
        pushq   %rax
        pushq   %rcx
@@ -98,7 +98,7 @@ _hyp_sti:
        popq    %rcx
        popq    %rax
        decl    int_active              /* stopped handling interrupts */
-       cmpb    $0,hyp_shared_info+CPU_PENDING
+       cmpb    $0,hyp_shared_info+CPU0_PENDING
        jne     1b
        jmp     _hyp_sti
 
-- 
2.51.0



Reply via email to