+    /* Turn paging on.
+     * TODO: Why does setting the WP bit here cause a crash?
+     */
+    set_cr0(get_cr0() | CR0_PG /* | CR0_WP */);
+    set_cr0(get_cr0() & ~(CR0_CD | CR0_NW));
+    if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
+        set_cr4(get_cr4() | CR4_PGE);

Check my original paging_setup() to compare. You must check whether PAE is
enabled and set different flags depending on that.

https://github.com/AlmuHS/GNUMach_SMP/blob/smp/i386/i386/mp_desc.c#L365-L392
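
For illustration, here is a rough sketch of what I mean, reusing the same
symbols this patch already touches (kernel_pmap, kernel_page_dir, CR4_PAE,
CPU_FEATURE_PAE, ...). The helper name ap_enable_paging is hypothetical; my
actual routine is at the link above, and the PAE branch mirrors
pmap_set_page_dir() from this patch:

    /* Illustrative sketch only, not the exact code from my repo. */
    static void ap_enable_paging(void)
    {
    #if PAE
            /* With PAE, CR4.PAE must be set and CR3 must point at the
             * PDPT before CR0.PG is turned on. */
            if (!CPU_HAS_FEATURE(CPU_FEATURE_PAE))
                    panic("CPU doesn't have support for PAE.");
            set_cr4(get_cr4() | CR4_PAE);
            set_cr3((unsigned long)_kvtophys(kernel_pmap->pdpbase));
    #else
            /* Without PAE, CR3 points directly at the page directory. */
            set_cr3((unsigned long)_kvtophys(kernel_page_dir));
    #endif
            /* Only now enable paging and clear the cache-disable bits. */
            set_cr0(get_cr0() | CR0_PG);
            set_cr0(get_cr0() & ~(CR0_CD | CR0_NW));
            if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
                    set_cr4(get_cr4() | CR4_PGE);
            flush_instr_queue();
    }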



On Wed, Feb 1, 2023 at 11:58, Damien Zammit (<dam...@zamaudio.com>) wrote:

> This is a rather large commit, but it is difficult to break up.
> It also serialises the AP bringup, so paging can be enabled per CPU,
> one by one.
>
> Also-by: Almudena Garcia <liberamenso10...@gmail.com>
> ---
>  i386/i386/cpu_number.h  |   2 +
>  i386/i386/mp_desc.c     | 226 ++++++++++++++++++++++++++++------------
>  i386/i386/mp_desc.h     |   7 +-
>  i386/i386at/boothdr.S   |  18 +++-
>  i386/i386at/ioapic.c    |   5 +-
>  i386/i386at/model_dep.c | 102 +++---------------
>  i386/intel/pmap.c       |  93 +++++++++++++++++
>  i386/intel/pmap.h       |   6 ++
>  kern/startup.c          |   5 +-
>  9 files changed, 302 insertions(+), 162 deletions(-)
>
> diff --git a/i386/i386/cpu_number.h b/i386/i386/cpu_number.h
> index b6c3a629..46232459 100644
> --- a/i386/i386/cpu_number.h
> +++ b/i386/i386/cpu_number.h
> @@ -32,6 +32,8 @@
>
>  #if    NCPUS > 1
>
> +#include "apic.h"
> +
>  /* More-specific code must define cpu_number() and CPU_NUMBER.  */
>  #ifdef __i386__
>  #define        CX(addr, reg)   addr(,reg,4)
> diff --git a/i386/i386/mp_desc.c b/i386/i386/mp_desc.c
> index bcf2fbe7..0db29291 100644
> --- a/i386/i386/mp_desc.c
> +++ b/i386/i386/mp_desc.c
> @@ -24,25 +24,36 @@
>   * the rights to redistribute these changes.
>   */
>
> -#if    NCPUS > 1
> -
> -#include <string.h>
> -
>  #include <kern/cpu_number.h>
>  #include <kern/debug.h>
>  #include <kern/printf.h>
> +#include <kern/smp.h>
> +#include <kern/startup.h>
> +#include <kern/kmutex.h>
>  #include <mach/machine.h>
>  #include <mach/xen.h>
>  #include <vm/vm_kern.h>
>
>  #include <i386/mp_desc.h>
>  #include <i386/lock.h>
> +#include <i386/apic.h>
> +#include <i386/locore.h>
> +#include <i386/gdt.h>
> +#include <i386at/idt.h>
> +#include <i386at/int_init.h>
> +#include <i386/cpu.h>
> +#include <i386/smp.h>
> +
>  #include <i386at/model_dep.h>
>  #include <machine/ktss.h>
> +#include <machine/smp.h>
>  #include <machine/tss.h>
>  #include <machine/io_perm.h>
>  #include <machine/vm_param.h>
>
> +#include <i386at/acpi_parse_apic.h>
> +#include <string.h>
> +
>  /*
>   * The i386 needs an interrupt stack to keep the PCB stack from being
>   * overrun by interrupts.  All interrupt stacks MUST lie at lower addresses
> @@ -52,20 +63,35 @@
>  /*
>   * Addresses of bottom and top of interrupt stacks.
>   */
> -vm_offset_t    interrupt_stack[NCPUS];
>  vm_offset_t    int_stack_top[NCPUS];
>  vm_offset_t    int_stack_base[NCPUS];
>
> -/*
> - * Barrier address.
> - */
> -vm_offset_t    int_stack_high;
> +/* Interrupt stack allocation */
> +uint8_t solid_intstack[NCPUS*INTSTACK_SIZE] __aligned(NCPUS*INTSTACK_SIZE);
> +
> +void
> +interrupt_stack_alloc(void)
> +{
> +       int i;
> +
> +       /*
> +        * Set up pointers to the top of the interrupt stack.
> +        */
>
> +       for (i = 0; i < NCPUS; i++) {
> +               int_stack_base[i] = (vm_offset_t) &solid_intstack[i * INTSTACK_SIZE];
> +               int_stack_top[i] = (vm_offset_t) &solid_intstack[(i + 1) * INTSTACK_SIZE] - 4;
> +       }
> +}
> +
> +#if    NCPUS > 1
>  /*
> - * First cpu`s interrupt stack.
> + * Flag to mark SMP init by BSP complete
>   */
> -extern char            _intstack[];    /* bottom */
> -extern char            _eintstack[];   /* top */
> +int bspdone;
> +
> +extern void *apboot, *apbootend;
> +extern volatile ApicLocalUnit* lapic;
>
>  /*
>   * Multiprocessor i386/i486 systems use a separate copy of the
> @@ -77,7 +103,7 @@ extern char          _eintstack[];   /* top */
>   */
>
>  /*
> - * Allocated descriptor tables.
> + * Descriptor tables.
>   */
>  struct mp_desc_table   *mp_desc_table[NCPUS] = { 0 };
>
> @@ -102,12 +128,13 @@ extern struct real_descriptor     ldt[LDTSZ];
>   * Allocate and initialize the per-processor descriptor tables.
>   */
>
> -struct mp_desc_table *
> +int
>  mp_desc_init(int mycpu)
>  {
>         struct mp_desc_table *mpt;
> +       vm_offset_t mem;
>
> -       if (mycpu == master_cpu) {
> +       if (mycpu == 0) {
>                 /*
>                  * Master CPU uses the tables built at boot time.
>                  * Just set the TSS and GDT pointers.
> @@ -118,61 +145,28 @@ mp_desc_init(int mycpu)
>         }
>         else {
>                 /*
> -                * Other CPUs allocate the table from the bottom of
> -                * the interrupt stack.
> +                * Allocate tables for other CPUs
>                  */
> -               mpt = (struct mp_desc_table *) interrupt_stack[mycpu];
> +               if (!init_alloc_aligned(sizeof(struct mp_desc_table), &mem))
> +                       panic("not enough memory for descriptor tables");
> +               mpt = (struct mp_desc_table *)phystokv(mem);
>
>                 mp_desc_table[mycpu] = mpt;
>                 mp_ktss[mycpu] = &mpt->ktss;
>                 mp_gdt[mycpu] = mpt->gdt;
>
>                 /*
> -                * Copy the tables
> +                * Zero the tables
>                  */
> -               memcpy(mpt->idt,
> -                 idt,
> -                 sizeof(idt));
> -               memcpy(mpt->gdt,
> -                 gdt,
> -                 sizeof(gdt));
> -               memcpy(mpt->ldt,
> -                 ldt,
> -                 sizeof(ldt));
> -               memset(&mpt->ktss, 0,
> -                 sizeof(struct task_tss));
> +               memset(mpt->idt, 0, sizeof(idt));
> +               memset(mpt->gdt, 0, sizeof(gdt));
> +               memset(mpt->ldt, 0, sizeof(ldt));
> +               memset(&mpt->ktss, 0, sizeof(struct task_tss));
>
> -               /*
> -                * Fix up the entries in the GDT to point to
> -                * this LDT and this TSS.
> -                */
> -#ifdef MACH_RING1
> -               panic("TODO %s:%d\n",__FILE__,__LINE__);
> -#else  /* MACH_RING1 */
> -               _fill_gdt_sys_descriptor(mpt->gdt, KERNEL_LDT,
> -                       (unsigned)&mpt->ldt,
> -                       LDTSZ * sizeof(struct real_descriptor) - 1,
> -                       ACC_P|ACC_PL_K|ACC_LDT, 0);
> -               _fill_gdt_sys_descriptor(mpt->gdt, KERNEL_TSS,
> -                       (unsigned)&mpt->ktss,
> -                       sizeof(struct task_tss) - 1,
> -                       ACC_P|ACC_PL_K|ACC_TSS, 0);
> -
> -               mpt->ktss.tss.ss0 = KERNEL_DS;
> -               mpt->ktss.tss.io_bit_map_offset = IOPB_INVAL;
> -               mpt->ktss.barrier = 0xFF;
> -#endif /* MACH_RING1 */
> -
> -               return mpt;
> +               return mycpu;
>         }
>  }
>
> -static kern_return_t intel_startCPU(int slot_num)
> -{
> -       printf("TODO: intel_startCPU\n");
> -       return KERN_FAILURE;
> -}
> -
>  /* XXX should be adjusted per CPU speed */
>  int simple_lock_pause_loop = 100;
>
> @@ -206,24 +200,124 @@ void
>  interrupt_processor(int cpu)
>  {
>         printf("interrupt cpu %d\n",cpu);
> +       smp_pmap_update(apic_get_cpu_apic_id(cpu));
> +}
> +
> +void
> +cpu_setup(int cpu)
> +{
> +    printf("AP=(%u) before\n", cpu);
> +
> +    pmap_make_temporary_mapping();
> +    printf("AP=(%u) tempmap done\n", cpu);
> +
> +#ifndef MACH_HYP
> +    /* Turn paging on.
> +     * TODO: Why does setting the WP bit here cause a crash?
> +     */
> +    set_cr0(get_cr0() | CR0_PG /* | CR0_WP */);
> +    set_cr0(get_cr0() & ~(CR0_CD | CR0_NW));
> +    if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
> +        set_cr4(get_cr4() | CR4_PGE);
> +#endif  /* MACH_HYP */
> +    flush_instr_queue();
> +    printf("AP=(%u) paging done\n", cpu);
> +
> +    mp_desc_init(cpu);
> +    printf("AP=(%u) mpdesc done\n", cpu);
> +
> +    ap_gdt_init(cpu);
> +    printf("AP=(%u) gdt done\n", cpu);
> +
> +    ap_idt_init(cpu);
> +    printf("AP=(%u) idt done\n", cpu);
> +
> +    ap_int_init(cpu);
> +    printf("AP=(%u) int done\n", cpu);
> +
> +    ap_ldt_init(cpu);
> +    printf("AP=(%u) ldt done\n", cpu);
> +
> +    ap_ktss_init(cpu);
> +    printf("AP=(%u) ktss done\n", cpu);
> +
> +    pmap_remove_temporary_mapping();
> +    printf("AP=(%u) remove tempmap done\n", cpu);
> +
> +    pmap_set_page_dir();
> +    flush_tlb();
> +    printf("AP=(%u) reset page dir done\n", cpu);
> +
> +    /* Initialize machine_slot fields with the cpu data */
> +    machine_slot[cpu].cpu_subtype = CPU_SUBTYPE_AT386;
> +    machine_slot[cpu].cpu_type = machine_slot[0].cpu_type;
> +
> +    lapic_enable();
> +    cpu_launch_first_thread(THREAD_NULL);
> +}
> +
> +void
> +cpu_ap_main()
> +{
> +    unsigned apic_id = (((ApicLocalUnit*)phystokv(lapic_addr))->apic_id.r >> 24) & 0xff;
> +    int cpu = apic_get_cpu_kernel_id(apic_id);
> +
> +    do {
> +        asm volatile ("pause" : : : "memory");
> +    } while (bspdone != cpu);
> +
> +    __sync_synchronize();
> +
> +    cpu_setup(cpu);
>  }
>
>  kern_return_t
>  cpu_start(int cpu)
>  {
> -       if (machine_slot[cpu].running)
> -               return KERN_FAILURE;
> +    assert(machine_slot[cpu].running != TRUE);
> +
> +    uint16_t apic_id = apic_get_cpu_apic_id(cpu);
> +
> +    printf("Trying to enable: %d\n", apic_id);
>
> -       return intel_startCPU(cpu);
> +    smp_startup_cpu(apic_id, AP_BOOT_ADDR);
> +
> +    printf("Started cpu %d (lapic id %04x)\n", cpu, apic_id);
> +
> +    return KERN_SUCCESS;
>  }
>
>  void
>  start_other_cpus(void)
>  {
> -       int cpu;
> -       for (cpu = 0; cpu < NCPUS; cpu++)
> -               if (cpu != cpu_number())
> -                       cpu_start(cpu);
> -}
> +       int ncpus = smp_get_numcpus();
> +
> +       //Copy cpu initialization assembly routine
> +       memcpy((void*)phystokv(AP_BOOT_ADDR), (void*) &apboot,
> +              (uint32_t)&apbootend - (uint32_t)&apboot);
>
> +#ifndef APIC
> +       lapic_enable(); /* Enable lapic only once */
> +#endif
> +       unsigned cpu;
> +
> +       splhigh();
> +
> +       bspdone = 0;
> +       for (cpu = 1; cpu < ncpus; cpu++) {
> +               machine_slot[cpu].running = FALSE;
> +
> +               //Start cpu
> +               printf("Starting AP %d\n", cpu);
> +               cpu_start(cpu);
> +
> +               bspdone++;
> +               do {
> +                       asm volatile ("pause" : : : "memory");
> +               } while (machine_slot[cpu].running == FALSE);
> +
> +               __sync_synchronize();
> +       }
> +       printf("BSP: Completed SMP init\n");
> +}
>  #endif /* NCPUS > 1 */
> diff --git a/i386/i386/mp_desc.h b/i386/i386/mp_desc.h
> index ede8775f..59d50e77 100644
> --- a/i386/i386/mp_desc.h
> +++ b/i386/i386/mp_desc.h
> @@ -46,6 +46,8 @@
>  #include "gdt.h"
>  #include "ldt.h"
>
> +#define AP_BOOT_ADDR   0x7000
> +
>  /*
>   * The descriptor tables are together in a structure
>   * allocated one per processor (except for the boot processor).
> @@ -72,11 +74,12 @@ extern struct task_tss              *mp_ktss[NCPUS];
>   */
>  extern struct real_descriptor  *mp_gdt[NCPUS];
>
> +extern uint8_t solid_intstack[];
>
>  /*
>   * Each CPU calls this routine to set up its descriptor tables.
>   */
> -extern struct mp_desc_table *  mp_desc_init(int);
> +extern int mp_desc_init(int);
>
>
>  extern void interrupt_processor(int cpu);
> @@ -90,4 +93,6 @@ extern kern_return_t cpu_start(int cpu);
>
>  extern kern_return_t cpu_control(int cpu, const int *info, unsigned int count);
>
> +extern void interrupt_stack_alloc(void);
> +
>  #endif /* _I386_MP_DESC_H_ */
> diff --git a/i386/i386at/boothdr.S b/i386/i386at/boothdr.S
> index 82d4b34a..d1d1fa51 100644
> --- a/i386/i386at/boothdr.S
> +++ b/i386/i386at/boothdr.S
> @@ -1,6 +1,6 @@
>
>  #include <mach/machine/asm.h>
> -
> +#include <i386/apic.h>
>  #include <i386/i386asm.h>
>
>         /*
> @@ -54,7 +54,18 @@ boot_entry:
>         movw    %ax,%ss
>
>         /* Switch to our own interrupt stack.  */
> -       movl    $_intstack+INTSTACK_SIZE,%esp
> +       movl    $solid_intstack+INTSTACK_SIZE-4, %esp
> +       andl    $0xfffffff0,%esp
> +
> +       /* Enable local apic */
> +       xorl    %eax, %eax
> +       xorl    %edx, %edx
> +       movl    $APIC_MSR, %ecx
> +       rdmsr
> +       orl     $APIC_MSR_ENABLE, %eax
> +       orl     $APIC_MSR_BSP, %eax
> +       movl    $APIC_MSR, %ecx
> +       wrmsr
>
>         /* Reset EFLAGS to a known state.  */
>         pushl   $0
> @@ -91,9 +102,6 @@ iplt_done:
>         /* Jump into C code.  */
>         call    EXT(c_boot_entry)
>
> -       .comm   _intstack,INTSTACK_SIZE
> -       .comm   _eintstack,0
> -
>  .align 16
>         .word 0
>  boot_gdt_descr:
> diff --git a/i386/i386at/ioapic.c b/i386/i386at/ioapic.c
> index 003690ed..f7b0d1d3 100644
> --- a/i386/i386at/ioapic.c
> +++ b/i386/i386at/ioapic.c
> @@ -186,9 +186,8 @@ lapic_enable_timer(void)
>      /* Some buggy hardware requires this set again */
>      lapic->divider_config.r = LAPIC_TIMER_DIVIDE_16;
>
> -    /* Enable interrupts for the first time on BSP */
> -    asm("sti");
> -    printf("LAPIC timer configured\n");
> +    /* Enable interrupts for the first time */
> +    printf("LAPIC timer configured on cpu%d\n", cpu_number());
>  }
>
>  void
> diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
> index fe11bffc..8f4fdfb9 100644
> --- a/i386/i386at/model_dep.c
> +++ b/i386/i386at/model_dep.c
> @@ -134,11 +134,9 @@ extern char        version[];
>  /* If set, reboot the system on ctrl-alt-delete.  */
>  boolean_t      rebootflag = FALSE;     /* exported to kdintr */
>
> -/* Interrupt stack.  */
> -static char int_stack[INTSTACK_SIZE] __aligned(INTSTACK_SIZE);
> -#if NCPUS <= 1
> -vm_offset_t int_stack_top[1], int_stack_base[1];
> -#endif
> +/* Interrupt stacks  */
> +extern vm_offset_t int_stack_top[], int_stack_base[];
> +extern uint8_t solid_intstack[];    /* bottom */
>
>  #ifdef LINUX_DEV
>  extern void linux_init(void);
> @@ -171,15 +169,18 @@ void machine_init(void)
>         hyp_init();
>  #else  /* MACH_HYP */
>
> +#if (NCPUS > 1)
> +       acpi_apic_init();
> +       smp_init();
> +#endif
>  #if defined(APIC)
> -       if (acpi_apic_init() != ACPI_SUCCESS) {
> -               panic("APIC not found, unable to boot");
> -       }
>         ioapic_configure();
>         lapic_enable_timer();
> -#if (NCPUS > 1)
> -       smp_init();
> +#else
> +       startrtclock();
> +#endif
>
> +#if defined(APIC)
>  #warning FIXME: Rather unmask them from their respective drivers
>         /* kd */
>         unmask_irq(1);
> @@ -187,8 +188,7 @@ void machine_init(void)
>         unmask_irq(4);
>         /* com1 */
>         unmask_irq(3);
> -#endif /* NCPUS > 1 */
> -#endif /* APIC */
> +#endif
>
>  #ifdef LINUX_DEV
>         /*
> @@ -359,10 +359,6 @@ register_boot_data(const struct multiboot_raw_info *mbi)
>  static void
>  i386at_init(void)
>  {
> -       /* XXX move to intel/pmap.h */
> -       extern pt_entry_t *kernel_page_dir;
> -       int i;
> -
>         /*
>          * Initialize the PIC prior to any possible call to an spl.
>          */
> @@ -448,47 +444,8 @@ i386at_init(void)
>          */
>         biosmem_setup();
>
> -       /*
> -        * We'll have to temporarily install a direct mapping
> -        * between physical memory and low linear memory,
> -        * until we start using our new kernel segment descriptors.
> -        */
> -#if INIT_VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
> -       vm_offset_t delta = INIT_VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS;
> -       if ((vm_offset_t)(-delta) < delta)
> -               delta = (vm_offset_t)(-delta);
> -       int nb_direct = delta >> PDESHIFT;
> -       for (i = 0; i < nb_direct; i++)
> -               kernel_page_dir[lin2pdenum_cont(INIT_VM_MIN_KERNEL_ADDRESS) + i] =
> -                       kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS) + i];
> -#endif
> -       /* We need BIOS memory mapped at 0xc0000 & co for BIOS accesses */
> -#if VM_MIN_KERNEL_ADDRESS != 0
> -       kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] =
> -               kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS)];
> -#endif
> +       pmap_make_temporary_mapping();
>
> -#ifdef MACH_PV_PAGETABLES
> -       for (i = 0; i < PDPNUM; i++)
> -               pmap_set_page_readonly_init((void*) kernel_page_dir + i * INTEL_PGBYTES);
> -#if PAE
> -       pmap_set_page_readonly_init(kernel_pmap->pdpbase);
> -#endif /* PAE */
> -#endif /* MACH_PV_PAGETABLES */
> -#if PAE
> -#ifdef __x86_64__
> -       set_cr3((unsigned long)_kvtophys(kernel_pmap->l4base));
> -#else
> -       set_cr3((unsigned long)_kvtophys(kernel_pmap->pdpbase));
> -#endif
> -#ifndef        MACH_HYP
> -       if (!CPU_HAS_FEATURE(CPU_FEATURE_PAE))
> -               panic("CPU doesn't have support for PAE.");
> -       set_cr4(get_cr4() | CR4_PAE);
> -#endif /* MACH_HYP */
> -#else
> -       set_cr3((unsigned long)_kvtophys(kernel_page_dir));
> -#endif /* PAE */
>  #ifndef        MACH_HYP
>         /* Turn paging on.
>          * Also set the WP bit so that on 486 or better processors
> @@ -520,40 +477,13 @@ i386at_init(void)
>         mp_desc_init(0);
>  #endif // NCPUS
>
> -#if INIT_VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
> -       /* Get rid of the temporary direct mapping and flush it out of the TLB.  */
> -       for (i = 0 ; i < nb_direct; i++) {
> -#ifdef MACH_XEN
> -#ifdef MACH_PSEUDO_PHYS
> -               if (!hyp_mmu_update_pte(kv_to_ma(&kernel_page_dir[lin2pdenum_cont(VM_MIN_KERNEL_ADDRESS) + i]), 0))
> -#else  /* MACH_PSEUDO_PHYS */
> -               if (hyp_do_update_va_mapping(VM_MIN_KERNEL_ADDRESS + i * INTEL_PGBYTES, 0, UVMF_INVLPG | UVMF_ALL))
> -#endif /* MACH_PSEUDO_PHYS */
> -                       printf("couldn't unmap frame %d\n", i);
> -#else  /* MACH_XEN */
> -               kernel_page_dir[lin2pdenum_cont(INIT_VM_MIN_KERNEL_ADDRESS) + i] = 0;
> -#endif /* MACH_XEN */
> -       }
> -#endif
> -       /* Keep BIOS memory mapped */
> -#if VM_MIN_KERNEL_ADDRESS != 0
> -       kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] =
> -               kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS)];
> -#endif
> -
> -       /* Not used after boot, better give it back.  */
> -#ifdef MACH_XEN
> -       hyp_free_page(0, (void*) VM_MIN_KERNEL_ADDRESS);
> -#endif /* MACH_XEN */
> -
> -       flush_tlb();
> +       pmap_remove_temporary_mapping();
>
>  #ifdef MACH_XEN
>         hyp_p2m_init();
>  #endif /* MACH_XEN */
>
> -       int_stack_base[0] = (vm_offset_t)&int_stack;
> -       int_stack_top[0] = int_stack_base[0] + INTSTACK_SIZE - 4;
> +       interrupt_stack_alloc();
>  }
>
>  /*
> @@ -645,7 +575,6 @@ void c_boot_entry(vm_offset_t bi)
>  #endif /* MACH_KDB */
>
>         machine_slot[0].is_cpu = TRUE;
> -       machine_slot[0].running = TRUE;
>         machine_slot[0].cpu_subtype = CPU_SUBTYPE_AT386;
>
>         switch (cpu_type)
> @@ -693,6 +622,7 @@ startrtclock(void)
>  {
>  #ifndef APIC
>         clkstart();
> +       unmask_irq(0);
>  #endif
>  }
>
> diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
> index 0505cfa2..3c57d732 100644
> --- a/i386/intel/pmap.c
> +++ b/i386/intel/pmap.c
> @@ -3009,3 +3009,96 @@ pmap_unmap_page_zero (void)
>  #endif /* MACH_PV_PAGETABLES */
>  }
>  #endif /* __i386__ */
> +
> +void
> +pmap_make_temporary_mapping(void)
> +{
> +       int i;
> +
> +       /*
> +        * We'll have to temporarily install a direct mapping
> +        * between physical memory and low linear memory,
> +        * until we start using our new kernel segment descriptors.
> +        */
> +#if INIT_VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
> +       vm_offset_t delta = INIT_VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS;
> +       if ((vm_offset_t)(-delta) < delta)
> +               delta = (vm_offset_t)(-delta);
> +       int nb_direct = delta >> PDESHIFT;
> +       for (i = 0; i < nb_direct; i++)
> +               kernel_page_dir[lin2pdenum_cont(INIT_VM_MIN_KERNEL_ADDRESS) + i] =
> +                       kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS) + i];
> +#endif
> +       /* We need BIOS memory mapped at 0xc0000 & co for BIOS accesses */
> +#if VM_MIN_KERNEL_ADDRESS != 0
> +       kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] =
> +               kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS)];
> +#endif
> +
> +#ifdef MACH_PV_PAGETABLES
> +       for (i = 0; i < PDPNUM; i++)
> +               pmap_set_page_readonly_init((void*) kernel_page_dir + i * INTEL_PGBYTES);
> +#if PAE
> +       pmap_set_page_readonly_init(kernel_pmap->pdpbase);
> +#endif /* PAE */
> +#endif /* MACH_PV_PAGETABLES */
> +
> +       pmap_set_page_dir();
> +}
> +
> +void
> +pmap_set_page_dir(void)
> +{
> +#if PAE
> +#ifdef __x86_64__
> +       set_cr3((unsigned long)_kvtophys(kernel_pmap->l4base));
> +#else
> +       set_cr3((unsigned long)_kvtophys(kernel_pmap->pdpbase));
> +#endif
> +#ifndef        MACH_HYP
> +       if (!CPU_HAS_FEATURE(CPU_FEATURE_PAE))
> +               panic("CPU doesn't have support for PAE.");
> +       set_cr4(get_cr4() | CR4_PAE);
> +#endif /* MACH_HYP */
> +#else
> +       set_cr3((unsigned long)_kvtophys(kernel_page_dir));
> +#endif /* PAE */
> +}
> +
> +void
> +pmap_remove_temporary_mapping(void)
> +{
> +       int i;
> +
> +#if INIT_VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
> +       vm_offset_t delta = INIT_VM_MIN_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS;
> +       if ((vm_offset_t)(-delta) < delta)
> +               delta = (vm_offset_t)(-delta);
> +       int nb_direct = delta >> PDESHIFT;
> +       /* Get rid of the temporary direct mapping and flush it out of the TLB.  */
> +       for (i = 0 ; i < nb_direct; i++) {
> +#ifdef MACH_XEN
> +#ifdef MACH_PSEUDO_PHYS
> +               if (!hyp_mmu_update_pte(kv_to_ma(&kernel_page_dir[lin2pdenum_cont(VM_MIN_KERNEL_ADDRESS) + i]), 0))
> +#else  /* MACH_PSEUDO_PHYS */
> +               if (hyp_do_update_va_mapping(VM_MIN_KERNEL_ADDRESS + i * INTEL_PGBYTES, 0, UVMF_INVLPG | UVMF_ALL))
> +#endif /* MACH_PSEUDO_PHYS */
> +                       printf("couldn't unmap frame %d\n", i);
> +#else  /* MACH_XEN */
> +               kernel_page_dir[lin2pdenum_cont(INIT_VM_MIN_KERNEL_ADDRESS) + i] = 0;
> +#endif /* MACH_XEN */
> +       }
> +#endif
> +       /* Keep BIOS memory mapped */
> +#if VM_MIN_KERNEL_ADDRESS != 0
> +       kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] =
> +               kernel_page_dir[lin2pdenum_cont(LINEAR_MIN_KERNEL_ADDRESS)];
> +#endif
> +
> +       /* Not used after boot, better give it back.  */
> +#ifdef MACH_XEN
> +       hyp_free_page(0, (void*) VM_MIN_KERNEL_ADDRESS);
> +#endif /* MACH_XEN */
> +
> +       flush_tlb();
> +}
> diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
> index d6bf85fb..92247faa 100644
> --- a/i386/intel/pmap.h
> +++ b/i386/intel/pmap.h
> @@ -475,6 +475,8 @@ pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
>  #define        pmap_attribute(pmap,addr,size,attr,value) \
>                                         (KERN_INVALID_ADDRESS)
>
> +extern pt_entry_t *kernel_page_dir;
> +
>  /*
>   *  Bootstrap the system enough to run with virtual memory.
>   *  Allocate the kernel page directory and page tables,
> @@ -483,6 +485,10 @@ pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
>   */
>  extern void pmap_bootstrap(void);
>
> +extern void pmap_set_page_dir(void);
> +extern void pmap_make_temporary_mapping(void);
> +extern void pmap_remove_temporary_mapping(void);
> +
>  extern void pmap_unmap_page_zero (void);
>
>  /*
> diff --git a/kern/startup.c b/kern/startup.c
> index 2eb3a739..42f5ac6c 100644
> --- a/kern/startup.c
> +++ b/kern/startup.c
> @@ -308,8 +308,11 @@ void cpu_launch_first_thread(thread_t th)
>
>         PMAP_ACTIVATE_USER(vm_map_pmap(th->task->map), th, mycpu);
>
> +#if defined(APIC)
> +       lapic_enable_timer();
> +#else
>         startrtclock();         /* needs an active thread */
> -
> +#endif
>         load_context(th);
>         /*NOTREACHED*/
>  }
> --
> 2.34.1