On Fri, Apr 23, 2021 at 08:07:43PM +0200, Martin Pieuchot wrote:
> The diff below converts the last uses of uvm_km_alloc(9) and
> uvm_km_zalloc(9) to km_alloc(9).
>
> One of the allocations below uses `kp_pageable' instead of `kp_zero'
> because the mapping for `pm_pdir_intel' is lost when PAE is enabled
> and needs to be re-established when a fault happens.  This is consistent
> with what currently happens with uvm_km_zalloc().  Thanks to hshoexer@
> for the analysis.
>
> Fixing this is left as an exercise for the reader.  I'm currently
> concerned with getting rid of the old allocators.
>
> ok?
>

Reads ok. ok mlarkin
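
For reference, the km_alloc(9)/km_free(9) idiom the diff converts to
looks roughly like this (a minimal sketch, not part of the diff; the
allocation modes are the standard ones from <uvm/uvm_extern.h>):

	vaddr_t va;

	/* wired, zero-filled pages; kd_waitok may sleep for memory */
	va = (vaddr_t)km_alloc(NBPG, &kv_any, &kp_zero, &kd_waitok);
	if (va == 0)
		panic("out of kernel virtual space");
	/* ... use the page ... */
	/* the va/pa modes passed to km_free() should match the alloc */
	km_free((void *)va, NBPG, &kv_any, &kp_zero);

kp_dirty hands back wired but uninitialized pages, while kp_pageable
only reserves pageable kernel VA whose backing mappings can be faulted
back in later.  As I read hshoexer@'s analysis above, that is why the
kp_pageable allocation survives the switch to PAE, where a wired
mapping established via pmap_kenter() does not.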

> Index: arch/i386/i386/pmap.c
> ===================================================================
> RCS file: /cvs/src/sys/arch/i386/i386/pmap.c,v
> retrieving revision 1.211
> diff -u -p -r1.211 pmap.c
> --- arch/i386/i386/pmap.c     11 Mar 2021 11:16:57 -0000      1.211
> +++ arch/i386/i386/pmap.c     23 Apr 2021 17:36:57 -0000
> @@ -1365,7 +1365,7 @@ void
>  pmap_pinit_pd_86(struct pmap *pmap)
>  {
>       /* allocate PDP */
> -     pmap->pm_pdir = uvm_km_alloc(kernel_map, NBPG);
> +     pmap->pm_pdir = (vaddr_t)km_alloc(NBPG, &kv_any, &kp_dirty, &kd_waitok);
>       if (pmap->pm_pdir == 0)
>               panic("pmap_pinit_pd_86: kernel_map out of virtual space!");
>       pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
> @@ -1397,7 +1397,8 @@ pmap_pinit_pd_86(struct pmap *pmap)
>        * execution, one that lacks all kernel mappings.
>        */
>       if (cpu_meltdown) {
> -             pmap->pm_pdir_intel = uvm_km_zalloc(kernel_map, NBPG);
> +             pmap->pm_pdir_intel = (vaddr_t)km_alloc(NBPG, &kv_any, &kp_zero,
> +                 &kd_waitok);
>               if (pmap->pm_pdir_intel == 0)
>                       panic("%s: kernel_map out of virtual space!", __func__);
>
> @@ -1449,11 +1450,12 @@ pmap_destroy(struct pmap *pmap)
>               uvm_pagefree(pg);
>       }
>
> -     uvm_km_free(kernel_map, pmap->pm_pdir, pmap->pm_pdirsize);
> +     km_free((void *)pmap->pm_pdir, pmap->pm_pdirsize, &kv_any, &kp_dirty);
>       pmap->pm_pdir = 0;
>
>       if (pmap->pm_pdir_intel) {
> -             uvm_km_free(kernel_map, pmap->pm_pdir_intel, pmap->pm_pdirsize);
> +             km_free((void *)pmap->pm_pdir_intel, pmap->pm_pdirsize,
> +                 &kv_any, &kp_dirty);
>               pmap->pm_pdir_intel = 0;
>       }
>
> @@ -2522,8 +2524,9 @@ pmap_enter_special_86(vaddr_t va, paddr_
>                   __func__, va);
>
>       if (!pmap->pm_pdir_intel) {
> -             if ((pmap->pm_pdir_intel = uvm_km_zalloc(kernel_map, NBPG))
> -                 == 0)
> +             pmap->pm_pdir_intel = (vaddr_t)km_alloc(NBPG, &kv_any, &kp_zero,
> +                 &kd_waitok);
> +             if (pmap->pm_pdir_intel == 0)
>                       panic("%s: kernel_map out of virtual space!", __func__);
>               if (!pmap_extract(pmap, pmap->pm_pdir_intel,
>                   &pmap->pm_pdirpa_intel))
> Index: arch/i386/i386/pmapae.c
> ===================================================================
> RCS file: /cvs/src/sys/arch/i386/i386/pmapae.c,v
> retrieving revision 1.60
> diff -u -p -r1.60 pmapae.c
> --- arch/i386/i386/pmapae.c   23 Sep 2020 15:13:26 -0000      1.60
> +++ arch/i386/i386/pmapae.c   23 Apr 2021 17:59:05 -0000
> @@ -738,7 +738,7 @@ pmap_bootstrap_pae(void)
>                                   (uint32_t)VM_PAGE_TO_PHYS(ptppg));
>                       }
>               }
> -             uvm_km_free(kernel_map, (vaddr_t)pd, NBPG);
> +             km_free(pd, NBPG, &kv_any, &kp_dirty);
>               DPRINTF("%s: freeing PDP 0x%x\n", __func__, (uint32_t)pd);
>       }
>
> @@ -944,7 +944,8 @@ pmap_pinit_pd_pae(struct pmap *pmap)
>       paddr_t pdidx[4];
>
>       /* allocate PDP */
> -     pmap->pm_pdir = uvm_km_alloc(kernel_map, 4 * NBPG);
> +     pmap->pm_pdir = (vaddr_t)km_alloc(4 * NBPG, &kv_any, &kp_dirty,
> +         &kd_waitok);
>       if (pmap->pm_pdir == 0)
>               panic("pmap_pinit_pd_pae: kernel_map out of virtual space!");
>       /* page index is in the pmap! */
> @@ -997,7 +998,8 @@ pmap_pinit_pd_pae(struct pmap *pmap)
>       if (cpu_meltdown) {
>               int i;
>
> -             if ((va = uvm_km_zalloc(kernel_map, 4 * NBPG)) == 0)
> +             va = (vaddr_t)km_alloc(4 * NBPG, &kv_any, &kp_zero, &kd_nowait);
> +             if (va == 0)
>                       panic("%s: kernel_map out of virtual space!", __func__);
>               if (!pmap_extract(pmap_kernel(),
>                   (vaddr_t)&pmap->pm_pdidx_intel, &pmap->pm_pdirpa_intel))
> @@ -1936,7 +1938,20 @@ pmap_enter_special_pae(vaddr_t va, paddr
>                   __func__, va);
>
>       if (!pmap->pm_pdir_intel) {
> -             if ((vapd = uvm_km_zalloc(kernel_map, 4 * NBPG)) == 0)
> +#if notyet
> +             /*
> +              * XXX mapping is established via pmap_kenter() and lost
> +              * after enabling PAE.
> +              */
> +             vapd = (vaddr_t)km_alloc(4 * NBPG, &kv_any, &kp_zero,
> +                 &kd_waitok);
> +#else
> +             vapd = (vaddr_t)km_alloc(4 * NBPG, &kv_any, &kp_pageable,
> +                 &kd_waitok);
> +             if (vapd != 0)
> +                     bzero((void *)vapd, 4 * NBPG);
> +#endif
> +             if (vapd == 0)
>                       panic("%s: kernel_map out of virtual space!", __func__);
>               pmap->pm_pdir_intel = vapd;
>               if (!pmap_extract(pmap, (vaddr_t)&pmap->pm_pdidx_intel,
>
