In preparation for re-activating the presently dead 2M page path of sh_unshadow_for_p2m_change(), also deal with the case of an entire L1 page table being replaced in one go.
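
For illustration only (a simplified sketch, not part of the patch, omitting the mapping/unmapping of the tables, the removal of other mappings, and the TLB flush handling), the per-entry logic the loop is intended to perform then amounts to:

    /* Simplified sketch; see the actual loop in the second hunk below. */
    for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
    {
        if ( opte )                    /* Old mapping was an L1 table. */
        {
            if ( !(l1e_get_flags(opte[i]) & _PAGE_PRESENT) )
                continue;              /* No old mapping, nothing to do. */
            omfn = l1e_get_mfn(opte[i]);
        }

        if ( npte )                    /* New mapping is an L1 table. */
            nmfn = l1e_get_flags(npte[i]) & _PAGE_PRESENT
                   ? l1e_get_mfn(npte[i]) : INVALID_MFN;

        if ( !mfn_eq(nmfn, omfn) )     /* Mapping changed: unshadow it. */
            sh_remove_all_shadows_and_parents(d, omfn);

        /* In the superpage cases the MFNs simply advance by one. */
        omfn = mfn_add(omfn, 1);
        nmfn = mfn_add(nmfn, 1);
    }
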
Signed-off-by: Jan Beulich <[email protected]>
---
v2: Split from previous bigger patch.

--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -847,14 +847,9 @@ static void cf_check sh_unshadow_for_p2m
      * scheme, that's OK, but otherwise they must be unshadowed.
      */
     case 2:
-        if ( !(oflags & _PAGE_PSE) )
-            break;
-
-        ASSERT(!p2m_is_grant(p2mt));
-
         {
             unsigned int i;
-            l1_pgentry_t *npte = NULL;
+            l1_pgentry_t *npte = NULL, *opte = NULL;
 
             /* If we're replacing a superpage with a normal L1 page, map it */
             if ( (l1e_get_flags(new) & _PAGE_PRESENT) &&
@@ -862,24 +857,39 @@ static void cf_check sh_unshadow_for_p2m
                  mfn_valid(nmfn) )
                 npte = map_domain_page(nmfn);
 
+            /* If we're replacing a normal L1 page, map it as well. */
+            if ( !(oflags & _PAGE_PSE) )
+                opte = map_domain_page(omfn);
+
             gfn &= ~(L1_PAGETABLE_ENTRIES - 1);
 
             for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
             {
-                if ( !npte ||
-                     !(l1e_get_flags(npte[i]) & _PAGE_PRESENT) ||
-                     !mfn_eq(l1e_get_mfn(npte[i]), omfn) )
+                if ( opte )
+                {
+                    if ( !(l1e_get_flags(opte[i]) & _PAGE_PRESENT) )
+                        continue;
+                    omfn = l1e_get_mfn(opte[i]);
+                }
+
+                if ( npte )
+                    nmfn = l1e_get_flags(npte[i]) & _PAGE_PRESENT
+                           ? l1e_get_mfn(npte[i]) : INVALID_MFN;
+
+                if ( !mfn_eq(nmfn, omfn) )
                 {
                     /* This GFN->MFN mapping has gone away */
                     sh_remove_all_shadows_and_parents(d, omfn);
                     if ( sh_remove_all_mappings(d, omfn, _gfn(gfn + i)) )
                         flush = true;
                 }
+
                 omfn = mfn_add(omfn, 1);
+                nmfn = mfn_add(nmfn, 1);
             }
 
-            if ( npte )
-                unmap_domain_page(npte);
+            unmap_domain_page(opte);
+            unmap_domain_page(npte);
         }
 
         break;
