It has been 4 years since the default load address changed from 1 MiB to 2 MiB,
and _stext ceased residing in l2_xenmap[0]. We should not be inserting an
unused mapping.
To ensure we don't create mappings accidentally, loop from 0 and obey
_PAGE_PRESENT on all entries.
Fixes: 7ed93f3a0dff ("x86: change default load address from 1 MiB to 2 MiB")
Signed-off-by: Andrew Cooper <[email protected]>
---
CC: Jan Beulich <[email protected]>
CC: Roger Pau Monné <[email protected]>
CC: Wei Liu <[email protected]>
Previously posted on its own.
---
xen/arch/x86/setup.c | 10 +++-------
1 file changed, 3 insertions(+), 7 deletions(-)
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index da47cdea14a1..6f241048425c 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1279,16 +1279,12 @@ void __init noreturn __start_xen(unsigned long mbi_p)
/* The only data mappings to be relocated are in the Xen area. */
pl2e = __va(__pa(l2_xenmap));
- /*
- * Undo the temporary-hooking of the l1_directmap. __2M_text_start
- * is contained in this PTE.
- */
+
BUG_ON(using_2M_mapping() &&
l2_table_offset((unsigned long)_erodata) ==
l2_table_offset((unsigned long)_stext));
- *pl2e++ = l2e_from_pfn(xen_phys_start >> PAGE_SHIFT,
- PAGE_HYPERVISOR_RX | _PAGE_PSE);
- for ( i = 1; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ )
+
+ for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ )
{
unsigned int flags;
--
2.11.0