From 3c2d2c6fc8157bbe54b1273eb8c29f26d6e2afb6 Mon Sep 17 00:00:00 2001
From: Yinghai Lu <yinghai@kernel.org>
Date: Sat, 15 Dec 2012 20:59:07 -0800
Subject: [PATCH v7 04/29] x86, mm: add early kernel mapping in C

The early kernel identity mapping is usually set up in
arch/x86/kernel/head_64.S. Once a #PF handler is in place, we can — and
have to — move the kernel mapping initialization later, into C code.

That allows a smooth transition to init_mem_mapping() from the
BRK/top-down approach.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
---
 arch/x86/include/asm/init.h |    2 ++
 arch/x86/mm/init_64.c       |   77 +++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 79 insertions(+)

diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index bac770b..8d0687a 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -1,5 +1,7 @@
 #ifndef _ASM_X86_INIT_H
 #define _ASM_X86_INIT_H
 
+int kernel_ident_mapping_init(pgd_t *pgd_page,
+				unsigned long addr, unsigned long end);
 
 #endif /* _ASM_X86_INIT_H */
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index b1178eb..4f5f9f7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -56,6 +56,83 @@
 
 #include "mm_internal.h"
 
+static void __init ident_pmd_init(pmd_t *pmd_page,
+			  unsigned long addr, unsigned long end)
+{
+	addr &= PMD_MASK;
+	for (; addr < end; addr += PMD_SIZE) {
+		pmd_t *pmd = pmd_page + pmd_index(addr);
+
+		if (!pmd_present(*pmd))
+			set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
+	}
+}
+static int __init ident_pud_init(pud_t *pud_page,
+			  unsigned long addr, unsigned long end)
+{
+	unsigned long next;
+
+	for (; addr < end; addr = next) {
+		pud_t *pud = pud_page + pud_index(addr);
+		pmd_t *pmd;
+
+		next = (addr & PUD_MASK) + PUD_SIZE;
+		if (next > end)
+			next = end;
+
+		if (pud_present(*pud)) {
+			pmd = pmd_offset(pud, 0);
+			ident_pmd_init(pmd, addr, next);
+			continue;
+		}
+		pmd = (pmd_t *)alloc_low_page();
+		if (!pmd)
+			return -ENOMEM;
+		clear_page(pmd);
+		ident_pmd_init(pmd, addr, next);
+		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+	}
+
+	return 0;
+}
+int __init kernel_ident_mapping_init(pgd_t *pgd_page,
+				unsigned long addr, unsigned long end)
+{
+	unsigned long next;
+	int result;
+
+	addr = (unsigned long)__va(addr);
+	end = (unsigned long)__va(end);
+
+	for (; addr < end; addr = next) {
+		pgd_t *pgd = pgd_page + pgd_index(addr);
+		pud_t *pud;
+
+		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
+		if (next > end)
+			next = end;
+
+		if (pgd_present(*pgd)) {
+			pud = pud_offset(pgd, 0);
+			result = ident_pud_init(pud, __pa(addr), __pa(next));
+			if (result)
+				return result;
+			continue;
+		}
+
+		pud = (pud_t *)alloc_low_page();
+		if (!pud)
+			return -ENOMEM;
+		clear_page(pud);
+		result = ident_pud_init(pud, __pa(addr), __pa(next));
+		if (result)
+			return result;
+		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
+	}
+
+	return 0;
+}
+
 static int __init parse_direct_gbpages_off(char *arg)
 {
 	direct_gbpages = 0;
-- 
1.7.10.4

