Set p2m->need_flush if the page was present before (requiring a TLB flush).
This causes p2m->tlb_flush() to now be reachable, so make sure we call it
only when it is set.

Signed-off-by: Teddy Astie <[email protected]>
---
This change is needed by the next patch.
Would it be better to merge it with the next patch?
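
For context, a minimal sketch of the guarded-call idiom this relies on (the
struct and function names below are hypothetical, illustration only, not Xen
code): a p2m implementation may legitimately leave the tlb_flush hook unset,
so callers must check it before invoking it.

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the relevant bits of struct p2m_domain. */
struct p2m_domain_example {
    bool need_flush;
    void (*tlb_flush)(struct p2m_domain_example *p2m);
};

static void example_flush_sync(struct p2m_domain_example *p2m)
{
    if ( p2m->need_flush )
    {
        p2m->need_flush = false;
        /* The hook may be NULL for implementations that never set it;
         * calling it unconditionally would dereference NULL. */
        if ( p2m->tlb_flush )
            p2m->tlb_flush(p2m);
    }
}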

 xen/arch/x86/mm/p2m-pt.c | 3 +++
 xen/arch/x86/mm/p2m.c    | 6 ++++--
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index 5a6ce2f8bc..1fea3884be 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -143,6 +143,9 @@ static int write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
         if ( p2m->write_p2m_entry_post )
             p2m->write_p2m_entry_post(p2m, oflags);
 
+        if ( oflags & _PAGE_PRESENT )
+            p2m->need_flush = true;
+
         paging_unlock(d);
 
         if ( nestedhvm_enabled(d) && !p2m_is_nestedp2m(p2m) &&
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index e2a00a0efd..98f8272270 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -274,7 +274,8 @@ void p2m_tlb_flush_sync(struct p2m_domain *p2m)
     if ( p2m->need_flush )
     {
         p2m->need_flush = 0;
-        p2m->tlb_flush(p2m);
+        if ( p2m->tlb_flush )
+            p2m->tlb_flush(p2m);
     }
 }
 
@@ -287,7 +288,8 @@ void p2m_unlock_and_tlb_flush(struct p2m_domain *p2m)
     {
         p2m->need_flush = 0;
         mm_write_unlock(&p2m->lock);
-        p2m->tlb_flush(p2m);
+        if ( p2m->tlb_flush )
+            p2m->tlb_flush(p2m);
     } else
         mm_write_unlock(&p2m->lock);
 }
-- 
2.51.2


