Preprocessing directives inside macro arguments are UB as per the
standard, i.e. we're building on a de facto extension in the compilers we
use. MISRA C:2012 Rule 20.6 disallows this altogether, though. Use helper
always-inline functions instead.
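
To illustrate the pattern being replaced (a minimal, hypothetical
sketch, not code from this patch; FOREACH_PRESENT_L1E()'s uses had the
same shape):

    #define FOR_EACH(body) do { body } while ( 0 )

    void example(unsigned int *cnt)
    {
        FOR_EACH(
    #ifdef SOME_OPTION /* UB per C17 6.10.3p11, yet accepted by gcc/clang */
            ++*cnt;
    #endif
        );
    }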

In sh_audit_l1_table(), along with reducing the scope of "gfn", which is
no longer used by the if() side of the conditional, also reduce the scope
of two other adjacent variables.

For audit_magic(), note that both which parameters are needed and what
their types are follow from AUDIT_FAIL() accessing variables which aren't
passed to it as arguments.
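
For reference, an abridged sketch of the macro (the real one in
multi.c logs rather more context, including the entries themselves):

    #define AUDIT_FAIL(_level, _fmt, _a...) do {                      \
        printk("audit failed at level %i, index %i\n"                 \
               "gl" #_level "mfn = %" PRI_mfn "\nError: " _fmt "\n",  \
               _level, guest_index(gl ## _level ## e),                \
               mfn_x(gl ## _level ## mfn), ##_a);                     \
        BUG();                                                        \
    } while ( 0 )

With _level being 1, the token pasting references gl1e and gl1mfn
(and, in the full macro, sl1e and sl1mfn) without them being passed as
macro arguments.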

No functional change intended. Of course codegen does change with this,
first and foremost in register allocation.

Reported-by: Andrew Cooper <[email protected]>
Signed-off-by: Jan Beulich <[email protected]>
---
Leaving even the fetching of "current" to the helper in
sh_rm_write_access_from_l1() looks tidier to me overall, even though this
means the fetch will now occur once per present L1E.

Converting the #if to if() and #ifdef to if(IS_ENABLED()) wouldn't work
here, as identifiers are used which aren't available when the respective
conditions are false.
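
To illustrate (with a hypothetical CONFIG_FOO and type, not taken from
this patch): even a dead if(IS_ENABLED()) branch still needs to parse
and type-check, so e.g.

    if ( IS_ENABLED(CONFIG_FOO) )      /* compile-time constant */
        t = SH_type_foo_shadow;        /* build error if the identifier
                                          exists only for CONFIG_FOO=y */

only compiles when every identifier involved is declared in all
configurations.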

Note that I tested this only on top of
https://lists.xen.org/archives/html/xen-devel/2023-05/msg01140.html, but I
have no reason to assume that there is a problematic interaction. Of
course it would be really nice if the rest of that series finally could go
in.

--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -395,7 +395,7 @@ static inline mfn_t cf_check sh_next_pag
     shadow_set_l2e(d, sl2e, new_sl2e, sl2mfn, SH_type_fl1_shadow, sh_next_page)
 
 static inline u32
-guest_index(void *ptr)
+guest_index(const void *ptr)
 {
     return (u32)((unsigned long)ptr & ~PAGE_MASK) / sizeof(guest_l1e_t);
 }
@@ -3549,16 +3549,25 @@ static int cf_check sh_guess_wrmap(
 }
 #endif
 
+/* Remember the last shadow that we shot a writeable mapping in */
+static always_inline void store_last_writeable_pte_smfn(
+    const struct domain *d, mfn_t sl1mfn)
+{
+#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
+    struct vcpu *curr = current;
+
+    if ( curr->domain == d )
+        curr->arch.paging.shadow.last_writeable_pte_smfn = mfn_x(sl1mfn);
+#endif
+}
+
 int cf_check sh_rm_write_access_from_l1(
     struct domain *d, mfn_t sl1mfn, mfn_t readonly_mfn)
 /* Excises all writeable mappings to readonly_mfn from this l1 shadow table */
 {
     shadow_l1e_t *sl1e;
     int done = 0;
-#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
-    struct vcpu *curr = current;
     mfn_t base_sl1mfn = sl1mfn; /* Because sl1mfn changes in the foreach */
-#endif
 
     FOREACH_PRESENT_L1E(sl1mfn, sl1e, NULL, done,
     {
@@ -3568,11 +3577,9 @@ int cf_check sh_rm_write_access_from_l1(
             shadow_l1e_t ro_sl1e = shadow_l1e_remove_flags(*sl1e, _PAGE_RW);
 
             shadow_set_l1e(d, sl1e, ro_sl1e, p2m_ram_rw, sl1mfn);
-#if SHADOW_OPTIMIZATIONS & SHOPT_WRITABLE_HEURISTIC
-            /* Remember the last shadow that we shot a writeable mapping in */
-            if ( curr->domain == d )
-                curr->arch.paging.shadow.last_writeable_pte_smfn = mfn_x(base_sl1mfn);
-#endif
+
+            store_last_writeable_pte_smfn(d, base_sl1mfn);
+
             if ( (mfn_to_page(readonly_mfn)->u.inuse.type_info
                   & PGT_count_mask) == 0 )
                 /* This breaks us cleanly out of the FOREACH macro */
@@ -3882,12 +3889,36 @@ static const char *sh_audit_flags(const
     return NULL;
 }
 
+static always_inline bool audit_magic(
+    const guest_l1e_t *gl1e, mfn_t gl1mfn,
+    const shadow_l1e_t *sl1e, mfn_t sl1mfn)
+{
+    bool done = false;
+
+#if SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH
+    if ( !sh_l1e_is_gnp(*sl1e) )
+    {
+        gfn_t gfn = sh_l1e_mmio_get_gfn(*sl1e);
+
+        ASSERT(sh_l1e_is_mmio(*sl1e));
+
+        if ( !gfn_eq(gfn, guest_l1e_get_gfn(*gl1e)) )
+            AUDIT_FAIL(1,
+                       "shadow MMIO gfn is %" SH_PRI_gfn " but guest gfn is %" SH_PRI_gfn,
+                       gfn_x(gfn), gfn_x(guest_l1e_get_gfn(*gl1e)));
+    }
+    else if ( guest_l1e_get_flags(*gl1e) & _PAGE_PRESENT )
+        AUDIT_FAIL(1, "shadow is GNP magic but guest is present");
+#endif
+
+    return done;
+}
+
 int cf_check sh_audit_l1_table(struct domain *d, mfn_t sl1mfn, mfn_t x)
 {
     guest_l1e_t *gl1e, *gp;
     shadow_l1e_t *sl1e;
-    mfn_t mfn, gmfn, gl1mfn;
-    gfn_t gfn;
+    mfn_t gl1mfn;
     p2m_type_t p2mt;
     const char *s;
     int done = 0;
@@ -3906,28 +3937,10 @@ int cf_check sh_audit_l1_table(struct do
 #endif
 
     gl1e = gp = map_domain_page(gl1mfn);
-    FOREACH_PRESENT_L1E(sl1mfn, sl1e, &gl1e, done, {
-
+    FOREACH_PRESENT_L1E(sl1mfn, sl1e, &gl1e, done,
+    {
         if ( sh_l1e_is_magic(*sl1e) )
-        {
-#if (SHADOW_OPTIMIZATIONS & SHOPT_FAST_FAULT_PATH)
-            if ( sh_l1e_is_gnp(*sl1e) )
-            {
-                if ( guest_l1e_get_flags(*gl1e) & _PAGE_PRESENT )
-                    AUDIT_FAIL(1, "shadow is GNP magic but guest is present");
-            }
-            else
-            {
-                ASSERT(sh_l1e_is_mmio(*sl1e));
-                gfn = sh_l1e_mmio_get_gfn(*sl1e);
-                if ( gfn_x(gfn) != gfn_x(guest_l1e_get_gfn(*gl1e)) )
-                    AUDIT_FAIL(1, "shadow MMIO gfn is %" SH_PRI_gfn
-                               " but guest gfn is %" SH_PRI_gfn,
-                               gfn_x(gfn),
-                               gfn_x(guest_l1e_get_gfn(*gl1e)));
-            }
-#endif
-        }
+            done = audit_magic(gl1e, gl1mfn, sl1e, sl1mfn);
         else
         {
             s = sh_audit_flags(d, 1, guest_l1e_get_flags(*gl1e),
@@ -3936,9 +3949,10 @@ int cf_check sh_audit_l1_table(struct do
 
             if ( SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_MFNS )
             {
-                gfn = guest_l1e_get_gfn(*gl1e);
-                mfn = shadow_l1e_get_mfn(*sl1e);
-                gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
+                gfn_t gfn = guest_l1e_get_gfn(*gl1e);
+                mfn_t mfn = shadow_l1e_get_mfn(*sl1e);
+                mfn_t gmfn = get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt);
+
                 if ( !p2m_is_grant(p2mt) && !mfn_eq(gmfn, mfn) )
                     AUDIT_FAIL(1, "bad translation: gfn %" SH_PRI_gfn
                                " --> %" PRI_mfn " != mfn %" PRI_mfn,
@@ -4027,6 +4041,17 @@ int cf_check sh_audit_l2_table(struct do
 }
 
 #if GUEST_PAGING_LEVELS >= 4
+static always_inline unsigned int type_from_gl3e(
+    const struct domain *d, const guest_l3e_t *gl3e)
+{
+#ifdef CONFIG_PV32
+    if ( guest_index(gl3e) == 3 && is_pv_32bit_domain(d) )
+        return SH_type_l2h_shadow;
+#endif
+
+    return SH_type_l2_shadow;
+}
+
 int cf_check sh_audit_l3_table(struct domain *d, mfn_t sl3mfn, mfn_t x)
 {
     guest_l3e_t *gl3e, *gp;
@@ -4056,14 +4081,10 @@ int cf_check sh_audit_l3_table(struct do
 
         if ( SHADOW_AUDIT & SHADOW_AUDIT_ENTRIES_MFNS )
         {
-            unsigned int t = SH_type_l2_shadow;
+            unsigned int t = type_from_gl3e(d, gl3e);
 
             gfn = guest_l3e_get_gfn(*gl3e);
             mfn = shadow_l3e_get_mfn(*sl3e);
-#ifdef CONFIG_PV32
-            if ( guest_index(gl3e) == 3 && is_pv_32bit_domain(d) )
-                t = SH_type_l2h_shadow;
-#endif
             gmfn = get_shadow_status(
                        d, get_gfn_query_unlocked(d, gfn_x(gfn), &p2mt), t);
             if ( !mfn_eq(gmfn, mfn) )
