This is done to maintain bisectability through the subsequent changes,
where flsl() changes signedness on a per-architecture basis rather than
atomically across the tree.
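
The explicit "+ 0U" / "- 1U" adjustments matter because of the
typeof-based type check inside min().  As a rough illustration only (a
Linux-style sketch, not necessarily Xen's exact definition):

    /*
     * Illustrative sketch of a typeof-based min(): the pointer comparison
     * exists purely so the compiler emits a "comparison of distinct
     * pointer types" warning when the two operands have different types,
     * e.g. int vs unsigned int.
     */
    #define min(x, y)                       \
    ({                                      \
        const typeof(x) _x = (x);           \
        const typeof(y) _y = (y);           \
        (void)(&_x == &_y);                 \
        _x < _y ? _x : _y;                  \
    })

While flsl() returns plain int on some architectures and unsigned int on
others partway through the series, nudging both operands to unsigned int
("MAX_ORDER + 0U", "flsl(...) - 1U") keeps the operand types matched on
every architecture, so each intermediate commit still builds cleanly.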

Signed-off-by: Andrew Cooper <[email protected]>
---
CC: Jan Beulich <[email protected]>
CC: Roger Pau Monné <[email protected]>
CC: Wei Liu <[email protected]>
CC: Stefano Stabellini <[email protected]>
CC: Julien Grall <[email protected]>
CC: Volodymyr Babchuk <[email protected]>
CC: Bertrand Marquis <[email protected]>
CC: Michal Orzel <[email protected]>
CC: Oleksii Kurochko <[email protected]>
CC: Shawn Anastasio <[email protected]>
CC: [email protected] <[email protected]>
CC: Simone Ballarin <[email protected]>
CC: Federico Serafini <[email protected]>
CC: Nicola Vetrini <[email protected]>

v2:
 * New
---
 xen/common/page_alloc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 7c1bdfc046bf..8d3342e95236 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1842,7 +1842,7 @@ static void _init_heap_pages(const struct page_info *pg,
          * Note that the value of ffsl() and flsl() starts from 1 so we need
          * to decrement it by 1.
          */
-        unsigned int inc_order = min(MAX_ORDER, flsl(e - s) - 1);
+        unsigned int inc_order = min(MAX_ORDER + 0U, flsl(e - s) - 1U);
 
         if ( s )
             inc_order = min(inc_order, ffsl(s) - 1U);
@@ -2266,7 +2266,7 @@ void __init xenheap_max_mfn(unsigned long mfn)
     ASSERT(!first_node_initialised);
     ASSERT(!xenheap_bits);
     BUILD_BUG_ON((PADDR_BITS - PAGE_SHIFT) >= BITS_PER_LONG);
-    xenheap_bits = min(flsl(mfn + 1) - 1 + PAGE_SHIFT, PADDR_BITS);
+    xenheap_bits = min(flsl(mfn + 1) - 1U + PAGE_SHIFT, PADDR_BITS + 0U);
     printk(XENLOG_INFO "Xen heap: %u bits\n", xenheap_bits);
 }
 
-- 
2.30.2

