Hi,

The attached patch fixes a bug in the AArch64 __clear_cache implementation.  The loops that iterate over the cache lines to be cleared start at 'base' itself and advance by the cache line size, so when 'base' is not cache-line aligned they can stop before reaching the line that contains the last bytes of the range, leaving that line untouched.  The patch rounds the loop start address down to a cache line boundary.
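
To illustrate with made-up numbers (not taken from the patch): assuming a 64-byte data cache line, base = 63 and end = 129, the current loop issues "dc cvau" for addresses 63 and 127 and then stops at 191, so the line starting at 128, which holds the last byte of the range, is never cleaned.  A minimal stand-alone sketch of the before/after loop bounds, using these assumed values:

#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  const uintptr_t lsize = 64;           /* assumed cache line size */
  const uintptr_t base = 63, end = 129; /* arbitrary unaligned range */

  /* Current behaviour: start at 'base' itself; the loop exits before
     visiting the line at 128, which contains byte end - 1.  */
  for (uintptr_t a = base; a < end; a += lsize)
    printf ("old: visit %3lu, cleans line %3lu\n",
            (unsigned long) a, (unsigned long) (a & ~(lsize - 1)));

  /* Patched behaviour: round the start down to a line boundary, so the
     loop also visits 128.  */
  for (uintptr_t a = base & ~(lsize - 1); a < end; a += lsize)
    printf ("new: visit %3lu\n", (unsigned long) a);

  return 0;
}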

The patch passes the regression test on aarch64-none-linux-gnu.  OK for trunk and aarch64-4.7-branch?

Thanks,
Yufeng

libgcc/

2013-01-17  Sofiane Naci  <sofiane.n...@arm.com>
            Yufeng Zhang  <yufeng.zh...@arm.com>

        * config/aarch64/sync-cache.c (__aarch64_sync_cache_range): Align
        the loop start address for cache clearing.
diff --git a/libgcc/config/aarch64/sync-cache.c b/libgcc/config/aarch64/sync-cache.c
index d7b621e..66b7afe 100644
--- a/libgcc/config/aarch64/sync-cache.c
+++ b/libgcc/config/aarch64/sync-cache.c
@@ -39,7 +39,11 @@ __aarch64_sync_cache_range (const void *base, const void *end)
      instruction cache fetches the updated data.  'end' is exclusive,
      as per the GNU definition of __clear_cache.  */
 
-  for (address = base; address < (const char *) end; address += dcache_lsize)
+  /* Align the loop's start address to a data cache line boundary.  */
+  address = (const char*) ((__UINTPTR_TYPE__) base
+			   & ~ (__UINTPTR_TYPE__) (dcache_lsize - 1));
+
+  for (; address < (const char *) end; address += dcache_lsize)
     asm volatile ("dc\tcvau, %0"
 		  :
 		  : "r" (address)
@@ -47,7 +51,11 @@ __aarch64_sync_cache_range (const void *base, const void *end)
 
   asm volatile ("dsb\tish" : : : "memory");
 
-  for (address = base; address < (const char *) end; address += icache_lsize)
+  /* Align the loop's start address to an instruction cache line boundary.  */
+  address = (const char*) ((__UINTPTR_TYPE__) base
+			   & ~ (__UINTPTR_TYPE__) (icache_lsize - 1));
+
+  for (; address < (const char *) end; address += icache_lsize)
     asm volatile ("ic\tivau, %0"
 		  :
 		  : "r" (address)
