This libgo patch avoids the libc memmove and memclr functions when the
type being moved or cleared may contain pointer values. This is
necessary to ensure that Go's concurrent garbage collector doesn't see
corrupt pointer values.

The libc memmove and memclr don't reliably operate on full memory
words. We already avoided them on PPC64, but the same problem can
occur even on x86, where some processors use "rep movsb" and "rep
stosb". Always use C code that stores full memory words.

While we're here, clean up the C code. We don't need special handling
if the memmove/memclr pointers are not pointer-aligned.

Unfortunately, this will likely be slower. Perhaps some day we can
have our own assembly code that operates a word at a time, or we can
use different operations when we know there are no pointers.

Bootstrapped and ran Go testsuite on x86_64-pc-linux-gnu. Committed to mainline.

Ian
479872227c0ac284f3935cafe90c70348059a35c
diff --git a/gcc/go/gofrontend/MERGE b/gcc/go/gofrontend/MERGE
index 64eb27e5e8d..99bf24c38e0 100644
--- a/gcc/go/gofrontend/MERGE
+++ b/gcc/go/gofrontend/MERGE
@@ -1,4 +1,4 @@
-8aab4c94f039132b270ab03968f74d24d315e141
+c4d7bfb9895efc196b04f18f5da77fd99b39212a
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
diff --git a/libgo/runtime/go-memclr.c b/libgo/runtime/go-memclr.c
index 53b81178539..84df98db6d4 100644
--- a/libgo/runtime/go-memclr.c
+++ b/libgo/runtime/go-memclr.c
@@ -11,50 +11,39 @@ void memclrNoHeapPointers(void *, uintptr)
   __attribute__ ((no_split_stack));
 
 void
-memclrNoHeapPointers (void *p1, uintptr len)
+memclrNoHeapPointers(void *p1, uintptr len)
 {
-
-#if !defined(__PPC64__)
-  __builtin_memset(p1, 0, len);
-#else
-  int64 rem,drem,i;
-  uint64 offset;
-  volatile uint64 *vp;
+  const uintptr ptr_size = sizeof(p1);
+  uintptr rem,drem,i;
+  uintptr offset;
+  volatile uintptr *vp;
 
   if (len == 0) {
     return;
   }
   rem = len;
 
-  offset = (uint64)p1 % 8;
-  // This memset is OK since it can't contain
-  // an 8 byte aligned pointer.
-  if ((rem < 8) || (offset > 0 && offset+rem <= 16)) {
+  offset = (uintptr)p1 % ptr_size;
+  if (rem < ptr_size || offset > 0) {
+    // This memset is OK since it can't contain
+    // a pointer-aligned pointer.
     __builtin_memset(p1, 0, rem);
     return;
   }
-  // Move initial bytes to get to 8 byte boundary
-  if (offset > 0) {
-    __builtin_memset(p1, 0, 8-offset);
-    p1 = (void*)((char*)p1+8-offset);
-    rem -= 8-offset;
-  }
 
-  // If at least 8 bytes left, clear
-  drem = rem>>3;
+  drem = rem / ptr_size;
 
-  vp = (volatile uint64*)(p1);
+  vp = (volatile uintptr*)(p1);
   // Without the use of volatile here, the compiler
   // might convert the loop into a memset.
   for (i=0; i<drem; i++) {
     *vp = 0;
     vp++;
-    rem -= 8;
+    rem -= ptr_size;
   }
-  p1 = (void*)((char*)p1 + 8*drem);
-  // Clear any remaining
+  // Clear any remaining bytes.
   if (rem > 0) {
-    __builtin_memset (p1, 0, rem);
+    p1 = (void*)((char*)p1 + ptr_size*drem);
+    __builtin_memset(p1, 0, rem);
   }
-#endif
 }
diff --git a/libgo/runtime/go-memmove.c b/libgo/runtime/go-memmove.c
index 1ca3f4822b7..1dbd2b39273 100644
--- a/libgo/runtime/go-memmove.c
+++ b/libgo/runtime/go-memmove.c
@@ -12,78 +12,60 @@ void gomemmove(void *, void *, uintptr)
 
 // This implementation is necessary since
 // the __builtin_memmove might use __libc_memmove
-// which doesn't require atomicity of 8 byte
+// which doesn't require atomicity of pointer-sized
 // moves.
 
 void
-gomemmove (void *dst, void *src, uintptr len)
+gomemmove(void *dst, void *src, uintptr len)
 {
-#if !defined(__PPC64__)
-  __builtin_memmove(dst, src, len);
-#else
-  uint64 offset, tail;
-  int64 rem;
-  uint64 dwords;
-  uint64 i;
-  char *bdst,*bsrc;
-
-  rem = len;
+  const uintptr ptr_size = sizeof(dst);
+  uintptr tail;
+  uintptr rem;
+  uintptr dwords;
+  uintptr i;
+  char *bdst, *bsrc;
 
   if (len == 0) {
-       return;
+    return;
   }
 
-  // If src and dst don't have the same 8 byte alignment then
-  // there is no issue with copying pointer atomicity. Use the
-  // builtin.
-  if (((uint64)dst % 8) != ((uint64)src % 8) || len < 8) {
-       __builtin_memmove(dst, src, len);
-       return;
+  // We expect pointer-containing values to be pointer-aligned.
+  // If these pointers are not aligned, they don't contain pointers.
+  if ((uintptr)dst % ptr_size != 0 || (uintptr)src % ptr_size != 0 || len < ptr_size) {
+    __builtin_memmove(dst, src, len);
+    return;
   }
 
-  // Length >= 8 && same ptr alignment
-  offset = (uint64)dst % 8;
-
-  // If not 8 byte alignment, move the intial bytes.
-  if (offset > 0) {
-       __builtin_memmove(dst, src, 8-offset);
-       dst += (8-offset);
-       src += (8-offset);
-       rem -= (8-offset);
-  }
+  bdst = (char*)dst;
+  bsrc = (char*)src;
 
-  // Move the tail bytes to make the backward move
-  // easier.
-  tail = rem % 8;
+  // Move the tail bytes to make the backward move easier.
+  rem = len;
+  tail = rem % ptr_size;
   if (tail > 0) {
-       __builtin_memmove(dst+rem-tail, src+rem-tail, tail);
-       rem -= tail;
-  }
-
-  if (rem == 0) {
-       return;
+    __builtin_memmove(bdst+rem-tail, bsrc+rem-tail, tail);
+    rem -= tail;
   }
 
-  // Must now be 8 byte alignment and rem is multiple of 8.
-  dwords = len>>3;
+  // Must now be pointer-aligned and rem is a multiple of ptr_size.
+  dwords = rem / ptr_size;
 
-  // Determine if a backwards move is needed
-  // Forward or backward, move all doublewords
+  // Determine if a backwards move is needed.
+  // Forward or backward, move all words.
 
-  if ((uint64)(dst - src) < (uint64)rem) {
-       bdst = dst+rem-8;
-       bsrc = src+rem-8;
-       for (i = 0; i<dwords; i++) {
-               *(uint64*)bdst = *(uint64*)bsrc;
-               bdst -= 8;
-               bsrc -= 8;
-       }
+  if ((uintptr)(bdst - bsrc) < rem) {
+    bdst += rem - ptr_size;
+    bsrc += rem - ptr_size;
+    for (i = 0; i<dwords; i++) {
+      *(uintptr*)bdst = *(uintptr*)bsrc;
+      bdst -= ptr_size;
+      bsrc -= ptr_size;
+    }
   } else {
-       for (i = 0; i<dwords; i++) {
-               *(uint64*)dst = *(uint64*)src;
-               dst += 8;
-               src += 8;
-       }
+    for (i = 0; i<dwords; i++) {
+      *(uintptr*)bdst = *(uintptr*)bsrc;
+      bdst += ptr_size;
+      bsrc += ptr_size;
+    }
   }
-#endif
 }

Reply via email to