This revision was automatically updated to reflect the committed changes.
Closed by commit rG2da0df5e7cac: [X86][bugfix] redefine __SSC_MARK to escape cpp string literal concatenation… (authored by xiangzhangllvm).
Herald added a project: clang.
Herald added a subscriber: cfe-commits.

Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D132636/new/

https://reviews.llvm.org/D132636

Files:
  clang/lib/Headers/x86gprintrin.h


Index: clang/lib/Headers/x86gprintrin.h
===================================================================
--- clang/lib/Headers/x86gprintrin.h
+++ clang/lib/Headers/x86gprintrin.h
@@ -26,22 +26,19 @@
 #endif
 
 #if defined(__i386__)
-#define __FULLBX "ebx"
+#define __SAVE_GPRBX "mov {%%ebx, %%eax |eax, ebx};"
+#define __RESTORE_GPRBX "mov {%%eax, %%ebx |ebx, eax};"
 #define __TMPGPR "eax"
 #else
 // When targeting 64-bit, 32-bit operands generate a 32-bit result that is
 // zero-extended to a 64-bit result in the destination general-purpose
 // register. This means "mov x, %ebx" will clobber the higher 32 bits of
 // rbx, so we should preserve the full 64-bit register rbx.
-#define __FULLBX "rbx"
+#define __SAVE_GPRBX "mov {%%rbx, %%rax |rax, rbx};"
+#define __RESTORE_GPRBX "mov {%%rax, %%rbx |rbx, rax};"
 #define __TMPGPR "rax"
 #endif
 
-#define __MOVEGPR(__r1, __r2) "mov {%%"__r1 ", %%"__r2 "|"__r2 ", "__r1"};"
-
-#define __SAVE_GPRBX __MOVEGPR(__FULLBX, __TMPGPR)
-#define __RESTORE_GPRBX __MOVEGPR(__TMPGPR, __FULLBX)
-
 #define __SSC_MARK(__Tag)                                                      \
   __asm__ __volatile__( __SAVE_GPRBX                                           \
                        "mov {%0, %%ebx|ebx, %0}; "                             \