Check the result of __get_cpuid and process FEAT1_REGISTER only when
__get_cpuid returns success. Use __cpuid instead of nested __get_cpuid.

With the check in place, the feature bits are interpreted only when the
leaf-1 query actually succeeded, instead of relying on FEAT1_REGISTER
having been zero-initialized beforehand. The nested __get_cpuid used
for the vendor signature can become a plain __cpuid, because leaf 0 is
always supported once leaf 1 has been read successfully.
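
For reference, here is a minimal standalone sketch (not part of the
patch) of the two <cpuid.h> interfaces involved: __get_cpuid validates
the requested leaf against the CPU's maximum supported leaf and returns
nonzero on success, while the __cpuid macro executes CPUID
unconditionally and takes lvalues rather than pointers.

#include <cpuid.h>
#include <stdio.h>

int
main (void)
{
  unsigned int eax, ebx, ecx, edx;

  /* Returns 0 without writing the outputs when CPUID or leaf 1 is
     unsupported; this is the failure the patch now guards against.  */
  if (!__get_cpuid (1, &eax, &ebx, &ecx, &edx))
    return 1;

  /* Leaf 1 succeeded, so leaf 0 exists: the raw macro cannot fail.
     It clobbers eax/ebx/edx here, just as in the patched code.  */
  unsigned int ecx2;
  __cpuid (0, eax, ebx, ecx2, edx);

  /* signature_*_ecx are the vendor-string words from <cpuid.h>.  */
  printf ("vendor ecx word: %#x (Intel %#x, AMD %#x)\n",
          ecx2, signature_INTEL_ecx, signature_AMD_ecx);
  return 0;
}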
libatomic/ChangeLog:
* config/x86/init.c (__libat_feat1_init): Check the result of
__get_cpuid and process FEAT1_REGISTER only when __get_cpuid
returns success. Use __cpuid instead of nested __get_cpuid.
Bootstrapped and regression tested libatomic on x86_64-linux-gnu {,-m32}.
Uros.
diff --git a/libatomic/config/x86/init.c b/libatomic/config/x86/init.c
index a75be3f175c..26168d46832 100644
--- a/libatomic/config/x86/init.c
+++ b/libatomic/config/x86/init.c
@@ -33,21 +33,23 @@ __libat_feat1_init (void)
{
unsigned int eax, ebx, ecx, edx;
FEAT1_REGISTER = 0;
- __get_cpuid (1, &eax, &ebx, &ecx, &edx);
-#ifdef __x86_64__
- if ((FEAT1_REGISTER & (bit_AVX | bit_CMPXCHG16B))
- == (bit_AVX | bit_CMPXCHG16B))
+ if (__get_cpuid (1, &eax, &ebx, &ecx, &edx))
{
- /* Intel SDM guarantees that 16-byte VMOVDQA on 16-byte aligned address
- is atomic, and AMD is going to do something similar soon.
- We don't have a guarantee from vendors of other CPUs with AVX,
- like Zhaoxin and VIA. */
- unsigned int ecx2 = 0;
- __get_cpuid (0, &eax, &ebx, &ecx2, &edx);
- if (ecx2 != signature_INTEL_ecx && ecx2 != signature_AMD_ecx)
- FEAT1_REGISTER &= ~bit_AVX;
- }
+#ifdef __x86_64__
+ if ((FEAT1_REGISTER & (bit_AVX | bit_CMPXCHG16B))
+ == (bit_AVX | bit_CMPXCHG16B))
+ {
+ /* Intel SDM guarantees that 16-byte VMOVDQA on 16-byte aligned
+ address is atomic, and AMD is going to do something similar soon.
+ We don't have a guarantee from vendors of other CPUs with AVX,
+ like Zhaoxin and VIA. */
+ unsigned int ecx2;
+ __cpuid (0, eax, ebx, ecx2, edx);
+ if (ecx2 != signature_INTEL_ecx && ecx2 != signature_AMD_ecx)
+ FEAT1_REGISTER &= ~bit_AVX;
+ }
#endif
+ }
/* See the load in load_feat1. */
__atomic_store_n (&__libat_feat1, FEAT1_REGISTER, __ATOMIC_RELAXED);
return FEAT1_REGISTER;
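
For context, the final relaxed store pairs with a relaxed load in
load_feat1 (the "See the load in load_feat1" comment). A sketch of
that lazy-init pattern follows; the names feat1_cache and
load_feat1_sketch are illustrative rather than the actual libatomic
identifiers, and the real code may differ in detail.

extern unsigned int __libat_feat1_init (void);

/* Stand-in for __libat_feat1; zero means "not yet initialized".  */
static unsigned int feat1_cache;

static inline unsigned int
load_feat1_sketch (void)
{
  /* Relaxed ordering suffices: any thread racing past the check just
     recomputes the same CPUID result, so there is nothing to order.  */
  unsigned int f = __atomic_load_n (&feat1_cache, __ATOMIC_RELAXED);
  if (__builtin_expect (f == 0, 0))
    /* First call (or CPUID reported no features): rerun the init
       above, which stores its result and returns it.  */
    f = __libat_feat1_init ();
  return f;
}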