Author: ctopper
Date: Mon May 16 23:41:38 2016
New Revision: 269741

URL: http://llvm.org/viewvc/llvm-project?rev=269741&view=rev
Log:
[AVX512] Add parentheses around macro arguments in AVX512ER intrinsics.
Remove leading underscores from macro argument names. Add explicit
typecasts to all macro arguments and return values. And finally reformat
after all the adjustments.

This is a mostly mechanical change accomplished with a script. I tried to
split out any changes to the typecasts that already existed into separate
commits.
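As a minimal sketch of the macro-hygiene hazard the parenthesization and
casts guard against (UNSAFE_SCALE and SAFE_SCALE are hypothetical names for
illustration only; they do not appear in the header):

  /* Without parentheses, operator precedence inside the argument leaks
     into the expansion; with parentheses and an explicit cast, the
     argument is evaluated as a unit and converted to the expected type. */
  #include <stdio.h>

  #define UNSAFE_SCALE(x) (x * 2)        /* unparenthesized argument */
  #define SAFE_SCALE(x)   ((int)(x) * 2) /* parenthesized and cast   */

  int main(void) {
    printf("%d\n", UNSAFE_SCALE(1 + 2)); /* (1 + 2 * 2) == 5, not 6    */
    printf("%d\n", SAFE_SCALE(1 + 2));   /* ((int)(1 + 2) * 2) == 6    */
    return 0;
  }

The same reasoning applies to the (int)(R) casts below: a caller may pass
any integer expression as the rounding argument, and the cast pins down the
type handed to the builtin.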
Modified:
    cfe/trunk/lib/Headers/avx512erintrin.h

Modified: cfe/trunk/lib/Headers/avx512erintrin.h
URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/Headers/avx512erintrin.h?rev=269741&r1=269740&r2=269741&view=diff
==============================================================================
--- cfe/trunk/lib/Headers/avx512erintrin.h (original)
+++ cfe/trunk/lib/Headers/avx512erintrin.h Mon May 16 23:41:38 2016
@@ -31,66 +31,66 @@
 #define _mm512_exp2a23_round_pd(A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
                                       (__v8df)_mm512_setzero_pd(), \
-                                      (__mmask8)-1, (R)); })
+                                      (__mmask8)-1, (int)(R)); })

 #define _mm512_mask_exp2a23_round_pd(S, M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
-                                      (__v8df)(__m512d)(S), \
-                                      (__mmask8)(M), (R)); })
+                                      (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                      (int)(R)); })

 #define _mm512_maskz_exp2a23_round_pd(M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_exp2pd_mask((__v8df)(__m512d)(A), \
                                       (__v8df)_mm512_setzero_pd(), \
-                                      (__mmask8)(M), (R)); })
+                                      (__mmask8)(M), (int)(R)); })

 #define _mm512_exp2a23_pd(A) \
-   _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_exp2a23_round_pd((A), _MM_FROUND_CUR_DIRECTION)

 #define _mm512_mask_exp2a23_pd(S, M, A) \
-   _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_mask_exp2a23_round_pd((S), (M), (A), _MM_FROUND_CUR_DIRECTION)

 #define _mm512_maskz_exp2a23_pd(M, A) \
-   _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_maskz_exp2a23_round_pd((M), (A), _MM_FROUND_CUR_DIRECTION)

 #define _mm512_exp2a23_round_ps(A, R) __extension__ ({ \
   (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
                                      (__v16sf)_mm512_setzero_ps(), \
-                                     (__mmask16)-1, (R)); })
+                                     (__mmask16)-1, (int)(R)); })

 #define _mm512_mask_exp2a23_round_ps(S, M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
-                                     (__v16sf)(__m512)(S), \
-                                     (__mmask16)(M), (R)); })
+                                     (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                     (int)(R)); })

 #define _mm512_maskz_exp2a23_round_ps(M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_exp2ps_mask((__v16sf)(__m512)(A), \
                                      (__v16sf)_mm512_setzero_ps(), \
-                                     (__mmask16)(M), (R)); })
+                                     (__mmask16)(M), (int)(R)); })

 #define _mm512_exp2a23_ps(A) \
-   _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_exp2a23_round_ps((A), _MM_FROUND_CUR_DIRECTION)

 #define _mm512_mask_exp2a23_ps(S, M, A) \
-   _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_mask_exp2a23_round_ps((S), (M), (A), _MM_FROUND_CUR_DIRECTION)

 #define _mm512_maskz_exp2a23_ps(M, A) \
-   _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)
+  _mm512_maskz_exp2a23_round_ps((M), (A), _MM_FROUND_CUR_DIRECTION)

 // rsqrt28
 #define _mm512_rsqrt28_round_pd(A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
                                          (__v8df)_mm512_setzero_pd(), \
-                                         (__mmask8)-1, (R)); })
+                                         (__mmask8)-1, (int)(R)); })

 #define _mm512_mask_rsqrt28_round_pd(S, M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
-                                         (__v8df)(__m512d)(S), \
-                                         (__mmask8)(M), (R)); })
+                                         (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                         (int)(R)); })

 #define _mm512_maskz_rsqrt28_round_pd(M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rsqrt28pd_mask((__v8df)(__m512d)(A), \
                                          (__v8df)_mm512_setzero_pd(), \
-                                         (__mmask8)(M), (R)); })
+                                         (__mmask8)(M), (int)(R)); })

 #define _mm512_rsqrt28_pd(A) \
   _mm512_rsqrt28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -104,17 +104,17 @@
 #define _mm512_rsqrt28_round_ps(A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
                                         (__v16sf)_mm512_setzero_ps(), \
-                                        (__mmask16)-1, (R)); })
+                                        (__mmask16)-1, (int)(R)); })

 #define _mm512_mask_rsqrt28_round_ps(S, M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
-                                        (__v16sf)(__m512)(S), \
-                                        (__mmask16)(M), (R)); })
+                                        (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                        (int)(R)); })

 #define _mm512_maskz_rsqrt28_round_ps(M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rsqrt28ps_mask((__v16sf)(__m512)(A), \
                                         (__v16sf)_mm512_setzero_ps(), \
-                                        (__mmask16)(M), (R)); })
+                                        (__mmask16)(M), (int)(R)); })

 #define _mm512_rsqrt28_ps(A) \
   _mm512_rsqrt28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -127,21 +127,21 @@
 #define _mm_rsqrt28_round_ss(A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)_mm_setzero_ps(), \
-                                        (__mmask8)-1, (R)); })
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)-1, (int)(R)); })

 #define _mm_mask_rsqrt28_round_ss(S, M, A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)(__m128)(S), \
-                                        (__mmask8)(M), (R)); })
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)(__m128)(S), \
+                                              (__mmask8)(M), (int)(R)); })

 #define _mm_maskz_rsqrt28_round_ss(M, A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rsqrt28ss_round_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)_mm_setzero_ps(), \
-                                        (__mmask8)(M), (R)); })
+                                              (__v4sf)(__m128)(B), \
+                                              (__v4sf)_mm_setzero_ps(), \
+                                              (__mmask8)(M), (int)(R)); })

 #define _mm_rsqrt28_ss(A, B) \
   _mm_rsqrt28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -154,21 +154,21 @@
 #define _mm_rsqrt28_round_sd(A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)_mm_setzero_pd(), \
-                                        (__mmask8)-1, (R)); })
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)-1, (int)(R)); })

 #define _mm_mask_rsqrt28_round_sd(S, M, A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)(__m128d)(S), \
-                                        (__mmask8)(M), (R)); })
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)(__m128d)(S), \
+                                               (__mmask8)(M), (int)(R)); })

 #define _mm_maskz_rsqrt28_round_sd(M, A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rsqrt28sd_round_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)_mm_setzero_pd(), \
-                                        (__mmask8)(M), (R)); })
+                                               (__v2df)(__m128d)(B), \
+                                               (__v2df)_mm_setzero_pd(), \
+                                               (__mmask8)(M), (int)(R)); })

 #define _mm_rsqrt28_sd(A, B) \
   _mm_rsqrt28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -183,17 +183,17 @@
 #define _mm512_rcp28_round_pd(A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
                                        (__v8df)_mm512_setzero_pd(), \
-                                       (__mmask8)-1, (R)); })
+                                       (__mmask8)-1, (int)(R)); })

 #define _mm512_mask_rcp28_round_pd(S, M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
-                                       (__v8df)(__m512d)(S), \
-                                       (__mmask8)(M), (R)); })
+                                       (__v8df)(__m512d)(S), (__mmask8)(M), \
+                                       (int)(R)); })

 #define _mm512_maskz_rcp28_round_pd(M, A, R) __extension__ ({ \
   (__m512d)__builtin_ia32_rcp28pd_mask((__v8df)(__m512d)(A), \
                                        (__v8df)_mm512_setzero_pd(), \
-                                       (__mmask8)(M), (R)); })
+                                       (__mmask8)(M), (int)(R)); })

 #define _mm512_rcp28_pd(A) \
   _mm512_rcp28_round_pd((A), _MM_FROUND_CUR_DIRECTION)
@@ -207,17 +207,17 @@
 #define _mm512_rcp28_round_ps(A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
                                       (__v16sf)_mm512_setzero_ps(), \
-                                      (__mmask16)-1, (R)); })
+                                      (__mmask16)-1, (int)(R)); })

 #define _mm512_mask_rcp28_round_ps(S, M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
-                                      (__v16sf)(__m512)(S), \
-                                      (__mmask16)(M), (R)); })
+                                      (__v16sf)(__m512)(S), (__mmask16)(M), \
+                                      (int)(R)); })

 #define _mm512_maskz_rcp28_round_ps(M, A, R) __extension__ ({ \
   (__m512)__builtin_ia32_rcp28ps_mask((__v16sf)(__m512)(A), \
                                       (__v16sf)_mm512_setzero_ps(), \
-                                      (__mmask16)(M), (R)); })
+                                      (__mmask16)(M), (int)(R)); })

 #define _mm512_rcp28_ps(A) \
   _mm512_rcp28_round_ps((A), _MM_FROUND_CUR_DIRECTION)
@@ -230,21 +230,21 @@
 #define _mm_rcp28_round_ss(A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)_mm_setzero_ps(), \
-                                        (__mmask8)-1, (R)); })
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)-1, (int)(R)); })

 #define _mm_mask_rcp28_round_ss(S, M, A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)(__m128)(S), \
-                                        (__mmask8)(M), (R)); })
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)(__m128)(S), \
+                                            (__mmask8)(M), (int)(R)); })

 #define _mm_maskz_rcp28_round_ss(M, A, B, R) __extension__ ({ \
   (__m128)__builtin_ia32_rcp28ss_round_mask((__v4sf)(__m128)(A), \
-                                        (__v4sf)(__m128)(B), \
-                                        (__v4sf)_mm_setzero_ps(), \
-                                        (__mmask8)(M), (R)); })
+                                            (__v4sf)(__m128)(B), \
+                                            (__v4sf)_mm_setzero_ps(), \
+                                            (__mmask8)(M), (int)(R)); })

 #define _mm_rcp28_ss(A, B) \
   _mm_rcp28_round_ss((A), (B), _MM_FROUND_CUR_DIRECTION)
@@ -257,21 +257,21 @@
 #define _mm_rcp28_round_sd(A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)_mm_setzero_pd(), \
-                                        (__mmask8)-1, (R)); })
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)-1, (int)(R)); })

 #define _mm_mask_rcp28_round_sd(S, M, A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)(__m128d)(S), \
-                                        (__mmask8)(M), (R)); })
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)(__m128d)(S), \
+                                             (__mmask8)(M), (int)(R)); })

 #define _mm_maskz_rcp28_round_sd(M, A, B, R) __extension__ ({ \
   (__m128d)__builtin_ia32_rcp28sd_round_mask((__v2df)(__m128d)(A), \
-                                        (__v2df)(__m128d)(B), \
-                                        (__v2df)_mm_setzero_pd(), \
-                                        (__mmask8)(M), (R)); })
+                                             (__v2df)(__m128d)(B), \
+                                             (__v2df)_mm_setzero_pd(), \
+                                             (__mmask8)(M), (int)(R)); })

 #define _mm_rcp28_sd(A, B) \
   _mm_rcp28_round_sd((A), (B), _MM_FROUND_CUR_DIRECTION)
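For illustration, a hypothetical caller of one of the adjusted macros; this
sketch is not part of the commit and assumes a toolchain and CPU with
AVX512ER support (e.g. compiled with -mavx512er):

  #include <immintrin.h>
  #include <stdio.h>

  int main(void) {
    __m512d x = _mm512_set1_pd(4.0);
    /* R may be any integer expression; after this change the macro wraps
       it in parentheses and casts it to int before calling the builtin. */
    __m512d r = _mm512_rsqrt28_round_pd(x, _MM_FROUND_NO_EXC);
    double out[8];
    _mm512_storeu_pd(out, r);
    printf("%f\n", out[0]); /* approximately 0.5 == 1/sqrt(4) */
    return 0;
  }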