On 29.08.2023 12:53, Jan Beulich wrote:
> Everywhere else the VL tests are grouped with the basic ones,
> distinguished simply by the "form" specifiers.
> 
> No change to the generated test blobs, and hence no functional change.
> 
> Signed-off-by: Jan Beulich <[email protected]>

Any chance of an ack for this purely mechanical tidying?

Thanks, Jan

> --- a/tools/tests/x86_emulator/Makefile
> +++ b/tools/tests/x86_emulator/Makefile
> @@ -18,7 +18,7 @@ CFLAGS += $(CFLAGS_xeninclude)
>  
>  SIMD := 3dnow sse sse2 sse4 avx avx2 xop avx512f avx512bw avx512dq avx512er avx512vbmi avx512fp16
>  FMA := fma4 fma
> -SG := avx2-sg avx512f-sg avx512vl-sg
> +SG := avx2-sg avx512f-sg
>  AES := ssse3-aes avx-aes avx2-vaes avx512bw-vaes
>  CLMUL := ssse3-pclmul avx-pclmul avx2-vpclmulqdq avx512bw-vpclmulqdq avx512vbmi2-vpclmulqdq
>  SHA := sse4-sha avx-sha avx512f-sha
> @@ -70,14 +70,10 @@ xop-flts := $(avx-flts)
>  avx512f-vecs := 64 16 32
>  avx512f-ints := 4 8
>  avx512f-flts := 4 8
> -avx512f-sg-vecs := 64
> +avx512f-sg-vecs := $(avx512f-vecs)
>  avx512f-sg-idxs := 4 8
>  avx512f-sg-ints := $(avx512f-ints)
>  avx512f-sg-flts := $(avx512f-flts)
> -avx512vl-sg-vecs := 16 32
> -avx512vl-sg-idxs := $(avx512f-sg-idxs)
> -avx512vl-sg-ints := $(avx512f-ints)
> -avx512vl-sg-flts := $(avx512f-flts)
>  avx512bw-vecs := $(avx512f-vecs)
>  avx512bw-ints := 1 2
>  avx512bw-flts :=
> --- a/tools/tests/x86_emulator/test_x86_emulator.c
> +++ b/tools/tests/x86_emulator/test_x86_emulator.c
> @@ -34,7 +34,6 @@ asm ( ".pushsection .test, \"ax\", @prog
>  #include "avx512f.h"
>  #include "avx512f-sg.h"
>  #include "avx512f-sha.h"
> -#include "avx512vl-sg.h"
>  #include "avx512bw.h"
>  #include "avx512bw-vaes.h"
>  #include "avx512bw-vpclmulqdq.h"
> @@ -462,22 +461,22 @@ static const struct {
>      AVX512VL(VL u64x2,        avx512f,      16u8),
>      AVX512VL(VL s64x4,        avx512f,      32i8),
>      AVX512VL(VL u64x4,        avx512f,      32u8),
> -    SIMD(AVX512VL S/G f32[4x32], avx512vl_sg, 16x4f4),
> -    SIMD(AVX512VL S/G f64[2x32], avx512vl_sg, 16x4f8),
> -    SIMD(AVX512VL S/G f32[2x64], avx512vl_sg, 16x8f4),
> -    SIMD(AVX512VL S/G f64[2x64], avx512vl_sg, 16x8f8),
> -    SIMD(AVX512VL S/G f32[8x32], avx512vl_sg, 32x4f4),
> -    SIMD(AVX512VL S/G f64[4x32], avx512vl_sg, 32x4f8),
> -    SIMD(AVX512VL S/G f32[4x64], avx512vl_sg, 32x8f4),
> -    SIMD(AVX512VL S/G f64[4x64], avx512vl_sg, 32x8f8),
> -    SIMD(AVX512VL S/G i32[4x32], avx512vl_sg, 16x4i4),
> -    SIMD(AVX512VL S/G i64[2x32], avx512vl_sg, 16x4i8),
> -    SIMD(AVX512VL S/G i32[2x64], avx512vl_sg, 16x8i4),
> -    SIMD(AVX512VL S/G i64[2x64], avx512vl_sg, 16x8i8),
> -    SIMD(AVX512VL S/G i32[8x32], avx512vl_sg, 32x4i4),
> -    SIMD(AVX512VL S/G i64[4x32], avx512vl_sg, 32x4i8),
> -    SIMD(AVX512VL S/G i32[4x64], avx512vl_sg, 32x8i4),
> -    SIMD(AVX512VL S/G i64[4x64], avx512vl_sg, 32x8i8),
> +    SIMD(AVX512VL S/G f32[4x32], avx512f_sg, 16x4f4),
> +    SIMD(AVX512VL S/G f64[2x32], avx512f_sg, 16x4f8),
> +    SIMD(AVX512VL S/G f32[2x64], avx512f_sg, 16x8f4),
> +    SIMD(AVX512VL S/G f64[2x64], avx512f_sg, 16x8f8),
> +    SIMD(AVX512VL S/G f32[8x32], avx512f_sg, 32x4f4),
> +    SIMD(AVX512VL S/G f64[4x32], avx512f_sg, 32x4f8),
> +    SIMD(AVX512VL S/G f32[4x64], avx512f_sg, 32x8f4),
> +    SIMD(AVX512VL S/G f64[4x64], avx512f_sg, 32x8f8),
> +    SIMD(AVX512VL S/G i32[4x32], avx512f_sg, 16x4i4),
> +    SIMD(AVX512VL S/G i64[2x32], avx512f_sg, 16x4i8),
> +    SIMD(AVX512VL S/G i32[2x64], avx512f_sg, 16x8i4),
> +    SIMD(AVX512VL S/G i64[2x64], avx512f_sg, 16x8i8),
> +    SIMD(AVX512VL S/G i32[8x32], avx512f_sg, 32x4i4),
> +    SIMD(AVX512VL S/G i64[4x32], avx512f_sg, 32x4i8),
> +    SIMD(AVX512VL S/G i32[4x64], avx512f_sg, 32x8i4),
> +    SIMD(AVX512VL S/G i64[4x64], avx512f_sg, 32x8i8),
>      SIMD(AVX512BW s8x64,     avx512bw,      64i1),
>      SIMD(AVX512BW u8x64,     avx512bw,      64u1),
>      SIMD(AVX512BW s16x32,    avx512bw,      64i2),
> 
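For readers less familiar with the harness naming: the "form" specifiers in the table above (16x4f4, 32x8i8, and so on) appear to encode the full vector width in bytes, the index element width in bytes, and the data element kind ('f' or 'i') and width in bytes, mirroring the <prefix>-sg-vecs/-idxs/-flts/-ints Makefile variables. The following is a minimal, hypothetical sketch of that decoding only; struct sg_form and parse_form() are illustrative names and not part of the test harness.

/*
 * Hypothetical helper (not harness code): decodes a "form" string such as
 * "16x4f4", assuming the convention
 *     <vector bytes>x<index bytes><f|i><element bytes>
 * suggested by the table entries above (e.g. 16x4f4, 32x8i8).
 */
#include <stdio.h>

struct sg_form {
    unsigned int vec_bytes;   /* full vector width in bytes (16/32/64) */
    unsigned int idx_bytes;   /* width of each index element (4 or 8) */
    char         elem_kind;   /* 'f' for float, 'i' for integer */
    unsigned int elem_bytes;  /* width of each data element (4 or 8) */
};

static int parse_form(const char *s, struct sg_form *f)
{
    char kind;

    if ( sscanf(s, "%ux%u%c%u", &f->vec_bytes, &f->idx_bytes,
                &kind, &f->elem_bytes) != 4 )
        return -1;
    if ( kind != 'f' && kind != 'i' )
        return -1;
    f->elem_kind = kind;

    return 0;
}

int main(void)
{
    static const char *const forms[] = { "16x4f4", "32x8i8", "64x4f8" };
    unsigned int i;

    for ( i = 0; i < sizeof(forms) / sizeof(*forms); ++i )
    {
        struct sg_form f;

        if ( parse_form(forms[i], &f) )
            continue;
        printf("%s: %u-byte vector, %u-byte indices, %u-byte %s elements\n",
               forms[i], f.vec_bytes, f.idx_bytes, f.elem_bytes,
               f.elem_kind == 'f' ? "float" : "integer");
    }

    return 0;
}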

