Author: Andrzej Warzyński Date: 2026-03-07T11:56:06Z New Revision: b8ea766938b4b2812f9b833a883ad02c92852ba6
URL: https://github.com/llvm/llvm-project/commit/b8ea766938b4b2812f9b833a883ad02c92852ba6 DIFF: https://github.com/llvm/llvm-project/commit/b8ea766938b4b2812f9b833a883ad02c92852ba6.diff LOG: [Clang][AArch64] Reorganize tests for `vceqz` intrinsics (NFC) (#185090) Group related `vceqz_*` and `vceqzd_*` tests together for consistency and readability. Add a comment documenting the scalar variants that are not currently covered. No functional change. Follow-up to #184893. Added: Modified: clang/test/CodeGen/AArch64/neon/fullfp16.c clang/test/CodeGen/AArch64/neon/intrinsics.c Removed: ################################################################################ diff --git a/clang/test/CodeGen/AArch64/neon/fullfp16.c b/clang/test/CodeGen/AArch64/neon/fullfp16.c index 77b6c09de857d..3cac22ee5ad54 100644 --- a/clang/test/CodeGen/AArch64/neon/fullfp16.c +++ b/clang/test/CodeGen/AArch64/neon/fullfp16.c @@ -9,6 +9,13 @@ // // Tests for unconstrained intrinsics that require the fullfp16 extension. // +// This file contains tests that were originally located in +// * clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics.c +// The main difference is the use of RUN lines that enable ClangIR lowering; +// therefore only builtins currently supported by ClangIR are tested here. +// Once ClangIR support is complete, this file is intended to replace the +// original test file. +// // These intrinsics expand to code containing multiple compound and declaration // statements rather than just plain function calls, which leads to: // * "scopes" at the CIR level, and then @@ -19,10 +26,6 @@ // // TODO: Remove `-simplifycfg` once CIR lowering includes the relevant // optimizations to reduce the CFG. -// -// TODO: Merge this file with -// * clang/test/CodeGen/AArch64/v8.2a-fp16-intrinsics.c -// (the source of these tests). 
//============================================================================= #include <arm_fp16.h> diff --git a/clang/test/CodeGen/AArch64/neon/intrinsics.c b/clang/test/CodeGen/AArch64/neon/intrinsics.c index a711245b33723..47d2a58afb550 100644 --- a/clang/test/CodeGen/AArch64/neon/intrinsics.c +++ b/clang/test/CodeGen/AArch64/neon/intrinsics.c @@ -7,11 +7,19 @@ //============================================================================= // NOTES // +// This file contains tests that were originally located in +// * clang/test/CodeGen/AArch64/neon-intrinsics.c. +// The main difference is the use of RUN lines that enable ClangIR lowering; +// therefore only builtins currently supported by ClangIR are tested here. +// Once ClangIR support is complete, this file is intended to replace the +// original test file. +// // ACLE section headings based on v2025Q2 of the ACLE specification: // * https://arm-software.github.io/acle/neon_intrinsics/advsimd.html#bitwise-equal-to-zero // // Different labels for CIR stem from an additional function call that is // present at the AST and CIR levels, but is inlined at the LLVM IR level. 
+// //============================================================================= #include <arm_neon.h> @@ -30,21 +38,6 @@ int64_t test_vnegd_s64(int64_t a) { //===------------------------------------------------------===// // 2.1.2.2 Bitwise equal to zero //===------------------------------------------------------===// -// LLVM-LABEL: @test_vceqzd_s64 -// CIR-LABEL: @vceqzd_s64 -uint64_t test_vceqzd_s64(int64_t a) { -// CIR: [[C_0:%.*]] = cir.const #cir.int<0> -// CIR: [[CMP:%.*]] = cir.cmp(eq, %{{.*}}, [[C_0]]) : !s64i, !cir.bool -// CIR: [[RES:%.*]] = cir.cast bool_to_int [[CMP]] : !cir.bool -> !cir.int<s, 1> -// CIR: cir.cast integral [[RES]] : !cir.int<s, 1> -> !u64i - -// LLVM-SAME: i64{{.*}} [[A:%.*]]) -// LLVM: [[TMP0:%.*]] = icmp eq i64 [[A]], 0 -// LLVM-NEXT: [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i64 -// LLVM-NEXT: ret i64 [[VCEQZ_I]] - return (uint64_t)vceqzd_s64(a); -} - // LLVM-LABEL: @test_vceqz_s8( // CIR-LABEL: @vceqz_s8( uint8x8_t test_vceqz_s8(int8x8_t a) { @@ -106,22 +99,6 @@ uint64x1_t test_vceqz_s64(int64x1_t a) { return vceqz_s64(a); } -// LLVM-LABEL: @test_vceqz_u64( -// CIR-LABEL: @vceqz_u64( -uint64x1_t test_vceqz_u64(uint64x1_t a) { -// CIR: cir.cast bitcast {{%.*}} : !cir.vector<8 x !s8i> -> !cir.vector<1 x !u64i> -// CIR: [[C_0:%.*]] = cir.const #cir.zero : !cir.vector<1 x !u64i> -// CIR: cir.vec.cmp(eq, {{%.*}}, [[C_0]]) : !cir.vector<1 x !u64i>, !cir.vector<1 x !s64i> - -// LLVM-SAME: <1 x i64> {{.*}} [[A:%.*]]) #[[ATTR0]] { -// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> -// LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> -// LLVM-NEXT: [[TMP2:%.*]] = icmp eq <1 x i64> [[TMP1]], zeroinitializer -// LLVM-NEXT: [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP2]] to <1 x i64> -// LLVM-NEXT: ret <1 x i64> [[VCEQZ_I]] - return vceqz_u64(a); -} - // LLVM-LABEL: @test_vceqz_p64( // CIR-LABEL: @vceqz_p64( uint64x1_t test_vceqz_p64(poly64x1_t a) { @@ -232,6 +209,22 @@ uint32x2_t test_vceqz_u32(uint32x2_t a) { return 
vceqz_u32(a); } +// LLVM-LABEL: @test_vceqz_u64( +// CIR-LABEL: @vceqz_u64( +uint64x1_t test_vceqz_u64(uint64x1_t a) { +// CIR: cir.cast bitcast {{%.*}} : !cir.vector<8 x !s8i> -> !cir.vector<1 x !u64i> +// CIR: [[C_0:%.*]] = cir.const #cir.zero : !cir.vector<1 x !u64i> +// CIR: cir.vec.cmp(eq, {{%.*}}, [[C_0]]) : !cir.vector<1 x !u64i>, !cir.vector<1 x !s64i> + +// LLVM-SAME: <1 x i64> {{.*}} [[A:%.*]]) #[[ATTR0]] { +// LLVM: [[TMP0:%.*]] = bitcast <1 x i64> [[A]] to <8 x i8> +// LLVM-NEXT: [[TMP1:%.*]] = bitcast <8 x i8> [[TMP0]] to <1 x i64> +// LLVM-NEXT: [[TMP2:%.*]] = icmp eq <1 x i64> [[TMP1]], zeroinitializer +// LLVM-NEXT: [[VCEQZ_I:%.*]] = sext <1 x i1> [[TMP2]] to <1 x i64> +// LLVM-NEXT: ret <1 x i64> [[VCEQZ_I]] + return vceqz_u64(a); +} + // LLVM-LABEL: @test_vceqzq_u8( // CIR-LABEL: @vceqzq_u8( uint8x16_t test_vceqzq_u8(uint8x16_t a) { @@ -345,6 +338,23 @@ uint32x4_t test_vceqzq_f32(float32x4_t a) { return vceqzq_f32(a); } +// LLVM-LABEL: @test_vceqzq_f64( +// CIR-LABEL: @vceqzq_f64( +uint64x2_t test_vceqzq_f64(float64x2_t a) { +// CIR: cir.cast bitcast {{%.*}} : !cir.vector<16 x !s8i> -> !cir.vector<2 x !cir.double> +// CIR: [[C_0:%.*]] = cir.const #cir.zero : !cir.vector<2 x !cir.double> +// CIR: cir.vec.cmp(eq, {{%.*}}, [[C_0]]) : !cir.vector<2 x !cir.double>, !cir.vector<2 x !s64i> + +// LLVM-SAME: <2 x double> {{.*}} [[A:%.*]]) #[[ATTR0]] { +// LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64> +// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8> +// LLVM-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double> +// LLVM-NEXT: [[TMP3:%.*]] = fcmp oeq <2 x double> [[TMP2]], zeroinitializer +// LLVM-NEXT: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64> +// LLVM-NEXT: ret <2 x i64> [[VCEQZ_I]] + return vceqzq_f64(a); +} + // LLVM-LABEL: @test_vceqz_p8( // CIR-LABEL: @vceqz_p8( uint8x8_t test_vceqz_p8(poly8x8_t a) { @@ -371,23 +381,6 @@ uint8x16_t test_vceqzq_p8(poly8x16_t a) { return vceqzq_p8(a); } -// 
LLVM-LABEL: @test_vceqzq_f64( -// CIR-LABEL: @vceqzq_f64( -uint64x2_t test_vceqzq_f64(float64x2_t a) { -// CIR: cir.cast bitcast {{%.*}} : !cir.vector<16 x !s8i> -> !cir.vector<2 x !cir.double> -// CIR: [[C_0:%.*]] = cir.const #cir.zero : !cir.vector<2 x !cir.double> -// CIR: cir.vec.cmp(eq, {{%.*}}, [[C_0]]) : !cir.vector<2 x !cir.double>, !cir.vector<2 x !s64i> - -// LLVM-SAME: <2 x double> {{.*}} [[A:%.*]]) #[[ATTR0]] { -// LLVM: [[TMP0:%.*]] = bitcast <2 x double> [[A]] to <2 x i64> -// LLVM-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8> -// LLVM-NEXT: [[TMP2:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x double> -// LLVM-NEXT: [[TMP3:%.*]] = fcmp oeq <2 x double> [[TMP2]], zeroinitializer -// LLVM-NEXT: [[VCEQZ_I:%.*]] = sext <2 x i1> [[TMP3]] to <2 x i64> -// LLVM-NEXT: ret <2 x i64> [[VCEQZ_I]] - return vceqzq_f64(a); -} - // LLVM-LABEL: @test_vceqzq_p64( // CIR-LABEL: @vceqzq_p64( uint64x2_t test_vceqzq_p64(poly64x2_t a) { @@ -404,6 +397,25 @@ uint64x2_t test_vceqzq_p64(poly64x2_t a) { return vceqzq_p64(a); } +// LLVM-LABEL: @test_vceqzd_s64 +// CIR-LABEL: @vceqzd_s64 +uint64_t test_vceqzd_s64(int64_t a) { +// CIR: [[C_0:%.*]] = cir.const #cir.int<0> +// CIR: [[CMP:%.*]] = cir.cmp(eq, %{{.*}}, [[C_0]]) : !s64i, !cir.bool +// CIR: [[RES:%.*]] = cir.cast bool_to_int [[CMP]] : !cir.bool -> !cir.int<s, 1> +// CIR: cir.cast integral [[RES]] : !cir.int<s, 1> -> !u64i + +// LLVM-SAME: i64{{.*}} [[A:%.*]]) +// LLVM: [[TMP0:%.*]] = icmp eq i64 [[A]], 0 +// LLVM-NEXT: [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i64 +// LLVM-NEXT: ret i64 [[VCEQZ_I]] + return (uint64_t)vceqzd_s64(a); +} + +// TODO SISD variants: +// vceqzd_u64, vceqzs_f32, vceqzd_f64 + + //===------------------------------------------------------===// // 2.1.1.6.1. Absolute difference //===------------------------------------------------------===// _______________________________________________ cfe-commits mailing list [email protected] https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits
