================
@@ -7,155 +7,155 @@
 // RUN: -o - | FileCheck %s --check-prefixes=CHECK,NO_HALF
 #ifdef __HLSL_ENABLE_16_BIT
-// NATIVE_HALF: %dx.dot = mul i16 %0, %1
-// NATIVE_HALF: ret i16 %dx.dot
+// NATIVE_HALF: %dot = mul i16 %0, %1
+// NATIVE_HALF: ret i16 %dot
 int16_t test_dot_short(int16_t p0, int16_t p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.sdot.v2i16(<2 x i16> %0, <2 x i16> %1)
-// NATIVE_HALF: ret i16 %dx.dot
+// NATIVE_HALF: %dot = call i16 @llvm.sdot.v2i16(<2 x i16> %0, <2 x i16> %1)
+// NATIVE_HALF: ret i16 %dot
 int16_t test_dot_short2(int16_t2 p0, int16_t2 p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.sdot.v3i16(<3 x i16> %0, <3 x i16> %1)
-// NATIVE_HALF: ret i16 %dx.dot
+// NATIVE_HALF: %dot = call i16 @llvm.sdot.v3i16(<3 x i16> %0, <3 x i16> %1)
+// NATIVE_HALF: ret i16 %dot
 int16_t test_dot_short3(int16_t3 p0, int16_t3 p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.sdot.v4i16(<4 x i16> %0, <4 x i16> %1)
-// NATIVE_HALF: ret i16 %dx.dot
+// NATIVE_HALF: %dot = call i16 @llvm.sdot.v4i16(<4 x i16> %0, <4 x i16> %1)
+// NATIVE_HALF: ret i16 %dot
 int16_t test_dot_short4(int16_t4 p0, int16_t4 p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = mul i16 %0, %1
-// NATIVE_HALF: ret i16 %dx.dot
+// NATIVE_HALF: %dot = mul i16 %0, %1
+// NATIVE_HALF: ret i16 %dot
 uint16_t test_dot_ushort(uint16_t p0, uint16_t p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.udot.v2i16(<2 x i16> %0, <2 x i16> %1)
-// NATIVE_HALF: ret i16 %dx.dot
+// NATIVE_HALF: %dot = call i16 @llvm.udot.v2i16(<2 x i16> %0, <2 x i16> %1)
+// NATIVE_HALF: ret i16 %dot
 uint16_t test_dot_ushort2(uint16_t2 p0, uint16_t2 p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.udot.v3i16(<3 x i16> %0, <3 x i16> %1)
-// NATIVE_HALF: ret i16 %dx.dot
+// NATIVE_HALF: %dot = call i16 @llvm.udot.v3i16(<3 x i16> %0, <3 x i16> %1)
+// NATIVE_HALF: ret i16 %dot
 uint16_t test_dot_ushort3(uint16_t3 p0, uint16_t3 p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = call i16 @llvm.dx.udot.v4i16(<4 x i16> %0, <4 x i16> %1)
-// NATIVE_HALF: ret i16 %dx.dot
+// NATIVE_HALF: %dot = call i16 @llvm.udot.v4i16(<4 x i16> %0, <4 x i16> %1)
+// NATIVE_HALF: ret i16 %dot
 uint16_t test_dot_ushort4(uint16_t4 p0, uint16_t4 p1) { return dot(p0, p1); }
 #endif
-// CHECK: %dx.dot = mul i32 %0, %1
-// CHECK: ret i32 %dx.dot
+// CHECK: %dot = mul i32 %0, %1
+// CHECK: ret i32 %dot
 int test_dot_int(int p0, int p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i32 @llvm.dx.sdot.v2i32(<2 x i32> %0, <2 x i32> %1)
-// CHECK: ret i32 %dx.dot
+// CHECK: %dot = call i32 @llvm.sdot.v2i32(<2 x i32> %0, <2 x i32> %1)
+// CHECK: ret i32 %dot
 int test_dot_int2(int2 p0, int2 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i32 @llvm.dx.sdot.v3i32(<3 x i32> %0, <3 x i32> %1)
-// CHECK: ret i32 %dx.dot
+// CHECK: %dot = call i32 @llvm.sdot.v3i32(<3 x i32> %0, <3 x i32> %1)
+// CHECK: ret i32 %dot
 int test_dot_int3(int3 p0, int3 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i32 @llvm.dx.sdot.v4i32(<4 x i32> %0, <4 x i32> %1)
-// CHECK: ret i32 %dx.dot
+// CHECK: %dot = call i32 @llvm.sdot.v4i32(<4 x i32> %0, <4 x i32> %1)
+// CHECK: ret i32 %dot
 int test_dot_int4(int4 p0, int4 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = mul i32 %0, %1
-// CHECK: ret i32 %dx.dot
+// CHECK: %dot = mul i32 %0, %1
+// CHECK: ret i32 %dot
 uint test_dot_uint(uint p0, uint p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i32 @llvm.dx.udot.v2i32(<2 x i32> %0, <2 x i32> %1)
-// CHECK: ret i32 %dx.dot
+// CHECK: %dot = call i32 @llvm.udot.v2i32(<2 x i32> %0, <2 x i32> %1)
+// CHECK: ret i32 %dot
 uint test_dot_uint2(uint2 p0, uint2 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i32 @llvm.dx.udot.v3i32(<3 x i32> %0, <3 x i32> %1)
-// CHECK: ret i32 %dx.dot
+// CHECK: %dot = call i32 @llvm.udot.v3i32(<3 x i32> %0, <3 x i32> %1)
+// CHECK: ret i32 %dot
 uint test_dot_uint3(uint3 p0, uint3 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i32 @llvm.dx.udot.v4i32(<4 x i32> %0, <4 x i32> %1)
-// CHECK: ret i32 %dx.dot
+// CHECK: %dot = call i32 @llvm.udot.v4i32(<4 x i32> %0, <4 x i32> %1)
+// CHECK: ret i32 %dot
 uint test_dot_uint4(uint4 p0, uint4 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = mul i64 %0, %1
-// CHECK: ret i64 %dx.dot
+// CHECK: %dot = mul i64 %0, %1
+// CHECK: ret i64 %dot
 int64_t test_dot_long(int64_t p0, int64_t p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i64 @llvm.dx.sdot.v2i64(<2 x i64> %0, <2 x i64> %1)
-// CHECK: ret i64 %dx.dot
+// CHECK: %dot = call i64 @llvm.sdot.v2i64(<2 x i64> %0, <2 x i64> %1)
+// CHECK: ret i64 %dot
 int64_t test_dot_long2(int64_t2 p0, int64_t2 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i64 @llvm.dx.sdot.v3i64(<3 x i64> %0, <3 x i64> %1)
-// CHECK: ret i64 %dx.dot
+// CHECK: %dot = call i64 @llvm.sdot.v3i64(<3 x i64> %0, <3 x i64> %1)
+// CHECK: ret i64 %dot
 int64_t test_dot_long3(int64_t3 p0, int64_t3 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i64 @llvm.dx.sdot.v4i64(<4 x i64> %0, <4 x i64> %1)
-// CHECK: ret i64 %dx.dot
+// CHECK: %dot = call i64 @llvm.sdot.v4i64(<4 x i64> %0, <4 x i64> %1)
+// CHECK: ret i64 %dot
 int64_t test_dot_long4(int64_t4 p0, int64_t4 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = mul i64 %0, %1
-// CHECK: ret i64 %dx.dot
+// CHECK: %dot = mul i64 %0, %1
+// CHECK: ret i64 %dot
 uint64_t test_dot_ulong(uint64_t p0, uint64_t p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i64 @llvm.dx.udot.v2i64(<2 x i64> %0, <2 x i64> %1)
-// CHECK: ret i64 %dx.dot
+// CHECK: %dot = call i64 @llvm.udot.v2i64(<2 x i64> %0, <2 x i64> %1)
+// CHECK: ret i64 %dot
 uint64_t test_dot_ulong2(uint64_t2 p0, uint64_t2 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i64 @llvm.dx.udot.v3i64(<3 x i64> %0, <3 x i64> %1)
-// CHECK: ret i64 %dx.dot
+// CHECK: %dot = call i64 @llvm.udot.v3i64(<3 x i64> %0, <3 x i64> %1)
+// CHECK: ret i64 %dot
 uint64_t test_dot_ulong3(uint64_t3 p0, uint64_t3 p1) { return dot(p0, p1); }
-// CHECK: %dx.dot = call i64 @llvm.dx.udot.v4i64(<4 x i64> %0, <4 x i64> %1)
-// CHECK: ret i64 %dx.dot
+// CHECK: %dot = call i64 @llvm.udot.v4i64(<4 x i64> %0, <4 x i64> %1)
+// CHECK: ret i64 %dot
 uint64_t test_dot_ulong4(uint64_t4 p0, uint64_t4 p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = fmul half %0, %1
-// NATIVE_HALF: ret half %dx.dot
-// NO_HALF: %dx.dot = fmul float %0, %1
-// NO_HALF: ret float %dx.dot
+// NATIVE_HALF: %dot = fmul half %0, %1
+// NATIVE_HALF: ret half %dot
+// NO_HALF: %dot = fmul float %0, %1
+// NO_HALF: ret float %dot
 half test_dot_half(half p0, half p1) { return dot(p0, p1); }
-// NATIVE_HALF: %dx.dot = call half @llvm.dx.dot2.v2f16(<2 x half> %0, <2 x half> %1)
-// NATIVE_HALF: ret half %dx.dot
-// NO_HALF: %dx.dot = call float @llvm.dx.dot2.v2f32(<2 x float> %0, <2 x float> %1)
-// NO_HALF: ret float %dx.dot
+// NATIVE_HALF: %dot = call half @llvm.fdot.v2f16(<2 x half> %0, <2 x half> %1)
+// NATIVE_HALF: ret half %dot
+// NO_HALF: %dot = call float @llvm.fdot.v2f32(<2 x float> %0, <2 x float> %1)
+// NO_HALF: ret float %dot
----------------
pow2clk wrote:
Up to this point, the only change is to the temporary names, since these are no longer DX-exclusive ops. Here and hereafter, for floating-point values we no longer lower to the vector-size-specific ops until DXIL intrinsic expansion, so these checks take a more generic form.

https://github.com/llvm/llvm-project/pull/102872
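
To illustrate the float path described above, here is a minimal LLVM IR sketch. It is illustrative only: the function name is made up, the @llvm.fdot intrinsic is assumed to exist as introduced by this change, and the post-expansion form in the trailing comment is an assumption about what DXIL intrinsic expansion selects for a 2-element vector.

    ; Generic form now emitted by clang for a 2-element float dot product,
    ; matching the CHECK lines above; no vector-size-specific op is chosen yet.
    define float @sketch_dot_float2(<2 x float> %a, <2 x float> %b) {
    entry:
      %dot = call float @llvm.fdot.v2f32(<2 x float> %a, <2 x float> %b)
      ret float %dot
    }

    declare float @llvm.fdot.v2f32(<2 x float>, <2 x float>)

    ; Assumed shape only after DXIL intrinsic expansion in the backend, where
    ; the size-specific op is finally picked (dot2 for a 2-element vector):
    ;   %dx.dot = call float @llvm.dx.dot2.v2f32(<2 x float> %a, <2 x float> %b)

In other words, the clang-emitted IR stays in this generic form, and the dot2/dot3/dot4 choice is deferred to DXIL intrinsic expansion.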