craig.topper created this revision.
craig.topper added reviewers: kito-cheng, asb, VincentWu.
Herald added subscribers: jobnoorman, luke, vkmr, frasercrmck, luismarques, apazos, sameer.abuasal, s.egerton, Jim, benna, psnobl, jocewei, PkmX, the_o, brucehoult, MartinMosbeck, rogfer01, edward-jones, zzheng, jrtc27, shiva0217, niosHD, sabuasal, simoncook, johnrusso, rbar, arichardson.
Herald added a project: All.
craig.topper requested review of this revision.
Herald added subscribers: wangpc, eopXD, MaskRay.
Herald added a project: clang.
This matches the data type of the intrinsics. This can be seen from the
removal of the sext and trunc instructions from the IR.


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D154577

Files:
  clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c


Index: clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
===================================================================
--- clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
+++ clang/test/CodeGen/RISCV/rvk-intrinsics/riscv64-zknh.c
@@ -5,60 +5,52 @@


 // RV64ZKNH-LABEL: @sha512sig0(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig0(i64 [[CONV]])
-// RV64ZKNH-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
-// RV64ZKNH-NEXT:    ret i32 [[CONV1]]
+// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig0(i64 [[TMP0]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
 //
-int sha512sig0(int rs1) {
+long sha512sig0(long rs1) {
   return __builtin_riscv_sha512sig0_64(rs1);
 }


 // RV64ZKNH-LABEL: @sha512sig1(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig1(i64 [[CONV]])
-// RV64ZKNH-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
-// RV64ZKNH-NEXT:    ret i32 [[CONV1]]
+// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sig1(i64 [[TMP0]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
 //
-int sha512sig1(int rs1) {
+long sha512sig1(long rs1) {
   return __builtin_riscv_sha512sig1_64(rs1);
 }


 // RV64ZKNH-LABEL: @sha512sum0(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum0(i64 [[CONV]])
-// RV64ZKNH-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
-// RV64ZKNH-NEXT:    ret i32 [[CONV1]]
+// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum0(i64 [[TMP0]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
 //
-int sha512sum0(int rs1) {
+long sha512sum0(long rs1) {
   return __builtin_riscv_sha512sum0_64(rs1);
 }


 // RV64ZKNH-LABEL: @sha512sum1(
 // RV64ZKNH-NEXT:  entry:
-// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
-// RV64ZKNH-NEXT:    store i32 [[RS1:%.*]], ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i32, ptr [[RS1_ADDR]], align 4
-// RV64ZKNH-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
-// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum1(i64 [[CONV]])
-// RV64ZKNH-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
-// RV64ZKNH-NEXT:    ret i32 [[CONV1]]
+// RV64ZKNH-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
+// RV64ZKNH-NEXT:    store i64 [[RS1:%.*]], ptr [[RS1_ADDR]], align 8
+// RV64ZKNH-NEXT:    [[TMP0:%.*]] = load i64, ptr [[RS1_ADDR]], align 8
+// RV64ZKNH-NEXT:    [[TMP1:%.*]] = call i64 @llvm.riscv.sha512sum1(i64 [[TMP0]])
+// RV64ZKNH-NEXT:    ret i64 [[TMP1]]
 //
-int sha512sum1(int rs1) {
+long sha512sum1(long rs1) {
   return __builtin_riscv_sha512sum1_64(rs1);
 }
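For context, a minimal caller-side sketch (not part of this patch; the function name and flags below are illustrative only, assuming an RV64 target with Zknh enabled): once the builtins take and return 64-bit values, callers can feed them `uint64_t` data directly and no sext/trunc is emitted around the `llvm.riscv.sha512*` intrinsic calls.

  // Hypothetical usage sketch, not from the patch. Built for RV64 with Zknh,
  // e.g.: clang --target=riscv64 -march=rv64gc_zknh -c sha512_sketch.c
  #include <stdint.h>

  uint64_t sha512_mix(uint64_t a, uint64_t e, uint64_t w) {
    // Each builtin takes and returns a 64-bit value, matching the i64
    // operands of the underlying intrinsics, so no conversions are needed.
    uint64_t sum0 = __builtin_riscv_sha512sum0_64(a);
    uint64_t sum1 = __builtin_riscv_sha512sum1_64(e);
    uint64_t sig0 = __builtin_riscv_sha512sig0_64(w);
    uint64_t sig1 = __builtin_riscv_sha512sig1_64(w);
    return sum0 ^ sum1 ^ sig0 ^ sig1;
  }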
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits