Author: Wang Pengcheng
Date: 2023-10-25T16:23:32+08:00
New Revision: 0e27cbe1879f400d64088d8770803e884782a34e
URL: https://github.com/llvm/llvm-project/commit/0e27cbe1879f400d64088d8770803e884782a34e
DIFF: https://github.com/llvm/llvm-project/commit/0e27cbe1879f400d64088d8770803e884782a34e.diff

LOG: [RISCV] Run mem2reg to simplify Zbc tests (#70169)

Added: 

Modified: 
    clang/test/CodeGen/RISCV/rvb-intrinsics/zbc.c

Removed: 


################################################################################
diff --git a/clang/test/CodeGen/RISCV/rvb-intrinsics/zbc.c b/clang/test/CodeGen/RISCV/rvb-intrinsics/zbc.c
index aa5bebe38dd6b2d..ae9153eff155e19 100644
--- a/clang/test/CodeGen/RISCV/rvb-intrinsics/zbc.c
+++ b/clang/test/CodeGen/RISCV/rvb-intrinsics/zbc.c
@@ -1,7 +1,9 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // RUN: %clang_cc1 -triple riscv32 -target-feature +zbc -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s -check-prefix=RV32ZBC
 // RUN: %clang_cc1 -triple riscv64 -target-feature +zbc -emit-llvm %s -o - \
+// RUN:     -disable-O0-optnone | opt -S -passes=mem2reg \
 // RUN:     | FileCheck %s -check-prefix=RV64ZBC
 
 #include <stdint.h>
@@ -9,14 +11,8 @@
 #if __riscv_xlen == 64
 // RV64ZBC-LABEL: @clmul_64(
 // RV64ZBC-NEXT:  entry:
-// RV64ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBC-NEXT:    ret i64 [[TMP2]]
+// RV64ZBC-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.clmul.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// RV64ZBC-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t clmul_64(uint64_t a, uint64_t b) {
   return __builtin_riscv_clmul_64(a, b);
@@ -24,14 +20,8 @@ uint64_t clmul_64(uint64_t a, uint64_t b) {
 
 // RV64ZBC-LABEL: @clmulh_64(
 // RV64ZBC-NEXT:  entry:
-// RV64ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBC-NEXT:    ret i64 [[TMP2]]
+// RV64ZBC-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.clmulh.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// RV64ZBC-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t clmulh_64(uint64_t a, uint64_t b) {
   return __builtin_riscv_clmulh_64(a, b);
@@ -39,14 +29,8 @@ uint64_t clmulh_64(uint64_t a, uint64_t b) {
 
 // RV64ZBC-LABEL: @clmulr_64(
 // RV64ZBC-NEXT:  entry:
-// RV64ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8
-// RV64ZBC-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP0:%.*]] = load i64, ptr [[A_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP1:%.*]] = load i64, ptr [[B_ADDR]], align 8
-// RV64ZBC-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.clmulr.i64(i64 [[TMP0]], i64 [[TMP1]])
-// RV64ZBC-NEXT:    ret i64 [[TMP2]]
+// RV64ZBC-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.clmulr.i64(i64 [[A:%.*]], i64 [[B:%.*]])
+// RV64ZBC-NEXT:    ret i64 [[TMP0]]
 //
 uint64_t clmulr_64(uint64_t a, uint64_t b) {
   return __builtin_riscv_clmulr_64(a, b);
@@ -55,25 +39,13 @@ uint64_t clmulr_64(uint64_t a, uint64_t b) {
 
 // RV32ZBC-LABEL: @clmul_32(
 // RV32ZBC-NEXT:  entry:
-// RV32ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBC-NEXT:    ret i32 [[TMP2]]
+// RV32ZBC-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV32ZBC-NEXT:    ret i32 [[TMP0]]
 //
 // RV64ZBC-LABEL: @clmul_32(
 // RV64ZBC-NEXT:  entry:
-// RV64ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
-// RV64ZBC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV64ZBC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV64ZBC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV64ZBC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV64ZBC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV64ZBC-NEXT:    ret i32 [[TMP2]]
+// RV64ZBC-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.clmul.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV64ZBC-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t clmul_32(uint32_t a, uint32_t b) {
   return __builtin_riscv_clmul_32(a, b);
@@ -82,14 +54,8 @@ uint32_t clmul_32(uint32_t a, uint32_t b) {
 #if __riscv_xlen == 32
 // RV32ZBC-LABEL: @clmulh_32(
 // RV32ZBC-NEXT:  entry:
-// RV32ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBC-NEXT:    ret i32 [[TMP2]]
+// RV32ZBC-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.clmulh.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV32ZBC-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t clmulh_32(uint32_t a, uint32_t b) {
   return __builtin_riscv_clmulh_32(a, b);
@@ -97,14 +63,8 @@ uint32_t clmulh_32(uint32_t a, uint32_t b) {
 
 // RV32ZBC-LABEL: @clmulr_32(
 // RV32ZBC-NEXT:  entry:
-// RV32ZBC-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    [[B_ADDR:%.*]] = alloca i32, align 4
-// RV32ZBC-NEXT:    store i32 [[A:%.*]], ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    store i32 [[B:%.*]], ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP1:%.*]] = load i32, ptr [[B_ADDR]], align 4
-// RV32ZBC-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.clmulr.i32(i32 [[TMP0]], i32 [[TMP1]])
-// RV32ZBC-NEXT:    ret i32 [[TMP2]]
+// RV32ZBC-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.clmulr.i32(i32 [[A:%.*]], i32 [[B:%.*]])
+// RV32ZBC-NEXT:    ret i32 [[TMP0]]
 //
 uint32_t clmulr_32(uint32_t a, uint32_t b) {
   return __builtin_riscv_clmulr_32(a, b);
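
For readers unfamiliar with the test setup, the updated RUN lines amount to
the following standalone pipeline (a sketch: %clang_cc1 is lit's substitution
for a bare "clang -cc1" frontend invocation, shown here for the riscv64 case):

  # Emit unoptimized IR but omit the optnone attribute that -O0 normally
  # adds, so later passes may still transform the functions; then run
  # mem2reg to promote the argument allocas to SSA values.
  clang -cc1 -triple riscv64 -target-feature +zbc -emit-llvm \
      -disable-O0-optnone zbc.c -o - \
    | opt -S -passes=mem2reg

mem2reg is what collapses the alloca/store/load boilerplate into direct
calls to the @llvm.riscv.clmul* intrinsics, which is why each deleted
eight-line CHECK block above shrinks to two lines.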