https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/79015
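Context for reviewers: "sinking" a splat operand means duplicating the scalar broadcast into the loop body so that instruction selection can fold it into a scalar-operand (.vx/.vf) vector instruction. A rough sketch of the two possible lowerings of a vector-add loop, reconstructed from the CHECK lines of the test added in patch 1 (register choices are illustrative, not from an actual compile):

  # With +dont-sink-splat-operands: the splat stays outside the loop.
  vsetivli zero, 4, e32, m1, ta, ma
  vmv.v.x  v8, a1            # one scalar-to-vector (S2V) transfer, paid once
.loop:
  vle32.v  v9, (a0)
  vadd.vv  v9, v9, v8        # vector-vector form, no scalar read in the loop

  # Default behavior: the splat is sunk and folded into a .vx instruction.
.loop:
  vle32.v  v9, (a0)
  vadd.vx  v9, v9, a1        # reads a1 every iteration; on the P670 each
                             # scalar read occupies an S2V transfer-buffer entry

The FIXMEs in the patch record the flip side of this trade-off: the .vx form does not tie up a vector register holding the splat, so hoisting it can raise vector register pressure.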
>From 639d404b3b8a8ca7e92160fa8512459be07e631f Mon Sep 17 00:00:00 2001 From: Michael Maitland <michaeltmaitl...@gmail.com> Date: Mon, 22 Jan 2024 07:53:55 -0800 Subject: [PATCH 1/6] [RISCV] Add sifive-p670 processor This is an OOO core that has a vector unit. For more information see https://www.sifive.com/cores/performance-p650-670. This CPU prefers not to sink splat operands since doing so requires a S2V transfer buffer to move scalars into vectors. Scheduler model and other tuning will come in separate patches. --- clang/test/Driver/riscv-cpus.c | 12 +- clang/test/Misc/target-invalid-cpu-note.c | 4 +- llvm/lib/Target/RISCV/RISCVFeatures.td | 7 + llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 8 + llvm/lib/Target/RISCV/RISCVProcessors.td | 37 ++ .../RISCV/rvv/dont-sink-splat-operands.ll | 353 ++++++++++++++++++ 6 files changed, 418 insertions(+), 3 deletions(-) create mode 100644 llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll diff --git a/clang/test/Driver/riscv-cpus.c b/clang/test/Driver/riscv-cpus.c index d181755bb585070..666a3e2beabeb1e 100644 --- a/clang/test/Driver/riscv-cpus.c +++ b/clang/test/Driver/riscv-cpus.c @@ -241,7 +241,17 @@ // MCPU-SIFIVE-P450-SAME: "-target-feature" "+zbb" // MCPU-SIFIVE-P450-SAME: "-target-feature" "+zbs" // MCPU-SIFIVE-P450-SAME: "-target-abi" "lp64d" -// + +// RUN: %clang -target riscv64 -### -c %s 2>&1 -mcpu=sifive-p670 | FileCheck -check-prefix=MCPU-SIFIVE-P670 %s +// MCPU-SIFIVE-P670: "-target-cpu" "sifive-p670" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+m" "-target-feature" "+a" "-target-feature" "+f" "-target-feature" "+d" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+c" "-target-feature" "+v" "-target-feature" "+zic64b" "-target-feature" "+zicbom" "-target-feature" "+zicbop" "-target-feature" "+zicboz" "-target-feature" "+ziccamoa" "-target-feature" "+ziccif" "-target-feature" "+zicclsm" "-target-feature" "+ziccrse" "-target-feature" "+zicsr" "-target-feature" "+zifencei" "-target-feature" "+zihintntl" "-target-feature" "+zihintpause" "-target-feature" "+zihpm" "-target-feature" "+za64rs" "-target-feature" "+zfhmin" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zba" "-target-feature" "+zbb" "-target-feature" "+zbs" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvbb" "-target-feature" "+zvbc" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zve32f" "-target-feature" "+zve32x" "-target-feature" "+zve64d" "-target-feature" "+zve64f" "-target-feature" "+zve64x" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvkg" "-target-feature" "+zvkn" "-target-feature" "+zvknc" "-target-feature" "+zvkned" "-target-feature" "+zvkng" "-target-feature" "+zvknhb" "-target-feature" "+zvks" "-target-feature" "+zvksc" "-target-feature" "+zvksed" "-target-feature" "+zvksg" "-target-feature" "+zvksh" "-target-feature" "+zvkt" +// MCPU-SIFIVE-P670-SAME: "-target-abi" "lp64d" + // Check failed cases // RUN: not %clang --target=riscv32 -### -c %s 2>&1 -mcpu=generic-rv321 | FileCheck -check-prefix=FAIL-MCPU-NAME %s diff --git a/clang/test/Misc/target-invalid-cpu-note.c b/clang/test/Misc/target-invalid-cpu-note.c index 48e9f05d9b03de2..84aed5c9c36fe47 100644 --- a/clang/test/Misc/target-invalid-cpu-note.c +++ b/clang/test/Misc/target-invalid-cpu-note.c @@ -85,7 +85,7 @@ // RUN: not %clang_cc1 -triple riscv64 -target-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix RISCV64 // RISCV64: error: unknown target CPU 'not-a-cpu' -// RISCV64-NEXT: note: valid target CPU values are: generic-rv64, rocket-rv64, sifive-p450, sifive-s21, sifive-s51, sifive-s54,
sifive-s76, sifive-u54, sifive-u74, sifive-x280, veyron-v1, xiangshan-nanhu{{$}} +// RISCV64-NEXT: note: valid target CPU values are: generic-rv64, rocket-rv64, sifive-p450, sifive-p670, sifive-s21, sifive-s51, sifive-s54, sifive-s76, sifive-u54, sifive-u74, sifive-x280, veyron-v1, xiangshan-nanhu{{$}} // RUN: not %clang_cc1 -triple riscv32 -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE-RISCV32 // TUNE-RISCV32: error: unknown target CPU 'not-a-cpu' @@ -93,4 +93,4 @@ // RUN: not %clang_cc1 -triple riscv64 -tune-cpu not-a-cpu -fsyntax-only %s 2>&1 | FileCheck %s --check-prefix TUNE-RISCV64 // TUNE-RISCV64: error: unknown target CPU 'not-a-cpu' -// TUNE-RISCV64-NEXT: note: valid target CPU values are: generic-rv64, rocket-rv64, sifive-p450, sifive-s21, sifive-s51, sifive-s54, sifive-s76, sifive-u54, sifive-u74, sifive-x280, veyron-v1, xiangshan-nanhu, generic, rocket, sifive-7-series{{$}} +// TUNE-RISCV64-NEXT: note: valid target CPU values are: generic-rv64, rocket-rv64, sifive-p450, sifive-p670, sifive-s21, sifive-s51, sifive-s54, sifive-s76, sifive-u54, sifive-u74, sifive-x280, veyron-v1, xiangshan-nanhu, generic, rocket, sifive-7-series{{$}} diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td index cbb096ba20ae67b..04ee6d6d0547378 100644 --- a/llvm/lib/Target/RISCV/RISCVFeatures.td +++ b/llvm/lib/Target/RISCV/RISCVFeatures.td @@ -1082,6 +1082,13 @@ def TuneShortForwardBranchOpt def HasShortForwardBranchOpt : Predicate<"Subtarget->hasShortForwardBranchOpt()">; def NoShortForwardBranchOpt : Predicate<"!Subtarget->hasShortForwardBranchOpt()">; +// P670 requires a S2V transfer buffer to move scalars into vectors. +// FIXME: Forming .vx/.vf can reduce register pressure. +def TuneDontSinkSplatOperands + : SubtargetFeature<"dont-sink-splat-operands", "DontSinkSplatOperands", + "true", "Don't sink splat operands to enable .vx or .vf " + "instructions">; + def TuneConditionalCompressedMoveFusion : SubtargetFeature<"conditional-cmv-fusion", "HasConditionalCompressedMoveFusion", "true", "Enable branch+c.mv fusion">; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index b41e2f40dc72f01..6737f1c16238909 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -2000,6 +2000,14 @@ bool RISCVTargetLowering::shouldSinkOperands( if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions()) return false; + // Don't sink splat operands if the target prefers it. Some targets require + // S2V transfer buffers and we can run out of them copying the same value + // repeatedly. + // FIXME: It could still be worth doing if it would improve vector register + // pressure and prevent a vector spill.
+ if (Subtarget.dontSinkSplatOperands()) + return false; + for (auto OpIdx : enumerate(I->operands())) { if (!canSplatOperand(I, OpIdx.index())) continue; diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td index d1cd9ba1dd84d1b..c7425de99dc132c 100644 --- a/llvm/lib/Target/RISCV/RISCVProcessors.td +++ b/llvm/lib/Target/RISCV/RISCVProcessors.td @@ -237,6 +237,43 @@ def SIFIVE_P450 : RISCVProcessorModel<"sifive-p450", SiFiveP400Model, TuneLUIADDIFusion, TuneAUIPCADDIFusion]>; +def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", NoSchedModel, + [Feature64Bit, + FeatureStdExtZifencei, + FeatureStdExtM, + FeatureStdExtA, + FeatureStdExtF, + FeatureStdExtD, + FeatureStdExtC, + FeatureStdExtZa64rs, + FeatureStdExtZic64b, + FeatureStdExtZicbop, + FeatureStdExtZicbom, + FeatureStdExtZicboz, + FeatureStdExtZiccamoa, + FeatureStdExtZiccif, + FeatureStdExtZicclsm, + FeatureStdExtZiccrse, + FeatureStdExtZihintntl, + FeatureStdExtZihintpause, + FeatureStdExtZihpm, + FeatureStdExtZba, + FeatureStdExtZbb, + FeatureStdExtZbs, + FeatureStdExtZfhmin, + FeatureStdExtV, + FeatureStdExtZvl128b, + FeatureStdExtZvbb, + FeatureStdExtZvknc, + FeatureStdExtZvkng, + FeatureStdExtZvksc, + FeatureStdExtZvksg, + FeatureFastUnalignedAccess], + [TuneNoDefaultUnroll, + TuneConditionalCompressedMoveFusion, + TuneLUIADDIFusion, + TuneDontSinkSplatOperands]>; + def SYNTACORE_SCR1_BASE : RISCVProcessorModel<"syntacore-scr1-base", SyntacoreSCR1Model, [Feature32Bit, diff --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll new file mode 100644 index 000000000000000..88eac06294240c4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll @@ -0,0 +1,353 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \ +; RUN: -mattr=+dont-sink-splat-operands -riscv-v-vector-bits-min=128 | FileCheck %s + +; Test that we don't sink splat operands when compiling with dont-sink-splat-operands. +; Each scalar register access requires a S2V transfer buffer entry. Using too many +; limits performance. +; FIXME: This is potentially bad for register pressure. Need a better heuristic. 
+ +define void @sink_splat_add(i32* nocapture %a, i32 signext %x) { +; CHECK-LABEL: sink_splat_add: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: li a1, 1024 +; CHECK-NEXT: .LBB0_1: # %vector.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vle32.v v9, (a0) +; CHECK-NEXT: vadd.vv v9, v9, v8 +; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: addi a1, a1, -4 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: bnez a1, .LBB0_1 +; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: ret +entry: + %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 + %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %entry + %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i32, i32* %a, i64 %index + %1 = bitcast i32* %0 to <4 x i32>* + %wide.load = load <4 x i32>, <4 x i32>* %1, align 4 + %2 = add <4 x i32> %wide.load, %broadcast.splat + %3 = bitcast i32* %0 to <4 x i32>* + store <4 x i32> %2, <4 x i32>* %3, align 4 + %index.next = add nuw i64 %index, 4 + %4 = icmp eq i64 %index.next, 1024 + br i1 %4, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body + ret void +} + +declare i64 @llvm.vscale.i64() + +define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) { +; CHECK-LABEL: sink_splat_add_scalable: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a5, vlenb +; CHECK-NEXT: srli a3, a5, 1 +; CHECK-NEXT: li a2, 1024 +; CHECK-NEXT: bgeu a2, a3, .LBB1_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: j .LBB1_5 +; CHECK-NEXT: .LBB1_2: # %vector.ph +; CHECK-NEXT: addi a2, a3, -1 +; CHECK-NEXT: andi a4, a2, 1024 +; CHECK-NEXT: xori a2, a4, 1024 +; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: slli a5, a5, 1 +; CHECK-NEXT: mv a6, a0 +; CHECK-NEXT: mv a7, a2 +; CHECK-NEXT: .LBB1_3: # %vector.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vl2re32.v v10, (a6) +; CHECK-NEXT: vadd.vv v10, v10, v8 +; CHECK-NEXT: vs2r.v v10, (a6) +; CHECK-NEXT: sub a7, a7, a3 +; CHECK-NEXT: add a6, a6, a5 +; CHECK-NEXT: bnez a7, .LBB1_3 +; CHECK-NEXT: # %bb.4: # %middle.block +; CHECK-NEXT: beqz a4, .LBB1_7 +; CHECK-NEXT: .LBB1_5: # %for.body.preheader +; CHECK-NEXT: addi a3, a2, -1024 +; CHECK-NEXT: slli a2, a2, 2 +; CHECK-NEXT: add a0, a0, a2 +; CHECK-NEXT: .LBB1_6: # %for.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: lw a2, 0(a0) +; CHECK-NEXT: add a2, a2, a1 +; CHECK-NEXT: sw a2, 0(a0) +; CHECK-NEXT: addi a3, a3, 1 +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: bnez a3, .LBB1_6 +; CHECK-NEXT: .LBB1_7: # %for.cond.cleanup +; CHECK-NEXT: ret +entry: + %0 = call i64 @llvm.vscale.i64() + %1 = shl i64 %0, 2 + %min.iters.check = icmp ugt i64 %1, 1024 + br i1 %min.iters.check, label %for.body.preheader, label %vector.ph + +vector.ph: ; preds = %entry + %2 = call i64 @llvm.vscale.i64() + %3 = shl i64 %2, 2 + %n.mod.vf = urem i64 1024, %3 + %n.vec = sub nsw i64 1024, %n.mod.vf + %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0 + %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer + %4 = call i64 @llvm.vscale.i64() + %5 = shl i64 %4, 2 + br label %vector.body + +vector.body: ; preds = %vector.body, 
%vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %6 = getelementptr inbounds i32, i32* %a, i64 %index + %7 = bitcast i32* %6 to <vscale x 4 x i32>* + %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4 + %8 = add <vscale x 4 x i32> %wide.load, %broadcast.splat + %9 = bitcast i32* %6 to <vscale x 4 x i32>* + store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4 + %index.next = add nuw i64 %index, %5 + %10 = icmp eq i64 %index.next, %n.vec + br i1 %10, label %middle.block, label %vector.body + +middle.block: ; preds = %vector.body + %cmp.n = icmp eq i64 %n.mod.vf, 0 + br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader + +for.body.preheader: ; preds = %entry, %middle.block + %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ] + br label %for.body + +for.cond.cleanup: ; preds = %for.body, %middle.block + ret void + +for.body: ; preds = %for.body.preheader, %for.body + %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ] + %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv + %11 = load i32, i32* %arrayidx, align 4 + %add = add i32 %11, %x + store i32 %add, i32* %arrayidx, align 4 + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %cmp.not = icmp eq i64 %indvars.iv.next, 1024 + br i1 %cmp.not, label %for.cond.cleanup, label %for.body +} + +declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) + +define void @sink_splat_vp_add(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: sink_splat_vp_add: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: li a1, 1024 +; CHECK-NEXT: .LBB2_1: # %vector.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vle32.v v9, (a0) +; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma +; CHECK-NEXT: vadd.vv v9, v9, v8, v0.t +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: addi a1, a1, -4 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: bnez a1, .LBB2_1 +; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: ret +entry: + %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 + %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %entry + %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds i32, i32* %a, i64 %index + %1 = bitcast i32* %0 to <4 x i32>* + %wide.load = load <4 x i32>, <4 x i32>* %1, align 4 + %2 = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl) + %3 = bitcast i32* %0 to <4 x i32>* + store <4 x i32> %2, <4 x i32>* %3, align 4 + %index.next = add nuw i64 %index, 4 + %4 = icmp eq i64 %index.next, 1024 + br i1 %4, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body + ret void +} + +define void @sink_splat_fadd(float* nocapture %a, float %x) { +; CHECK-LABEL: sink_splat_fadd: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: li a1, 1024 +; CHECK-NEXT: .LBB3_1: # %vector.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vle32.v v9, (a0) +; CHECK-NEXT: vfadd.vv v9, v9, v8 +; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: addi a1, a1, -4 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: bnez 
a1, .LBB3_1 +; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: ret +entry: + %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 + %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %entry + %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds float, float* %a, i64 %index + %1 = bitcast float* %0 to <4 x float>* + %wide.load = load <4 x float>, <4 x float>* %1, align 4 + %2 = fadd <4 x float> %wide.load, %broadcast.splat + %3 = bitcast float* %0 to <4 x float>* + store <4 x float> %2, <4 x float>* %3, align 4 + %index.next = add nuw i64 %index, 4 + %4 = icmp eq i64 %index.next, 1024 + br i1 %4, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body + ret void +} + +define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) { +; CHECK-LABEL: sink_splat_fadd_scalable: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a3, a1, 2 +; CHECK-NEXT: li a2, 1024 +; CHECK-NEXT: bgeu a2, a3, .LBB4_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: j .LBB4_5 +; CHECK-NEXT: .LBB4_2: # %vector.ph +; CHECK-NEXT: addi a2, a3, -1 +; CHECK-NEXT: andi a4, a2, 1024 +; CHECK-NEXT: xori a2, a4, 1024 +; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: mv a5, a0 +; CHECK-NEXT: mv a6, a2 +; CHECK-NEXT: .LBB4_3: # %vector.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vl1re32.v v9, (a5) +; CHECK-NEXT: vfadd.vv v9, v9, v8 +; CHECK-NEXT: vs1r.v v9, (a5) +; CHECK-NEXT: sub a6, a6, a3 +; CHECK-NEXT: add a5, a5, a1 +; CHECK-NEXT: bnez a6, .LBB4_3 +; CHECK-NEXT: # %bb.4: # %middle.block +; CHECK-NEXT: beqz a4, .LBB4_7 +; CHECK-NEXT: .LBB4_5: # %for.body.preheader +; CHECK-NEXT: addi a1, a2, -1024 +; CHECK-NEXT: slli a2, a2, 2 +; CHECK-NEXT: add a0, a0, a2 +; CHECK-NEXT: .LBB4_6: # %for.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: flw fa5, 0(a0) +; CHECK-NEXT: fadd.s fa5, fa5, fa0 +; CHECK-NEXT: fsw fa5, 0(a0) +; CHECK-NEXT: addi a1, a1, 1 +; CHECK-NEXT: addi a0, a0, 4 +; CHECK-NEXT: bnez a1, .LBB4_6 +; CHECK-NEXT: .LBB4_7: # %for.cond.cleanup +; CHECK-NEXT: ret +entry: + %0 = call i64 @llvm.vscale.i64() + %1 = shl i64 %0, 1 + %min.iters.check = icmp ugt i64 %1, 1024 + br i1 %min.iters.check, label %for.body.preheader, label %vector.ph + +vector.ph: ; preds = %entry + %2 = call i64 @llvm.vscale.i64() + %3 = shl i64 %2, 1 + %n.mod.vf = urem i64 1024, %3 + %n.vec = sub nsw i64 1024, %n.mod.vf + %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0 + %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer + %4 = call i64 @llvm.vscale.i64() + %5 = shl i64 %4, 1 + br label %vector.body + +vector.body: ; preds = %vector.body, %vector.ph + %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] + %6 = getelementptr inbounds float, float* %a, i64 %index + %7 = bitcast float* %6 to <vscale x 2 x float>* + %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4 + %8 = fadd <vscale x 2 x float> %wide.load, %broadcast.splat + %9 = bitcast float* %6 to <vscale x 2 x float>* + store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4 + %index.next = add nuw i64 %index, %5 + %10 = icmp eq i64 %index.next, %n.vec + br i1 
%10, label %middle.block, label %vector.body + +middle.block: ; preds = %vector.body + %cmp.n = icmp eq i64 %n.mod.vf, 0 + br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader + +for.body.preheader: ; preds = %entry, %middle.block + %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ] + br label %for.body + +for.cond.cleanup: ; preds = %for.body, %middle.block + ret void + +for.body: ; preds = %for.body.preheader, %for.body + %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ] + %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv + %11 = load float, float* %arrayidx, align 4 + %mul = fadd float %11, %x + store float %mul, float* %arrayidx, align 4 + %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 + %cmp.not = icmp eq i64 %indvars.iv.next, 1024 + br i1 %cmp.not, label %for.cond.cleanup, label %for.body +} + +declare <4 x float> @llvm.vp.fadd.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) + +define void @sink_splat_vp_fadd(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: sink_splat_vp_fadd: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: li a2, 1024 +; CHECK-NEXT: .LBB5_1: # %vector.body +; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 +; CHECK-NEXT: vle32.v v9, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma +; CHECK-NEXT: vfadd.vv v9, v9, v8, v0.t +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma +; CHECK-NEXT: vse32.v v9, (a0) +; CHECK-NEXT: addi a2, a2, -4 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: bnez a2, .LBB5_1 +; CHECK-NEXT: # %bb.2: # %for.cond.cleanup +; CHECK-NEXT: ret +entry: + %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 + %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer + br label %vector.body + +vector.body: ; preds = %vector.body, %entry + %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ] + %0 = getelementptr inbounds float, float* %a, i64 %index + %1 = bitcast float* %0 to <4 x float>* + %wide.load = load <4 x float>, <4 x float>* %1, align 4 + %2 = call <4 x float> @llvm.vp.fadd.v4i32(<4 x float> %wide.load, <4 x float> %broadcast.splat, <4 x i1> %m, i32 %vl) + %3 = bitcast float* %0 to <4 x float>* + store <4 x float> %2, <4 x float>* %3, align 4 + %index.next = add nuw i64 %index, 4 + %4 = icmp eq i64 %index.next, 1024 + br i1 %4, label %for.cond.cleanup, label %vector.body + +for.cond.cleanup: ; preds = %vector.body + ret void +} >From d6f7c9fb615bd067eb3cd7e47c0caa3fe06202c2 Mon Sep 17 00:00:00 2001 From: Michael Maitland <michaeltmaitl...@gmail.com> Date: Mon, 22 Jan 2024 09:29:03 -0800 Subject: [PATCH 2/6] !fixup add auipc fusion --- llvm/lib/Target/RISCV/RISCVProcessors.td | 1 + 1 file changed, 1 insertion(+) diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td index c7425de99dc132c..c5dc2a6e064d752 100644 --- a/llvm/lib/Target/RISCV/RISCVProcessors.td +++ b/llvm/lib/Target/RISCV/RISCVProcessors.td @@ -272,6 +272,7 @@ def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", NoSchedModel, [TuneNoDefaultUnroll, TuneConditionalCompressedMoveFusion, TuneLUIADDIFusion, + TuneAUIPCADDIFusion, TuneDontSinkSplatOperands]>; def SYNTACORE_SCR1_BASE : RISCVProcessorModel<"syntacore-scr1-base", >From 5f812ad9f65ebeb766b2d63e23f3d0132bfdb8a3 Mon Sep 17 00:00:00 2001 From: Michael Maitland <michaeltmaitl...@gmail.com> Date: Mon, 22 Jan 
2024 11:08:37 -0800 Subject: [PATCH 3/6] !fixup update docs/ReleaseNotes.rst --- llvm/docs/ReleaseNotes.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst index c17c834c8081b8a..8956ded9e4b0f64 100644 --- a/llvm/docs/ReleaseNotes.rst +++ b/llvm/docs/ReleaseNotes.rst @@ -165,6 +165,7 @@ Changes to the RISC-V Backend and Zic64b extensions which were introduced as a part of the RISC-V Profiles specification. * The Smepmp 1.0 extension is now supported. +* ``-mcpu=sifive-p670`` was added. Changes to the WebAssembly Backend ---------------------------------- >From c082dd78845985f0d9de12a0ed97003b6c340ec3 Mon Sep 17 00:00:00 2001 From: Michael Maitland <michaeltmaitl...@gmail.com> Date: Mon, 22 Jan 2024 11:10:34 -0800 Subject: [PATCH 4/6] !fixup update test case --- .../RISCV/rvv/dont-sink-splat-operands.ll | 92 +++++++++---------- 1 file changed, 46 insertions(+), 46 deletions(-) diff --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll index 88eac06294240c4..38c1ee6a9c71a5e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll @@ -12,15 +12,15 @@ define void @sink_splat_add(i32* nocapture %a, i32 signext %x) { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 -; CHECK-NEXT: li a1, 1024 +; CHECK-NEXT: lui a1, 1 +; CHECK-NEXT: add a1, a0, a1 ; CHECK-NEXT: .LBB0_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vadd.vv v9, v9, v8 ; CHECK-NEXT: vse32.v v9, (a0) -; CHECK-NEXT: addi a1, a1, -4 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bnez a1, .LBB0_1 +; CHECK-NEXT: bne a0, a1, .LBB0_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup ; CHECK-NEXT: ret entry: @@ -50,43 +50,43 @@ define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_add_scalable: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: srli a3, a5, 1 -; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: bgeu a2, a3, .LBB1_2 +; CHECK-NEXT: srli a2, a5, 1 +; CHECK-NEXT: li a3, 1024 +; CHECK-NEXT: bgeu a3, a2, .LBB1_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: j .LBB1_5 ; CHECK-NEXT: .LBB1_2: # %vector.ph -; CHECK-NEXT: addi a2, a3, -1 -; CHECK-NEXT: andi a4, a2, 1024 -; CHECK-NEXT: xori a2, a4, 1024 +; CHECK-NEXT: addi a3, a2, -1 +; CHECK-NEXT: andi a4, a3, 1024 +; CHECK-NEXT: xori a3, a4, 1024 ; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 ; CHECK-NEXT: slli a5, a5, 1 ; CHECK-NEXT: mv a6, a0 -; CHECK-NEXT: mv a7, a2 +; CHECK-NEXT: mv a7, a3 ; CHECK-NEXT: .LBB1_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vl2re32.v v10, (a6) ; CHECK-NEXT: vadd.vv v10, v10, v8 ; CHECK-NEXT: vs2r.v v10, (a6) -; CHECK-NEXT: sub a7, a7, a3 +; CHECK-NEXT: sub a7, a7, a2 ; CHECK-NEXT: add a6, a6, a5 ; CHECK-NEXT: bnez a7, .LBB1_3 ; CHECK-NEXT: # %bb.4: # %middle.block ; CHECK-NEXT: beqz a4, .LBB1_7 ; CHECK-NEXT: .LBB1_5: # %for.body.preheader -; CHECK-NEXT: addi a3, a2, -1024 -; CHECK-NEXT: slli a2, a2, 2 -; CHECK-NEXT: add a0, a0, a2 +; CHECK-NEXT: slli a2, a3, 2 +; CHECK-NEXT: add a2, a0, a2 +; CHECK-NEXT: lui a3, 1 +; CHECK-NEXT: add a0, a0, a3 ; CHECK-NEXT: .LBB1_6: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: lw a2, 0(a0) -; CHECK-NEXT: add a2, a2, a1 -; 
CHECK-NEXT: sw a2, 0(a0) -; CHECK-NEXT: addi a3, a3, 1 -; CHECK-NEXT: addi a0, a0, 4 -; CHECK-NEXT: bnez a3, .LBB1_6 +; CHECK-NEXT: lw a3, 0(a2) +; CHECK-NEXT: add a3, a3, a1 +; CHECK-NEXT: sw a3, 0(a2) +; CHECK-NEXT: addi a2, a2, 4 +; CHECK-NEXT: bne a2, a0, .LBB1_6 ; CHECK-NEXT: .LBB1_7: # %for.cond.cleanup ; CHECK-NEXT: ret entry: @@ -147,7 +147,8 @@ define void @sink_splat_vp_add(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vmv.v.x v8, a1 -; CHECK-NEXT: li a1, 1024 +; CHECK-NEXT: lui a1, 1 +; CHECK-NEXT: add a1, a0, a1 ; CHECK-NEXT: .LBB2_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v9, (a0) @@ -155,9 +156,8 @@ define void @sink_splat_vp_add(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i ; CHECK-NEXT: vadd.vv v9, v9, v8, v0.t ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v9, (a0) -; CHECK-NEXT: addi a1, a1, -4 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bnez a1, .LBB2_1 +; CHECK-NEXT: bne a0, a1, .LBB2_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup ; CHECK-NEXT: ret entry: @@ -186,15 +186,15 @@ define void @sink_splat_fadd(float* nocapture %a, float %x) { ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: li a1, 1024 +; CHECK-NEXT: lui a1, 1 +; CHECK-NEXT: add a1, a0, a1 ; CHECK-NEXT: .LBB3_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v9, (a0) ; CHECK-NEXT: vfadd.vv v9, v9, v8 ; CHECK-NEXT: vse32.v v9, (a0) -; CHECK-NEXT: addi a1, a1, -4 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bnez a1, .LBB3_1 +; CHECK-NEXT: bne a0, a1, .LBB3_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup ; CHECK-NEXT: ret entry: @@ -222,42 +222,42 @@ define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) { ; CHECK-LABEL: sink_splat_fadd_scalable: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a3, a1, 2 -; CHECK-NEXT: li a2, 1024 -; CHECK-NEXT: bgeu a2, a3, .LBB4_2 +; CHECK-NEXT: srli a2, a1, 2 +; CHECK-NEXT: li a3, 1024 +; CHECK-NEXT: bgeu a3, a2, .LBB4_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a2, 0 +; CHECK-NEXT: li a3, 0 ; CHECK-NEXT: j .LBB4_5 ; CHECK-NEXT: .LBB4_2: # %vector.ph -; CHECK-NEXT: addi a2, a3, -1 -; CHECK-NEXT: andi a4, a2, 1024 -; CHECK-NEXT: xori a2, a4, 1024 +; CHECK-NEXT: addi a3, a2, -1 +; CHECK-NEXT: andi a4, a3, 1024 +; CHECK-NEXT: xori a3, a4, 1024 ; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 ; CHECK-NEXT: mv a5, a0 -; CHECK-NEXT: mv a6, a2 +; CHECK-NEXT: mv a6, a3 ; CHECK-NEXT: .LBB4_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vl1re32.v v9, (a5) ; CHECK-NEXT: vfadd.vv v9, v9, v8 ; CHECK-NEXT: vs1r.v v9, (a5) -; CHECK-NEXT: sub a6, a6, a3 +; CHECK-NEXT: sub a6, a6, a2 ; CHECK-NEXT: add a5, a5, a1 ; CHECK-NEXT: bnez a6, .LBB4_3 ; CHECK-NEXT: # %bb.4: # %middle.block ; CHECK-NEXT: beqz a4, .LBB4_7 ; CHECK-NEXT: .LBB4_5: # %for.body.preheader -; CHECK-NEXT: addi a1, a2, -1024 -; CHECK-NEXT: slli a2, a2, 2 +; CHECK-NEXT: slli a1, a3, 2 +; CHECK-NEXT: add a1, a0, a1 +; CHECK-NEXT: lui a2, 1 ; CHECK-NEXT: add a0, a0, a2 ; CHECK-NEXT: .LBB4_6: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: flw fa5, 0(a0) +; CHECK-NEXT: flw fa5, 0(a1) ; CHECK-NEXT: fadd.s fa5, fa5, fa0 -; CHECK-NEXT: fsw fa5, 0(a0) -; CHECK-NEXT: addi a1, a1, 1 -; CHECK-NEXT: addi a0, a0, 4 -; CHECK-NEXT: bnez a1, 
.LBB4_6 +; CHECK-NEXT: fsw fa5, 0(a1) +; CHECK-NEXT: addi a1, a1, 4 +; CHECK-NEXT: bne a1, a0, .LBB4_6 ; CHECK-NEXT: .LBB4_7: # %for.cond.cleanup ; CHECK-NEXT: ret entry: @@ -318,7 +318,8 @@ define void @sink_splat_vp_fadd(float* nocapture %a, float %x, <4 x i1> %m, i32 ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: li a2, 1024 +; CHECK-NEXT: lui a2, 1 +; CHECK-NEXT: add a2, a0, a2 ; CHECK-NEXT: .LBB5_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vle32.v v9, (a0) @@ -326,9 +327,8 @@ define void @sink_splat_vp_fadd(float* nocapture %a, float %x, <4 x i1> %m, i32 ; CHECK-NEXT: vfadd.vv v9, v9, v8, v0.t ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma ; CHECK-NEXT: vse32.v v9, (a0) -; CHECK-NEXT: addi a2, a2, -4 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bnez a2, .LBB5_1 +; CHECK-NEXT: bne a0, a2, .LBB5_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup ; CHECK-NEXT: ret entry: >From e87902eb91b7ea200887f68de1e81885d80487ca Mon Sep 17 00:00:00 2001 From: Michael Maitland <michaeltmaitl...@gmail.com> Date: Tue, 23 Jan 2024 11:57:27 -0800 Subject: [PATCH 5/6] !fixup one feature, one line --- clang/test/Driver/riscv-cpus.c | 49 +++++++++++++++++++++++++++++----- 1 file changed, 43 insertions(+), 6 deletions(-) diff --git a/clang/test/Driver/riscv-cpus.c b/clang/test/Driver/riscv-cpus.c index 666a3e2beabeb1e..faf5d2da23c9db3 100644 --- a/clang/test/Driver/riscv-cpus.c +++ b/clang/test/Driver/riscv-cpus.c @@ -244,12 +244,49 @@ // RUN: %clang -target riscv64 -### -c %s 2>&1 -mcpu=sifive-p670 | FileCheck -check-prefix=MCPU-SIFIVE-P670 %s // MCPU-SIFIVE-P670: "-target-cpu" "sifive-p670" -// MCPU-SIFIVE-P670-SAME: "-target-feature" "+m" "-target-feature" "+a" "-target-feature" "+f" "-target-feature" "+d" -// MCPU-SIFIVE-P670-SAME: "-target-feature" "+c" "-target-feature" "+v" "-target-feature" "+zic64b" "-target-feature" "+zicbom" "-target-feature" "+zicbop" "-target-feature" "+zicboz" "-target-feature" "+ziccamoa" "-target-feature" "+ziccif" "-target-feature" "+zicclsm" "-target-feature" "+ziccrse" "-target-feature" "+zicsr" "-target-feature" "+zifencei" "-target-feature" "+zihintntl" "-target-feature" "+zihintpause" "-target-feature" "+zihpm" "-target-feature" "+za64rs" "-target-feature" "+zfhmin" -// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zba" "-target-feature" "+zbb" "-target-feature" "+zbs" -// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvbb" "-target-feature" "+zvbc" -// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zve32f" "-target-feature" "+zve32x" "-target-feature" "+zve64d" "-target-feature" "+zve64f" "-target-feature" "+zve64x" -// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvkg" "-target-feature" "+zvkn" "-target-feature" "+zvknc" "-target-feature" "+zvkned" "-target-feature" "+zvkng" "-target-feature" "+zvknhb" "-target-feature" "+zvks" "-target-feature" "+zvksc" "-target-feature" "+zvksed" "-target-feature" "+zvksg" "-target-feature" "+zvksh" "-target-feature" "+zvkt" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+m" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+a" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+f" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+d" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+c" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+v" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zic64b" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zicbom" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zicbop" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zicboz" +// 
MCPU-SIFIVE-P670-SAME: "-target-feature" "+ziccamoa" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+ziccif" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zicclsm" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+ziccrse" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zicsr" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zifencei" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zihintntl" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zihintpause" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zihpm" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+za64rs" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zfhmin" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zba" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zbb" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zbs" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvbb" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvbc" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zve32f" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zve32x" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zve64d" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zve64f" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zve64x" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvkg" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvkn" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvknc" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvkned" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvkng" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvknhb" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvks" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvksc" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvksed" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvksg" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvksh" +// MCPU-SIFIVE-P670-SAME: "-target-feature" "+zvkt" // MCPU-SIFIVE-P670-SAME: "-target-abi" "lp64d" // Check failed cases >From 5d7bb3920e93df4559591e840946db6d83a64c79 Mon Sep 17 00:00:00 2001 From: Michael Maitland <michaeltmaitl...@gmail.com> Date: Tue, 23 Jan 2024 12:38:39 -0800 Subject: [PATCH 6/6] !fixup remove DontSinkSplatOperands from this patch --- llvm/lib/Target/RISCV/RISCVFeatures.td | 7 - llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 8 - llvm/lib/Target/RISCV/RISCVProcessors.td | 3 +- .../RISCV/rvv/dont-sink-splat-operands.ll | 353 ------------------ 4 files changed, 1 insertion(+), 370 deletions(-) delete mode 100644 llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td index 04ee6d6d0547378..cbb096ba20ae67b 100644 --- a/llvm/lib/Target/RISCV/RISCVFeatures.td +++ b/llvm/lib/Target/RISCV/RISCVFeatures.td @@ -1082,13 +1082,6 @@ def TuneShortForwardBranchOpt def HasShortForwardBranchOpt : Predicate<"Subtarget->hasShortForwardBranchOpt()">; def NoShortForwardBranchOpt : Predicate<"!Subtarget->hasShortForwardBranchOpt()">; -// P670 requires a S2V transfer buffer to move scalars into vectors. -// FIXME: Forming .vx/.vf can reduce register pressure. 
-def TuneDontSinkSplatOperands - : SubtargetFeature<"dont-sink-splat-operands", "DontSinkSplatOperands", - "true", "Don't sink splat operands to enable .vx or .vf " - "instructions">; - def TuneConditionalCompressedMoveFusion : SubtargetFeature<"conditional-cmv-fusion", "HasConditionalCompressedMoveFusion", "true", "Enable branch+c.mv fusion">; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp index 6737f1c16238909..b41e2f40dc72f01 100644 --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -2000,14 +2000,6 @@ bool RISCVTargetLowering::shouldSinkOperands( if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions()) return false; - // Don't sink splat operands if the target prefers it. Some targets require - // S2V transfer buffers and we can run out of them copying the same value - // repeatedly. - // FIXME: It could still be worth doing if it would improve vector register - // pressure and prevent a vector spill. - if (Subtarget.dontSinkSplatOperands()) - return false; - for (auto OpIdx : enumerate(I->operands())) { if (!canSplatOperand(I, OpIdx.index())) continue; diff --git a/llvm/lib/Target/RISCV/RISCVProcessors.td b/llvm/lib/Target/RISCV/RISCVProcessors.td index c5dc2a6e064d752..db98702989a7430 100644 --- a/llvm/lib/Target/RISCV/RISCVProcessors.td +++ b/llvm/lib/Target/RISCV/RISCVProcessors.td @@ -272,8 +272,7 @@ def SIFIVE_P670 : RISCVProcessorModel<"sifive-p670", NoSchedModel, [TuneNoDefaultUnroll, TuneConditionalCompressedMoveFusion, TuneLUIADDIFusion, - TuneAUIPCADDIFusion, - TuneDontSinkSplatOperands]>; + TuneAUIPCADDIFusion]>; def SYNTACORE_SCR1_BASE : RISCVProcessorModel<"syntacore-scr1-base", SyntacoreSCR1Model, diff --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll deleted file mode 100644 index 38c1ee6a9c71a5e..000000000000000 --- a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll +++ /dev/null @@ -1,353 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \ -; RUN: -mattr=+dont-sink-splat-operands -riscv-v-vector-bits-min=128 | FileCheck %s - -; Test that we don't sink splat operands when compiling with dont-sink-splat-operands. -; Each scalar register access requires a S2V transfer buffer entry. Using too many -; limits performance. -; FIXME: This is potentially bad for register pressure. Need a better heuristic.
- -define void @sink_splat_add(i32* nocapture %a, i32 signext %x) { -; CHECK-LABEL: sink_splat_add: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vmv.v.x v8, a1 -; CHECK-NEXT: lui a1, 1 -; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: .LBB0_1: # %vector.body -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vle32.v v9, (a0) -; CHECK-NEXT: vadd.vv v9, v9, v8 -; CHECK-NEXT: vse32.v v9, (a0) -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bne a0, a1, .LBB0_1 -; CHECK-NEXT: # %bb.2: # %for.cond.cleanup -; CHECK-NEXT: ret -entry: - %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 - %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer - br label %vector.body - -vector.body: ; preds = %vector.body, %entry - %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ] - %0 = getelementptr inbounds i32, i32* %a, i64 %index - %1 = bitcast i32* %0 to <4 x i32>* - %wide.load = load <4 x i32>, <4 x i32>* %1, align 4 - %2 = add <4 x i32> %wide.load, %broadcast.splat - %3 = bitcast i32* %0 to <4 x i32>* - store <4 x i32> %2, <4 x i32>* %3, align 4 - %index.next = add nuw i64 %index, 4 - %4 = icmp eq i64 %index.next, 1024 - br i1 %4, label %for.cond.cleanup, label %vector.body - -for.cond.cleanup: ; preds = %vector.body - ret void -} - -declare i64 @llvm.vscale.i64() - -define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) { -; CHECK-LABEL: sink_splat_add_scalable: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: srli a2, a5, 1 -; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: bgeu a3, a2, .LBB1_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: j .LBB1_5 -; CHECK-NEXT: .LBB1_2: # %vector.ph -; CHECK-NEXT: addi a3, a2, -1 -; CHECK-NEXT: andi a4, a3, 1024 -; CHECK-NEXT: xori a3, a4, 1024 -; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma -; CHECK-NEXT: vmv.v.x v8, a1 -; CHECK-NEXT: slli a5, a5, 1 -; CHECK-NEXT: mv a6, a0 -; CHECK-NEXT: mv a7, a3 -; CHECK-NEXT: .LBB1_3: # %vector.body -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vl2re32.v v10, (a6) -; CHECK-NEXT: vadd.vv v10, v10, v8 -; CHECK-NEXT: vs2r.v v10, (a6) -; CHECK-NEXT: sub a7, a7, a2 -; CHECK-NEXT: add a6, a6, a5 -; CHECK-NEXT: bnez a7, .LBB1_3 -; CHECK-NEXT: # %bb.4: # %middle.block -; CHECK-NEXT: beqz a4, .LBB1_7 -; CHECK-NEXT: .LBB1_5: # %for.body.preheader -; CHECK-NEXT: slli a2, a3, 2 -; CHECK-NEXT: add a2, a0, a2 -; CHECK-NEXT: lui a3, 1 -; CHECK-NEXT: add a0, a0, a3 -; CHECK-NEXT: .LBB1_6: # %for.body -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: lw a3, 0(a2) -; CHECK-NEXT: add a3, a3, a1 -; CHECK-NEXT: sw a3, 0(a2) -; CHECK-NEXT: addi a2, a2, 4 -; CHECK-NEXT: bne a2, a0, .LBB1_6 -; CHECK-NEXT: .LBB1_7: # %for.cond.cleanup -; CHECK-NEXT: ret -entry: - %0 = call i64 @llvm.vscale.i64() - %1 = shl i64 %0, 2 - %min.iters.check = icmp ugt i64 %1, 1024 - br i1 %min.iters.check, label %for.body.preheader, label %vector.ph - -vector.ph: ; preds = %entry - %2 = call i64 @llvm.vscale.i64() - %3 = shl i64 %2, 2 - %n.mod.vf = urem i64 1024, %3 - %n.vec = sub nsw i64 1024, %n.mod.vf - %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0 - %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer - %4 = call i64 @llvm.vscale.i64() - %5 = shl i64 %4, 2 - br label %vector.body - -vector.body: ; preds = %vector.body, %vector.ph - 
%index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] - %6 = getelementptr inbounds i32, i32* %a, i64 %index - %7 = bitcast i32* %6 to <vscale x 4 x i32>* - %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4 - %8 = add <vscale x 4 x i32> %wide.load, %broadcast.splat - %9 = bitcast i32* %6 to <vscale x 4 x i32>* - store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4 - %index.next = add nuw i64 %index, %5 - %10 = icmp eq i64 %index.next, %n.vec - br i1 %10, label %middle.block, label %vector.body - -middle.block: ; preds = %vector.body - %cmp.n = icmp eq i64 %n.mod.vf, 0 - br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader - -for.body.preheader: ; preds = %entry, %middle.block - %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ] - br label %for.body - -for.cond.cleanup: ; preds = %for.body, %middle.block - ret void - -for.body: ; preds = %for.body.preheader, %for.body - %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ] - %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv - %11 = load i32, i32* %arrayidx, align 4 - %add = add i32 %11, %x - store i32 %add, i32* %arrayidx, align 4 - %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 - %cmp.not = icmp eq i64 %indvars.iv.next, 1024 - br i1 %cmp.not, label %for.cond.cleanup, label %for.body -} - -declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32) - -define void @sink_splat_vp_add(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: sink_splat_vp_add: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vmv.v.x v8, a1 -; CHECK-NEXT: lui a1, 1 -; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: .LBB2_1: # %vector.body -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vle32.v v9, (a0) -; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma -; CHECK-NEXT: vadd.vv v9, v9, v8, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vse32.v v9, (a0) -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bne a0, a1, .LBB2_1 -; CHECK-NEXT: # %bb.2: # %for.cond.cleanup -; CHECK-NEXT: ret -entry: - %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0 - %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer - br label %vector.body - -vector.body: ; preds = %vector.body, %entry - %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ] - %0 = getelementptr inbounds i32, i32* %a, i64 %index - %1 = bitcast i32* %0 to <4 x i32>* - %wide.load = load <4 x i32>, <4 x i32>* %1, align 4 - %2 = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl) - %3 = bitcast i32* %0 to <4 x i32>* - store <4 x i32> %2, <4 x i32>* %3, align 4 - %index.next = add nuw i64 %index, 4 - %4 = icmp eq i64 %index.next, 1024 - br i1 %4, label %for.cond.cleanup, label %vector.body - -for.cond.cleanup: ; preds = %vector.body - ret void -} - -define void @sink_splat_fadd(float* nocapture %a, float %x) { -; CHECK-LABEL: sink_splat_fadd: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: lui a1, 1 -; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: .LBB3_1: # %vector.body -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vle32.v v9, (a0) -; CHECK-NEXT: vfadd.vv v9, v9, v8 -; CHECK-NEXT: vse32.v v9, (a0) -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bne a0, a1, .LBB3_1 
-; CHECK-NEXT: # %bb.2: # %for.cond.cleanup -; CHECK-NEXT: ret -entry: - %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 - %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer - br label %vector.body - -vector.body: ; preds = %vector.body, %entry - %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ] - %0 = getelementptr inbounds float, float* %a, i64 %index - %1 = bitcast float* %0 to <4 x float>* - %wide.load = load <4 x float>, <4 x float>* %1, align 4 - %2 = fadd <4 x float> %wide.load, %broadcast.splat - %3 = bitcast float* %0 to <4 x float>* - store <4 x float> %2, <4 x float>* %3, align 4 - %index.next = add nuw i64 %index, 4 - %4 = icmp eq i64 %index.next, 1024 - br i1 %4, label %for.cond.cleanup, label %vector.body - -for.cond.cleanup: ; preds = %vector.body - ret void -} - -define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) { -; CHECK-LABEL: sink_splat_fadd_scalable: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: srli a2, a1, 2 -; CHECK-NEXT: li a3, 1024 -; CHECK-NEXT: bgeu a3, a2, .LBB4_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: li a3, 0 -; CHECK-NEXT: j .LBB4_5 -; CHECK-NEXT: .LBB4_2: # %vector.ph -; CHECK-NEXT: addi a3, a2, -1 -; CHECK-NEXT: andi a4, a3, 1024 -; CHECK-NEXT: xori a3, a4, 1024 -; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: mv a5, a0 -; CHECK-NEXT: mv a6, a3 -; CHECK-NEXT: .LBB4_3: # %vector.body -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vl1re32.v v9, (a5) -; CHECK-NEXT: vfadd.vv v9, v9, v8 -; CHECK-NEXT: vs1r.v v9, (a5) -; CHECK-NEXT: sub a6, a6, a2 -; CHECK-NEXT: add a5, a5, a1 -; CHECK-NEXT: bnez a6, .LBB4_3 -; CHECK-NEXT: # %bb.4: # %middle.block -; CHECK-NEXT: beqz a4, .LBB4_7 -; CHECK-NEXT: .LBB4_5: # %for.body.preheader -; CHECK-NEXT: slli a1, a3, 2 -; CHECK-NEXT: add a1, a0, a1 -; CHECK-NEXT: lui a2, 1 -; CHECK-NEXT: add a0, a0, a2 -; CHECK-NEXT: .LBB4_6: # %for.body -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: flw fa5, 0(a1) -; CHECK-NEXT: fadd.s fa5, fa5, fa0 -; CHECK-NEXT: fsw fa5, 0(a1) -; CHECK-NEXT: addi a1, a1, 4 -; CHECK-NEXT: bne a1, a0, .LBB4_6 -; CHECK-NEXT: .LBB4_7: # %for.cond.cleanup -; CHECK-NEXT: ret -entry: - %0 = call i64 @llvm.vscale.i64() - %1 = shl i64 %0, 1 - %min.iters.check = icmp ugt i64 %1, 1024 - br i1 %min.iters.check, label %for.body.preheader, label %vector.ph - -vector.ph: ; preds = %entry - %2 = call i64 @llvm.vscale.i64() - %3 = shl i64 %2, 1 - %n.mod.vf = urem i64 1024, %3 - %n.vec = sub nsw i64 1024, %n.mod.vf - %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0 - %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer - %4 = call i64 @llvm.vscale.i64() - %5 = shl i64 %4, 1 - br label %vector.body - -vector.body: ; preds = %vector.body, %vector.ph - %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ] - %6 = getelementptr inbounds float, float* %a, i64 %index - %7 = bitcast float* %6 to <vscale x 2 x float>* - %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4 - %8 = fadd <vscale x 2 x float> %wide.load, %broadcast.splat - %9 = bitcast float* %6 to <vscale x 2 x float>* - store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4 - %index.next = add nuw i64 %index, %5 - %10 = icmp eq i64 %index.next, %n.vec - br i1 %10, label 
%middle.block, label %vector.body - -middle.block: ; preds = %vector.body - %cmp.n = icmp eq i64 %n.mod.vf, 0 - br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader - -for.body.preheader: ; preds = %entry, %middle.block - %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ] - br label %for.body - -for.cond.cleanup: ; preds = %for.body, %middle.block - ret void - -for.body: ; preds = %for.body.preheader, %for.body - %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ] - %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv - %11 = load float, float* %arrayidx, align 4 - %mul = fadd float %11, %x - store float %mul, float* %arrayidx, align 4 - %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1 - %cmp.not = icmp eq i64 %indvars.iv.next, 1024 - br i1 %cmp.not, label %for.cond.cleanup, label %for.body -} - -declare <4 x float> @llvm.vp.fadd.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32) - -define void @sink_splat_vp_fadd(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: sink_splat_vp_fadd: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vfmv.v.f v8, fa0 -; CHECK-NEXT: lui a2, 1 -; CHECK-NEXT: add a2, a0, a2 -; CHECK-NEXT: .LBB5_1: # %vector.body -; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vle32.v v9, (a0) -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma -; CHECK-NEXT: vfadd.vv v9, v9, v8, v0.t -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma -; CHECK-NEXT: vse32.v v9, (a0) -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bne a0, a2, .LBB5_1 -; CHECK-NEXT: # %bb.2: # %for.cond.cleanup -; CHECK-NEXT: ret -entry: - %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0 - %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer - br label %vector.body - -vector.body: ; preds = %vector.body, %entry - %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ] - %0 = getelementptr inbounds float, float* %a, i64 %index - %1 = bitcast float* %0 to <4 x float>* - %wide.load = load <4 x float>, <4 x float>* %1, align 4 - %2 = call <4 x float> @llvm.vp.fadd.v4i32(<4 x float> %wide.load, <4 x float> %broadcast.splat, <4 x i1> %m, i32 %vl) - %3 = bitcast float* %0 to <4 x float>* - store <4 x float> %2, <4 x float>* %3, align 4 - %index.next = add nuw i64 %index, 4 - %4 = icmp eq i64 %index.next, 1024 - br i1 %4, label %for.cond.cleanup, label %vector.body - -for.cond.cleanup: ; preds = %vector.body - ret void -} _______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits