https://github.com/4vtomat updated https://github.com/llvm/llvm-project/pull/150724

>From 689addb5c3aeffeac70abc69af0ac3b6b48439b8 Mon Sep 17 00:00:00 2001
From: Brandon Wu <songwu0...@gmail.com>
Date: Fri, 25 Jul 2025 16:49:54 -0700
Subject: [PATCH 1/2] [RISCV][llvm] Support fixed-length vector inline assembly constraints

---
 .../riscv-inline-asm-fixed-length-vector.c  | 39 +++++++++++++++++++
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 34 +++++++++++++++-
 2 files changed, 71 insertions(+), 2 deletions(-)
 create mode 100644 clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c

diff --git a/clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c b/clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c
new file mode 100644
index 0000000000000..0bfd9d6f158c6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c
@@ -0,0 +1,39 @@
+// REQUIRES: riscv-registered-target
+
+// RUN: %clang_cc1 -triple riscv32 -target-feature +v \
+// RUN:   -mvscale-min=2 -mvscale-max=2 -O2 -emit-llvm %s -o - \
+// RUN:   | FileCheck %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -mvscale-min=2 -mvscale-max=2 -O2 -emit-llvm %s -o - \
+// RUN:   | FileCheck %s
+
+// Test RISC-V V-extension fixed-length vector inline assembly constraints.
+#include <riscv_vector.h>
+
+typedef vbool1_t fixed_bool1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
+typedef vint32m1_t fixed_i32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
+typedef vint8mf2_t fixed_i8mf2_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 2)));
+
+fixed_i32m1_t test_vr(fixed_i32m1_t a) {
+// CHECK-LABEL: define{{.*}} @test_vr
+// CHECK: %0 = tail call <4 x i32> asm sideeffect "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<4 x i32> %a, <4 x i32> %a)
+  fixed_i32m1_t ret;
+  asm volatile ("vadd.vv %0, %1, %2" : "=vr"(ret) : "vr"(a), "vr"(a));
+  return ret;
+}
+
+fixed_i8mf2_t test_vd(fixed_i8mf2_t a) {
+// CHECK-LABEL: define{{.*}} @test_vd
+// CHECK: %0 = tail call <8 x i8> asm sideeffect "vadd.vv $0, $1, $2", "=^vd,^vr,^vr"(<8 x i8> %a, <8 x i8> %a)
+  fixed_i8mf2_t ret;
+  asm volatile ("vadd.vv %0, %1, %2" : "=vd"(ret) : "vr"(a), "vr"(a));
+  return ret;
+}
+
+fixed_bool1_t test_vm(fixed_bool1_t a) {
+// CHECK-LABEL: define{{.*}} @test_vm
+// CHECK: %1 = tail call <16 x i8> asm sideeffect "vmand.mm $0, $1, $2", "=^vm,^vm,^vm"(<16 x i8> %a, <16 x i8> %a)
+  fixed_bool1_t ret;
+  asm volatile ("vmand.mm %0, %1, %2" : "=vm"(ret) : "vm"(a), "vm"(a));
+  return ret;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 809fbc8926e35..53550e69e4c6d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -23133,6 +23133,12 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                            &RISCV::VRN2M4RegClass}) {
       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
         return std::make_pair(0U, RC);
+
+      if (VT.isFixedLengthVector() && Subtarget.useRVVForFixedLengthVectors()) {
+        MVT ContainerVT = getContainerForFixedLengthVector(VT);
+        if (TRI->isTypeLegalForClass(*RC, ContainerVT))
+          return std::make_pair(0U, RC);
+      }
     }
   } else if (Constraint == "vd") {
     for (const auto *RC :
@@ -23146,10 +23152,24 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                            &RISCV::VRN2M4NoV0RegClass}) {
       if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
         return std::make_pair(0U, RC);
+
+      if (VT.isFixedLengthVector() && Subtarget.useRVVForFixedLengthVectors()) {
+        MVT ContainerVT = getContainerForFixedLengthVector(VT);
+        if (TRI->isTypeLegalForClass(*RC, ContainerVT))
+          return std::make_pair(0U, RC);
+      }
     }
   } else if (Constraint == "vm") {
     if (TRI->isTypeLegalForClass(RISCV::VMV0RegClass, VT.SimpleTy))
       return std::make_pair(0U, &RISCV::VMV0RegClass);
+
+    if (VT.isFixedLengthVector() && Subtarget.useRVVForFixedLengthVectors()) {
+      MVT ContainerVT = getContainerForFixedLengthVector(VT);
+      // VT here is coerced to vector with i8 elements, so we need to check if
+      // this is a M1 register here instead of checking VMV0RegClass.
+      if (TRI->isTypeLegalForClass(RISCV::VRRegClass, ContainerVT))
+        return std::make_pair(0U, &RISCV::VMV0RegClass);
+    }
   } else if (Constraint == "cr") {
     if (VT == MVT::f16 && Subtarget.hasStdExtZhinxmin())
       return std::make_pair(0U, &RISCV::GPRF16CRegClass);
@@ -24027,7 +24047,12 @@ bool RISCVTargetLowering::splitValueIntoRegisterParts(
     return true;
   }
 
-  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
+  if ((ValueVT.isScalableVector() || ValueVT.isFixedLengthVector()) &&
+      PartVT.isScalableVector()) {
+    if (ValueVT.isFixedLengthVector()) {
+      ValueVT = getContainerForFixedLengthVector(ValueVT.getSimpleVT());
+      Val = convertToScalableVector(ValueVT, Val, DAG, Subtarget);
+    }
     LLVMContext &Context = *DAG.getContext();
     EVT ValueEltVT = ValueVT.getVectorElementType();
     EVT PartEltVT = PartVT.getVectorElementType();
@@ -24097,12 +24122,17 @@ SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
     return Val;
   }
 
-  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
+  if ((ValueVT.isScalableVector() || ValueVT.isFixedLengthVector()) &&
+      PartVT.isScalableVector()) {
     LLVMContext &Context = *DAG.getContext();
     SDValue Val = Parts[0];
     EVT ValueEltVT = ValueVT.getVectorElementType();
     EVT PartEltVT = PartVT.getVectorElementType();
     unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinValue();
+    if (ValueVT.isFixedLengthVector())
+      ValueVTBitSize = getContainerForFixedLengthVector(ValueVT.getSimpleVT())
+                           .getSizeInBits()
+                           .getKnownMinValue();
     unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinValue();
     if (PartVTBitSize % ValueVTBitSize == 0) {
       assert(PartVTBitSize >= ValueVTBitSize);

>From 0c273ed7c0e77bb23e0d0bcccdc301af27a894d3 Mon Sep 17 00:00:00 2001
From: Brandon Wu <songwu0...@gmail.com>
Date: Sat, 26 Jul 2025 00:56:59 -0700
Subject: [PATCH 2/2] fixup! [RISCV][llvm] Support fixed-length vector inline assembly constraints

---
 .../riscv-inline-asm-fixed-length-vector.c  | 27 ++++++++
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp |  9 ++-
 .../RISCV/inline-asm-fixed-v-constraint.ll  | 68 +++++++++++++++++++
 3 files changed, 101 insertions(+), 3 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/inline-asm-fixed-v-constraint.ll

diff --git a/clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c b/clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c
index 0bfd9d6f158c6..699c588950c62 100644
--- a/clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c
+++ b/clang/test/CodeGen/RISCV/riscv-inline-asm-fixed-length-vector.c
@@ -9,11 +9,16 @@
 
 // Test RISC-V V-extension fixed-length vector inline assembly constraints.
 #include <riscv_vector.h>
+#include <stdbool.h>
 
 typedef vbool1_t fixed_bool1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
 typedef vint32m1_t fixed_i32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
 typedef vint8mf2_t fixed_i8mf2_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen / 2)));
 
+typedef bool bx2 __attribute__((ext_vector_type(16)));
+typedef int i32x2 __attribute__((ext_vector_type(2)));
+typedef char i8x4 __attribute__((ext_vector_type(4)));
+
 fixed_i32m1_t test_vr(fixed_i32m1_t a) {
 // CHECK-LABEL: define{{.*}} @test_vr
 // CHECK: %0 = tail call <4 x i32> asm sideeffect "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<4 x i32> %a, <4 x i32> %a)
@@ -22,6 +27,14 @@ fixed_i32m1_t test_vr(fixed_i32m1_t a) {
   return ret;
 }
 
+i32x2 test_vr2(i32x2 a) {
+// CHECK-LABEL: define{{.*}} @test_vr2
+// CHECK: %1 = tail call <2 x i32> asm sideeffect "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<2 x i32> %0, <2 x i32> %0)
+  i32x2 ret;
+  asm volatile ("vadd.vv %0, %1, %2" : "=vr"(ret) : "vr"(a), "vr"(a));
+  return ret;
+}
+
 fixed_i8mf2_t test_vd(fixed_i8mf2_t a) {
 // CHECK-LABEL: define{{.*}} @test_vd
 // CHECK: %0 = tail call <8 x i8> asm sideeffect "vadd.vv $0, $1, $2", "=^vd,^vr,^vr"(<8 x i8> %a, <8 x i8> %a)
@@ -30,6 +43,14 @@ fixed_i8mf2_t test_vd(fixed_i8mf2_t a) {
   return ret;
 }
 
+i8x4 test_vd2(i8x4 a) {
+// CHECK-LABEL: define{{.*}} @test_vd2
+// CHECK: %1 = tail call <4 x i8> asm sideeffect "vadd.vv $0, $1, $2", "=^vd,^vr,^vr"(<4 x i8> %0, <4 x i8> %0)
+  i8x4 ret;
+  asm volatile ("vadd.vv %0, %1, %2" : "=vd"(ret) : "vr"(a), "vr"(a));
+  return ret;
+}
+
 fixed_bool1_t test_vm(fixed_bool1_t a) {
 // CHECK-LABEL: define{{.*}} @test_vm
 // CHECK: %1 = tail call <16 x i8> asm sideeffect "vmand.mm $0, $1, $2", "=^vm,^vm,^vm"(<16 x i8> %a, <16 x i8> %a)
@@ -37,3 +58,9 @@ fixed_bool1_t test_vm(fixed_bool1_t a) {
   asm volatile ("vmand.mm %0, %1, %2" : "=vm"(ret) : "vm"(a), "vm"(a));
   return ret;
 }
+
+void test_vm2(bx2 a) {
+// CHECK-LABEL: define{{.*}} @test_vm2
+// CHECK: tail call void asm sideeffect "dummy $0", "^vm"(<16 x i1> %a1)
+  asm volatile ("dummy %0" :: "vm"(a));
+}
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 53550e69e4c6d..bad87d69a69f8 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -23165,8 +23165,8 @@ RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
 
     if (VT.isFixedLengthVector() && Subtarget.useRVVForFixedLengthVectors()) {
       MVT ContainerVT = getContainerForFixedLengthVector(VT);
-      // VT here is coerced to vector with i8 elements, so we need to check if
-      // this is a M1 register here instead of checking VMV0RegClass.
+      // VT here might be coerced to vector with i8 elements, so we need to
+      // check if this is a M1 register here instead of checking VMV0RegClass.
       if (TRI->isTypeLegalForClass(RISCV::VRRegClass, ContainerVT))
         return std::make_pair(0U, &RISCV::VMV0RegClass);
     }
@@ -24150,7 +24150,10 @@ SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
             EVT::getVectorVT(Context, ValueEltVT, Count, /*IsScalable=*/true);
         Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
       }
-      Val = DAG.getExtractSubvector(DL, ValueVT, Val, 0);
+      if (ValueVT.isFixedLengthVector())
+        Val = convertFromScalableVector(ValueVT, Val, DAG, Subtarget);
+      else
+        Val = DAG.getExtractSubvector(DL, ValueVT, Val, 0);
       return Val;
     }
   }
diff --git a/llvm/test/CodeGen/RISCV/inline-asm-fixed-v-constraint.ll b/llvm/test/CodeGen/RISCV/inline-asm-fixed-v-constraint.ll
new file mode 100644
index 0000000000000..2c698adc201f9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/inline-asm-fixed-v-constraint.ll
@@ -0,0 +1,68 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefix=RV64I %s
+
+define <1 x i8> @constraint_vr_fixed(<1 x i8> %0, <1 x i8> %1) nounwind {
+; RV32I-LABEL: constraint_vr_fixed:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    vadd.vv v8, v8, v9
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: constraint_vr_fixed:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    vadd.vv v8, v8, v9
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    ret
+  %a = tail call <1 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(
+      <1 x i8> %0, <1 x i8> %1)
+  ret <1 x i8> %a
+}
+
+define <4 x i32> @constraint_vd_fixed(<4 x i32> %0, <4 x i32> %1) nounwind {
+; RV32I-LABEL: constraint_vd_fixed:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    vadd.vv v8, v8, v9
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: constraint_vd_fixed:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    vadd.vv v8, v8, v9
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    ret
+  %a = tail call <4 x i32> asm "vadd.vv $0, $1, $2", "=^vd,^vr,^vr"(
+      <4 x i32> %0, <4 x i32> %1)
+  ret <4 x i32> %a
+}
+
+define <16 x i1> @constraint_vm_fixed(<16 x i1> %0, <16 x i1> %1) nounwind {
+; RV32I-LABEL: constraint_vm_fixed:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32I-NEXT:    vmv1r.v v9, v0
+; RV32I-NEXT:    vmv1r.v v0, v8
+; RV32I-NEXT:    #APP
+; RV32I-NEXT:    vadd.vv v0, v9, v0
+; RV32I-NEXT:    #NO_APP
+; RV32I-NEXT:    ret
+;
+; RV64I-LABEL: constraint_vm_fixed:
+; RV64I:       # %bb.0:
+; RV64I-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64I-NEXT:    vmv1r.v v9, v0
+; RV64I-NEXT:    vmv1r.v v0, v8
+; RV64I-NEXT:    #APP
+; RV64I-NEXT:    vadd.vv v0, v9, v0
+; RV64I-NEXT:    #NO_APP
+; RV64I-NEXT:    ret
+  %a = tail call <16 x i1> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vm"(
+      <16 x i1> %0, <16 x i1> %1)
+  ret <16 x i1> %a
+}

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits