This revision was automatically updated to reflect the committed changes. Closed by commit rG8b33839f010f: [RISCV] Rename vector inline constraint from 'v' to 'vr' and 'vm' in IR. (authored by HsiangKai).
Repository: rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D107139/new/

https://reviews.llvm.org/D107139

Files:
  clang/lib/Basic/Targets/RISCV.cpp
  clang/test/CodeGen/RISCV/riscv-inline-asm-rvv.c
  llvm/lib/Target/RISCV/RISCVISelLowering.cpp
  llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
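For context, here is a minimal sketch of how the renamed constraints are written from C, mirroring the clang test in the diff below (the function names are illustrative, not part of the patch):

  #include <riscv_vector.h>

  /* "vr" constrains an operand to a RISC-V vector register. */
  vint32m1_t add_vectors(vint32m1_t a, vint32m1_t b) {
    vint32m1_t ret;
    asm volatile("vadd.vv %0, %1, %2" : "=vr"(ret) : "vr"(a), "vr"(b));
    return ret;
  }

  /* "vm" constrains an operand to a register holding a mask value. */
  vbool1_t and_masks(vbool1_t a, vbool1_t b) {
    vbool1_t ret;
    asm volatile("vmand.mm %0, %1, %2" : "=vm"(ret) : "vm"(a), "vm"(b));
    return ret;
  }

Clang translates the source-level "vr"/"vm" into the IR-level "^vr"/"^vm" strings that the updated tests check for.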
Index: llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
+++ llvm/test/CodeGen/RISCV/rvv/inline-asm.ll
@@ -10,7 +10,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2)
+  %0 = tail call <vscale x 1 x i1> asm "vmand.mm $0, $1, $2", "=^vm,^vm,^vm"(<vscale x 1 x i1> %in, <vscale x 1 x i1> %in2)
   ret <vscale x 1 x i1> %0
 }

@@ -22,7 +22,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 2 x i1> %in, <vscale x 2 x i1> %in2)
+  %0 = tail call <vscale x 2 x i1> asm "vmand.mm $0, $1, $2", "=^vm,^vm,^vm"(<vscale x 2 x i1> %in, <vscale x 2 x i1> %in2)
   ret <vscale x 2 x i1> %0
 }

@@ -34,7 +34,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 4 x i1> %in, <vscale x 4 x i1> %in2)
+  %0 = tail call <vscale x 4 x i1> asm "vmand.mm $0, $1, $2", "=^vm,^vm,^vm"(<vscale x 4 x i1> %in, <vscale x 4 x i1> %in2)
   ret <vscale x 4 x i1> %0
 }

@@ -46,7 +46,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 8 x i1> %in, <vscale x 8 x i1> %in2)
+  %0 = tail call <vscale x 8 x i1> asm "vmand.mm $0, $1, $2", "=^vm,^vm,^vm"(<vscale x 8 x i1> %in, <vscale x 8 x i1> %in2)
   ret <vscale x 8 x i1> %0
 }

@@ -58,7 +58,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 16 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 16 x i1> %in, <vscale x 16 x i1> %in2)
+  %0 = tail call <vscale x 16 x i1> asm "vmand.mm $0, $1, $2", "=^vm,^vm,^vm"(<vscale x 16 x i1> %in, <vscale x 16 x i1> %in2)
   ret <vscale x 16 x i1> %0
 }

@@ -70,7 +70,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 32 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 32 x i1> %in, <vscale x 32 x i1> %in2)
+  %0 = tail call <vscale x 32 x i1> asm "vmand.mm $0, $1, $2", "=^vm,^vm,^vm"(<vscale x 32 x i1> %in, <vscale x 32 x i1> %in2)
   ret <vscale x 32 x i1> %0
 }

@@ -82,7 +82,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 64 x i1> asm "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 64 x i1> %in, <vscale x 64 x i1> %in2)
+  %0 = tail call <vscale x 64 x i1> asm "vmand.mm $0, $1, $2", "=^vm,^vm,^vm"(<vscale x 64 x i1> %in, <vscale x 64 x i1> %in2)
   ret <vscale x 64 x i1> %0
 }

@@ -94,7 +94,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i64> %in, <vscale x 1 x i64> %in2)
+  %0 = tail call <vscale x 1 x i64> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 1 x i64> %in, <vscale x 1 x i64> %in2)
   ret <vscale x 1 x i64> %0
 }

@@ -106,7 +106,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i64> %in, <vscale x 2 x i64> %in2)
+  %0 = tail call <vscale x 2 x i64> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 2 x i64> %in, <vscale x 2 x i64> %in2)
   ret <vscale x 2 x i64> %0
 }

@@ -118,7 +118,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i64> %in, <vscale x 4 x i64> %in2)
+  %0 = tail call <vscale x 4 x i64> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 4 x i64> %in, <vscale x 4 x i64> %in2)
   ret <vscale x 4 x i64> %0
 }

@@ -130,7 +130,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x i64> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i64> %in, <vscale x 8 x i64> %in2)
+  %0 = tail call <vscale x 8 x i64> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 8 x i64> %in, <vscale x 8 x i64> %in2)
   ret <vscale x 8 x i64> %0
 }

@@ -142,7 +142,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i32> %in, <vscale x 1 x i32> %in2)
+  %0 = tail call <vscale x 1 x i32> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 1 x i32> %in, <vscale x 1 x i32> %in2)
   ret <vscale x 1 x i32> %0
 }

@@ -154,7 +154,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i32> %in, <vscale x 2 x i32> %in2)
+  %0 = tail call <vscale x 2 x i32> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 2 x i32> %in, <vscale x 2 x i32> %in2)
   ret <vscale x 2 x i32> %0
 }

@@ -166,7 +166,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i32> %in, <vscale x 4 x i32> %in2)
+  %0 = tail call <vscale x 4 x i32> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 4 x i32> %in, <vscale x 4 x i32> %in2)
   ret <vscale x 4 x i32> %0
 }

@@ -178,7 +178,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i32> %in, <vscale x 8 x i32> %in2)
+  %0 = tail call <vscale x 8 x i32> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 8 x i32> %in, <vscale x 8 x i32> %in2)
   ret <vscale x 8 x i32> %0
 }

@@ -190,7 +190,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 16 x i32> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i32> %in, <vscale x 16 x i32> %in2)
+  %0 = tail call <vscale x 16 x i32> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 16 x i32> %in, <vscale x 16 x i32> %in2)
   ret <vscale x 16 x i32> %0
 }

@@ -202,7 +202,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i16> %in, <vscale x 1 x i16> %in2)
+  %0 = tail call <vscale x 1 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 1 x i16> %in, <vscale x 1 x i16> %in2)
   ret <vscale x 1 x i16> %0
 }

@@ -214,7 +214,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i16> %in, <vscale x 2 x i16> %in2)
+  %0 = tail call <vscale x 2 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 2 x i16> %in, <vscale x 2 x i16> %in2)
   ret <vscale x 2 x i16> %0
 }

@@ -226,7 +226,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i16> %in, <vscale x 4 x i16> %in2)
+  %0 = tail call <vscale x 4 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 4 x i16> %in, <vscale x 4 x i16> %in2)
   ret <vscale x 4 x i16> %0
 }

@@ -238,7 +238,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i16> %in, <vscale x 8 x i16> %in2)
+  %0 = tail call <vscale x 8 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 8 x i16> %in, <vscale x 8 x i16> %in2)
   ret <vscale x 8 x i16> %0
 }

@@ -250,7 +250,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 16 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i16> %in, <vscale x 16 x i16> %in2)
+  %0 = tail call <vscale x 16 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 16 x i16> %in, <vscale x 16 x i16> %in2)
   ret <vscale x 16 x i16> %0
 }

@@ -262,7 +262,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 32 x i16> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 32 x i16> %in, <vscale x 32 x i16> %in2)
+  %0 = tail call <vscale x 32 x i16> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 32 x i16> %in, <vscale x 32 x i16> %in2)
   ret <vscale x 32 x i16> %0
 }

@@ -274,7 +274,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 1 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 1 x i8> %in, <vscale x 1 x i8> %in2)
+  %0 = tail call <vscale x 1 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 1 x i8> %in, <vscale x 1 x i8> %in2)
   ret <vscale x 1 x i8> %0
 }

@@ -286,7 +286,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 2 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i8> %in, <vscale x 2 x i8> %in2)
+  %0 = tail call <vscale x 2 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 2 x i8> %in, <vscale x 2 x i8> %in2)
   ret <vscale x 2 x i8> %0
 }

@@ -298,7 +298,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2)
+  %0 = tail call <vscale x 4 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 4 x i8> %in, <vscale x 4 x i8> %in2)
   ret <vscale x 4 x i8> %0
 }

@@ -310,7 +310,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 8 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2)
+  %0 = tail call <vscale x 8 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 8 x i8> %in, <vscale x 8 x i8> %in2)
   ret <vscale x 8 x i8> %0
 }

@@ -322,7 +322,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 16 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2)
+  %0 = tail call <vscale x 16 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 16 x i8> %in, <vscale x 16 x i8> %in2)
   ret <vscale x 16 x i8> %0
 }

@@ -334,7 +334,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 32 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 32 x i8> %in, <vscale x 32 x i8> %in2)
+  %0 = tail call <vscale x 32 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 32 x i8> %in, <vscale x 32 x i8> %in2)
   ret <vscale x 32 x i8> %0
 }

@@ -346,7 +346,7 @@
 ; CHECK-NEXT:    #NO_APP
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 64 x i8> asm "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2)
+  %0 = tail call <vscale x 64 x i8> asm "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 64 x i8> %in, <vscale x 64 x i8> %in2)
   ret <vscale x 64 x i8> %0
 }

Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8474,7 +8474,6 @@
     default:
       break;
    case 'f':
-    case 'v':
      return C_RegisterClass;
    case 'I':
    case 'J':
@@ -8485,6 +8484,9 @@
    case 'S': // A symbolic address
      return C_Other;
    }
+  } else {
+    if (Constraint == "vr" || Constraint == "vm")
+      return C_RegisterClass;
   }
   return TargetLowering::getConstraintType(Constraint);
 }
@@ -8507,16 +8509,19 @@
       if (Subtarget.hasStdExtD() && VT == MVT::f64)
         return std::make_pair(0U, &RISCV::FPR64RegClass);
       break;
-    case 'v':
-      for (const auto *RC :
-           {&RISCV::VMRegClass, &RISCV::VRRegClass, &RISCV::VRM2RegClass,
-            &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
+    default:
+      break;
+    }
+  } else {
+    if (Constraint == "vr") {
+      for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
+                             &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
         if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
           return std::make_pair(0U, RC);
       }
-      break;
-    default:
-      break;
+    } else if (Constraint == "vm") {
+      if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
+        return std::make_pair(0U, &RISCV::VMRegClass);
     }
   }

Index: clang/test/CodeGen/RISCV/riscv-inline-asm-rvv.c
===================================================================
--- clang/test/CodeGen/RISCV/riscv-inline-asm-rvv.c
+++ clang/test/CodeGen/RISCV/riscv-inline-asm-rvv.c
@@ -21,7 +21,7 @@

 vint32m1_t test_vr(vint32m1_t a, vint32m1_t b) {
 // CHECK-LABEL: define{{.*}} @test_vr
-// CHECK: %0 = tail call <vscale x 2 x i32> asm sideeffect "vadd.vv $0, $1, $2", "=v,v,v"(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b)
+// CHECK: %0 = tail call <vscale x 2 x i32> asm sideeffect "vadd.vv $0, $1, $2", "=^vr,^vr,^vr"(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b)
   vint32m1_t ret;
   asm volatile ("vadd.vv %0, %1, %2" : "=vr"(ret) : "vr"(a), "vr"(b));
   return ret;
@@ -29,7 +29,7 @@

 vbool1_t test_vm(vbool1_t a, vbool1_t b) {
 // CHECK-LABEL: define{{.*}} @test_vm
-// CHECK: %0 = tail call <vscale x 64 x i1> asm sideeffect "vmand.mm $0, $1, $2", "=v,v,v"(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b)
+// CHECK: %0 = tail call <vscale x 64 x i1> asm sideeffect "vmand.mm $0, $1, $2", "=^vm,^vm,^vm"(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b)
   vbool1_t ret;
   asm volatile ("vmand.mm %0, %1, %2" : "=vm"(ret) : "vm"(a), "vm"(b));
   return ret;

Index: clang/lib/Basic/Targets/RISCV.cpp
===================================================================
--- clang/lib/Basic/Targets/RISCV.cpp
+++ clang/lib/Basic/Targets/RISCV.cpp
@@ -105,7 +105,7 @@
     std::string R;
     switch (*Constraint) {
     case 'v':
-      R = std::string("v");
+      R = std::string("^") + std::string(Constraint, 2);
       Constraint += 1;
       break;
     default: