https://github.com/rj-jesus updated https://github.com/llvm/llvm-project/pull/130625
>From 03471cbf9270d1707191057de46dd38409c8a046 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <r...@nvidia.com>
Date: Mon, 10 Mar 2025 01:57:20 -0700
Subject: [PATCH 1/4] Reapply "[AArch64][SVE] Improve fixed-length addressing
 modes." (#130263)

This reverts commit 21610e3ecc8bc727f99047e544186b35b1291bcd.
---
 .../CodeGen/AArch64/sve-vector-bits-codegen.c |   9 +-
 .../Target/AArch64/AArch64ISelDAGToDAG.cpp    |  15 +-
 llvm/lib/Target/AArch64/AArch64Subtarget.h    |  12 +-
 .../AArch64/sve-fixed-length-offsets.ll       | 362 ++++++++++++++++++
 .../AArch64/sve-fixed-length-shuffles.ll      |  90 ++---
 5 files changed, 434 insertions(+), 54 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll

diff --git a/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c b/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
index 0ed14b4b3b793..1391a1b09fbd1 100644
--- a/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
+++ b/clang/test/CodeGen/AArch64/sve-vector-bits-codegen.c
@@ -13,12 +13,9 @@ void func(int *restrict a, int *restrict b) {
 // CHECK-LABEL: func
-// CHECK256-COUNT-1: str
-// CHECK256-COUNT-7: st1w
-// CHECK512-COUNT-1: str
-// CHECK512-COUNT-3: st1w
-// CHECK1024-COUNT-1: str
-// CHECK1024-COUNT-1: st1w
+// CHECK256-COUNT-8: str
+// CHECK512-COUNT-4: str
+// CHECK1024-COUNT-2: str
 // CHECK2048-COUNT-1: st1w
 #pragma clang loop vectorize(enable)
   for (int i = 0; i < 64; ++i)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 3ca9107cb2ce5..07bcd802962fa 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -7380,12 +7380,23 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
     return false;
 
   SDValue VScale = N.getOperand(1);
-  if (VScale.getOpcode() != ISD::VSCALE)
+  int64_t MulImm = std::numeric_limits<int64_t>::max();
+  if (VScale.getOpcode() == ISD::VSCALE) {
+    MulImm = cast<ConstantSDNode>(VScale.getOperand(0))->getSExtValue();
+  } else if (auto C = dyn_cast<ConstantSDNode>(VScale)) {
+    int64_t ByteOffset = C->getSExtValue();
+    const auto KnownVScale =
+        Subtarget->getSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
+
+    if (!KnownVScale || ByteOffset % KnownVScale != 0)
+      return false;
+
+    MulImm = ByteOffset / KnownVScale;
+  } else
     return false;
 
   TypeSize TS = MemVT.getSizeInBits();
   int64_t MemWidthBytes = static_cast<int64_t>(TS.getKnownMinValue()) / 8;
-  int64_t MulImm = cast<ConstantSDNode>(VScale.getOperand(0))->getSExtValue();
 
   if ((MulImm % MemWidthBytes) != 0)
     return false;
diff --git a/llvm/lib/Target/AArch64/AArch64Subtarget.h b/llvm/lib/Target/AArch64/AArch64Subtarget.h
index c6eb77e3bc3ba..f5ffc72cae537 100644
--- a/llvm/lib/Target/AArch64/AArch64Subtarget.h
+++ b/llvm/lib/Target/AArch64/AArch64Subtarget.h
@@ -391,7 +391,7 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
   void mirFileLoaded(MachineFunction &MF) const override;
 
   // Return the known range for the bit length of SVE data registers. A value
-  // of 0 means nothing is known about that particular limit beyong what's
+  // of 0 means nothing is known about that particular limit beyond what's
   // implied by the architecture.
   unsigned getMaxSVEVectorSizeInBits() const {
     assert(isSVEorStreamingSVEAvailable() &&
@@ -405,6 +405,16 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
     return MinSVEVectorSizeInBits;
   }
 
+  // Return the known bit length of SVE data registers. A value of 0 means the
+  // length is unknown beyond what's implied by the architecture.
+  unsigned getSVEVectorSizeInBits() const {
+    assert(isSVEorStreamingSVEAvailable() &&
+           "Tried to get SVE vector length without SVE support!");
+    if (MinSVEVectorSizeInBits == MaxSVEVectorSizeInBits)
+      return MaxSVEVectorSizeInBits;
+    return 0;
+  }
+
   bool useSVEForFixedLengthVectors() const {
     if (!isSVEorStreamingSVEAvailable())
       return false;
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
new file mode 100644
index 0000000000000..700bbe4f060ca
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
@@ -0,0 +1,362 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefix=CHECK-128
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefix=CHECK-256
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=512 -aarch64-sve-vector-bits-max=512 < %s | FileCheck %s --check-prefix=CHECK-512
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=1024 -aarch64-sve-vector-bits-max=1024 < %s | FileCheck %s --check-prefix=CHECK-1024
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=2048 -aarch64-sve-vector-bits-max=2048 < %s | FileCheck %s --check-prefix=CHECK-2048
+
+define void @nxv16i8(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv16i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.b
+; CHECK-NEXT: mov w8, #256 // =0x100
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, x8]
+; CHECK-NEXT: st1b { z0.b }, p0, [x1, x8]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv16i8:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr z0, [x0, #16, mul vl]
+; CHECK-128-NEXT: str z0, [x1, #16, mul vl]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv16i8:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ldr z0, [x0, #8, mul vl]
+; CHECK-256-NEXT: str z0, [x1, #8, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv16i8:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ldr z0, [x0, #4, mul vl]
+; CHECK-512-NEXT: str z0, [x1, #4, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv16i8:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ldr z0, [x0, #2, mul vl]
+; CHECK-1024-NEXT: str z0, [x1, #2, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv16i8:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-2048-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-2048-NEXT: ret
+  %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 256
+  %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 256
+  %x = load <vscale x 16 x i8>, ptr %ldoff, align 1
+  store <vscale x 16 x i8> %x, ptr %stoff, align 1
+  ret void
+}
+
+define void @nxv8i16(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv8i16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.h
+; CHECK-NEXT: mov x8, #128 // =0x80
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; CHECK-NEXT: st1h { z0.h }, p0, [x1, x8, lsl #1]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv8i16:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr z0, [x0, #16, mul vl]
+; CHECK-128-NEXT: str z0, [x1, #16, mul vl]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv8i16:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ldr z0, [x0, #8, mul vl]
+; CHECK-256-NEXT: str z0, [x1, #8, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv8i16:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ldr z0, [x0, #4, mul vl]
+; CHECK-512-NEXT: str z0, [x1, #4, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv8i16:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ldr z0, [x0, #2, mul vl]
+; CHECK-1024-NEXT: str z0, [x1, #2, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv8i16:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-2048-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-2048-NEXT: ret
+  %ldoff = getelementptr inbounds nuw i16, ptr %ldptr, i64 128
+  %stoff = getelementptr inbounds nuw i16, ptr %stptr, i64 128
+  %x = load <vscale x 8 x i16>, ptr %ldoff, align 2
+  store <vscale x 8 x i16> %x, ptr %stoff, align 2
+  ret void
+}
+
+define void @nxv4i32(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv4i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: mov x8, #64 // =0x40
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv4i32:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr z0, [x0, #16, mul vl]
+; CHECK-128-NEXT: str z0, [x1, #16, mul vl]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv4i32:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ldr z0, [x0, #8, mul vl]
+; CHECK-256-NEXT: str z0, [x1, #8, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv4i32:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ldr z0, [x0, #4, mul vl]
+; CHECK-512-NEXT: str z0, [x1, #4, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv4i32:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ldr z0, [x0, #2, mul vl]
+; CHECK-1024-NEXT: str z0, [x1, #2, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv4i32:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-2048-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-2048-NEXT: ret
+  %ldoff = getelementptr inbounds nuw i32, ptr %ldptr, i64 64
+  %stoff = getelementptr inbounds nuw i32, ptr %stptr, i64 64
+  %x = load <vscale x 4 x i32>, ptr %ldoff, align 4
+  store <vscale x 4 x i32> %x, ptr %stoff, align 4
+  ret void
+}
+
+define void @nxv2i64(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv2i64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov x8, #32 // =0x20
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; CHECK-NEXT: st1d { z0.d }, p0, [x1, x8, lsl #3]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv2i64:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldr z0, [x0, #16, mul vl]
+; CHECK-128-NEXT: str z0, [x1, #16, mul vl]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv2i64:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ldr z0, [x0, #8, mul vl]
+; CHECK-256-NEXT: str z0, [x1, #8, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv2i64:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ldr z0, [x0, #4, mul vl]
+; CHECK-512-NEXT: str z0, [x1, #4, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv2i64:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ldr z0, [x0, #2, mul vl]
+; CHECK-1024-NEXT: str z0, [x1, #2, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv2i64:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ldr z0, [x0, #1, mul vl]
+; CHECK-2048-NEXT: str z0, [x1, #1, mul vl]
+; CHECK-2048-NEXT: ret
+  %ldoff = getelementptr inbounds nuw i64, ptr %ldptr, i64 32
+  %stoff = getelementptr inbounds nuw i64, ptr %stptr, i64 32
+  %x = load <vscale x 2 x i64>, ptr %ldoff, align 8
+  store <vscale x 2 x i64> %x, ptr %stoff, align 8
+  ret void
+}
+
+define void @nxv4i8(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv4i8:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.s
+; CHECK-NEXT: mov w8, #32 // =0x20
+; CHECK-NEXT: ld1b { z0.s }, p0/z, [x0, x8]
+; CHECK-NEXT: st1b { z0.s }, p0, [x1, x8]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv4i8:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ptrue p0.s
+; CHECK-128-NEXT: mov w8, #32 // =0x20
+; CHECK-128-NEXT: ld1b { z0.s }, p0/z, [x0, x8]
+; CHECK-128-NEXT: st1b { z0.s }, p0, [x1, x8]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv4i8:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ptrue p0.s
+; CHECK-256-NEXT: ld1b { z0.s }, p0/z, [x0, #4, mul vl]
+; CHECK-256-NEXT: st1b { z0.s }, p0, [x1, #4, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv4i8:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ptrue p0.s
+; CHECK-512-NEXT: ld1b { z0.s }, p0/z, [x0, #2, mul vl]
+; CHECK-512-NEXT: st1b { z0.s }, p0, [x1, #2, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv4i8:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ptrue p0.s
+; CHECK-1024-NEXT: ld1b { z0.s }, p0/z, [x0, #1, mul vl]
+; CHECK-1024-NEXT: st1b { z0.s }, p0, [x1, #1, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv4i8:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ptrue p0.s
+; CHECK-2048-NEXT: mov w8, #32 // =0x20
+; CHECK-2048-NEXT: ld1b { z0.s }, p0/z, [x0, x8]
+; CHECK-2048-NEXT: st1b { z0.s }, p0, [x1, x8]
+; CHECK-2048-NEXT: ret
+  %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 32
+  %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 32
+  %x = load <vscale x 4 x i8>, ptr %ldoff, align 1
+  store <vscale x 4 x i8> %x, ptr %stoff, align 1
+  ret void
+}
+
+define void @nxv2f32(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv2f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov x8, #16 // =0x10
+; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT: st1w { z0.d }, p0, [x1, x8, lsl #2]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv2f32:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ptrue p0.d
+; CHECK-128-NEXT: mov x8, #16 // =0x10
+; CHECK-128-NEXT: ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
+; CHECK-128-NEXT: st1w { z0.d }, p0, [x1, x8, lsl #2]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv2f32:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ptrue p0.d
+; CHECK-256-NEXT: ld1w { z0.d }, p0/z, [x0, #4, mul vl]
+; CHECK-256-NEXT: st1w { z0.d }, p0, [x1, #4, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv2f32:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ptrue p0.d
+; CHECK-512-NEXT: ld1w { z0.d }, p0/z, [x0, #2, mul vl]
+; CHECK-512-NEXT: st1w { z0.d }, p0, [x1, #2, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv2f32:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ptrue p0.d
+; CHECK-1024-NEXT: ld1w { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-1024-NEXT: st1w { z0.d }, p0, [x1, #1, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv2f32:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ptrue p0.d
+; CHECK-2048-NEXT: mov x8, #16 // =0x10
+; CHECK-2048-NEXT: ld1w { z0.d }, p0/z, [x0, x8, lsl #2]
+; CHECK-2048-NEXT: st1w { z0.d }, p0, [x1, x8, lsl #2]
+; CHECK-2048-NEXT: ret
+  %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 64
+  %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 64
+  %x = load <vscale x 2 x float>, ptr %ldoff, align 1
+  store <vscale x 2 x float> %x, ptr %stoff, align 1
+  ret void
+}
+
+define void @nxv4f64(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: nxv4f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: mov x8, #16 // =0x10
+; CHECK-NEXT: add x9, x0, #128
+; CHECK-NEXT: ldr z1, [x9, #1, mul vl]
+; CHECK-NEXT: add x9, x1, #128
+; CHECK-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; CHECK-NEXT: st1d { z0.d }, p0, [x1, x8, lsl #3]
+; CHECK-NEXT: str z1, [x9, #1, mul vl]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: nxv4f64:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: add x8, x0, #128
+; CHECK-128-NEXT: ldr z1, [x0, #8, mul vl]
+; CHECK-128-NEXT: ldr z0, [x8, #1, mul vl]
+; CHECK-128-NEXT: add x8, x1, #128
+; CHECK-128-NEXT: str z0, [x8, #1, mul vl]
+; CHECK-128-NEXT: str z1, [x1, #8, mul vl]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: nxv4f64:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: add x8, x0, #128
+; CHECK-256-NEXT: ldr z1, [x0, #4, mul vl]
+; CHECK-256-NEXT: ldr z0, [x8, #1, mul vl]
+; CHECK-256-NEXT: add x8, x1, #128
+; CHECK-256-NEXT: str z0, [x8, #1, mul vl]
+; CHECK-256-NEXT: str z1, [x1, #4, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: nxv4f64:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: add x8, x0, #128
+; CHECK-512-NEXT: ldr z1, [x0, #2, mul vl]
+; CHECK-512-NEXT: ldr z0, [x8, #1, mul vl]
+; CHECK-512-NEXT: add x8, x1, #128
+; CHECK-512-NEXT: str z0, [x8, #1, mul vl]
+; CHECK-512-NEXT: str z1, [x1, #2, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: nxv4f64:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: add x8, x0, #128
+; CHECK-1024-NEXT: ldr z1, [x0, #1, mul vl]
+; CHECK-1024-NEXT: ldr z0, [x8, #1, mul vl]
+; CHECK-1024-NEXT: add x8, x1, #128
+; CHECK-1024-NEXT: str z0, [x8, #1, mul vl]
+; CHECK-1024-NEXT: str z1, [x1, #1, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: nxv4f64:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ptrue p0.d
+; CHECK-2048-NEXT: mov x8, #16 // =0x10
+; CHECK-2048-NEXT: add x9, x0, #128
+; CHECK-2048-NEXT: ldr z1, [x9, #1, mul vl]
+; CHECK-2048-NEXT: add x9, x1, #128
+; CHECK-2048-NEXT: ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; CHECK-2048-NEXT: st1d { z0.d }, p0, [x1, x8, lsl #3]
+; CHECK-2048-NEXT: str z1, [x9, #1, mul vl]
+; CHECK-2048-NEXT: ret
+  %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 128
+  %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 128
+  %x = load <vscale x 4 x double>, ptr %ldoff, align 1
+  store <vscale x 4 x double> %x, ptr %stoff, align 1
+  ret void
+}
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
index e33bc8da97c05..2d4cdfa7278b9 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
@@ -30,64 +30,64 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang
 ; CHECK-NEXT: // %bb.1: // %vector.body
 ; CHECK-NEXT: mov z0.b, #0 // =0x0
 ; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov x9, #8 // =0x8
-; CHECK-NEXT: mov x10, #24 // =0x18
+; CHECK-NEXT: mov x9, #24 // =0x18
 ; CHECK-NEXT: umov w8, v0.b[8]
-; CHECK-NEXT: mov v1.16b, v0.16b
-; CHECK-NEXT: mov v1.b[1], v0.b[1]
-; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: mov x8, #16 // =0x10
-; CHECK-NEXT: mov v2.b[1], v0.b[9]
-; CHECK-NEXT: mov v1.b[2], v0.b[2]
-; CHECK-NEXT: mov v2.b[2], v0.b[10]
-; CHECK-NEXT: mov v1.b[3], v0.b[3]
-; CHECK-NEXT: mov v2.b[3], v0.b[11]
-; CHECK-NEXT: mov v1.b[4], v0.b[4]
-; CHECK-NEXT: mov v2.b[4], v0.b[12]
-; CHECK-NEXT: mov v1.b[5], v0.b[5]
-; CHECK-NEXT: mov v2.b[5], v0.b[13]
-; CHECK-NEXT: mov v1.b[6], v0.b[6]
-; CHECK-NEXT: mov v2.b[6], v0.b[14]
-; CHECK-NEXT: mov v1.b[7], v0.b[7]
-; CHECK-NEXT: mov v2.b[7], v0.b[15]
-; CHECK-NEXT: ext z0.b, z0.b, z0.b, #16
-; CHECK-NEXT: uunpklo z1.h, z1.b
-; CHECK-NEXT: ext v3.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: uunpklo z0.h, z0.b
+; CHECK-NEXT: mov v2.16b, v0.16b
+; CHECK-NEXT: mov z3.d, z0.d
+; CHECK-NEXT: mov v2.b[1], v0.b[1]
+; CHECK-NEXT: ext z3.b, z3.b, z0.b, #16
+; CHECK-NEXT: fmov s1, w8
+; CHECK-NEXT: mov x8, #8 // =0x8
+; CHECK-NEXT: ext v4.16b, v3.16b, v3.16b, #8
+; CHECK-NEXT: mov v1.b[1], v0.b[9]
+; CHECK-NEXT: mov v2.b[2], v0.b[2]
+; CHECK-NEXT: mov v1.b[2], v0.b[10]
+; CHECK-NEXT: mov v2.b[3], v0.b[3]
+; CHECK-NEXT: mov v1.b[3], v0.b[11]
+; CHECK-NEXT: mov v2.b[4], v0.b[4]
+; CHECK-NEXT: mov v1.b[4], v0.b[12]
+; CHECK-NEXT: mov v2.b[5], v0.b[5]
+; CHECK-NEXT: mov v1.b[5], v0.b[13]
+; CHECK-NEXT: mov v2.b[6], v0.b[6]
+; CHECK-NEXT: mov v1.b[6], v0.b[14]
+; CHECK-NEXT: mov v2.b[7], v0.b[7]
+; CHECK-NEXT: mov v1.b[7], v0.b[15]
 ; CHECK-NEXT: uunpklo z2.h, z2.b
-; CHECK-NEXT: uunpklo z1.s, z1.h
-; CHECK-NEXT: uunpklo z3.h, z3.b
-; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z0.h, z1.b
+; CHECK-NEXT: uunpklo z1.h, z3.b
+; CHECK-NEXT: uunpklo z3.h, z4.b
 ; CHECK-NEXT: uunpklo z2.s, z2.h
-; CHECK-NEXT: lsl z1.s, z1.s, #31
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z1.s, z1.h
 ; CHECK-NEXT: uunpklo z3.s, z3.h
-; CHECK-NEXT: lsl z0.s, z0.s, #31
-; CHECK-NEXT: asr z1.s, z1.s, #31
 ; CHECK-NEXT: lsl z2.s, z2.s, #31
-; CHECK-NEXT: asr z0.s, z0.s, #31
-; CHECK-NEXT: and z1.s, z1.s, #0x1
+; CHECK-NEXT: lsl z0.s, z0.s, #31
+; CHECK-NEXT: lsl z1.s, z1.s, #31
 ; CHECK-NEXT: lsl z3.s, z3.s, #31
 ; CHECK-NEXT: asr z2.s, z2.s, #31
-; CHECK-NEXT: asr z0.s, z0.s, #31
-; CHECK-NEXT: and z1.s, z1.s, #0x1
+; CHECK-NEXT: asr z0.s, z0.s, #31
+; CHECK-NEXT: asr z1.s, z1.s, #31
 ; CHECK-NEXT: asr z3.s, z3.s, #31
 ; CHECK-NEXT: and z2.s, z2.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT: and z0.s, z0.s, #0x1
+; CHECK-NEXT: and z1.s, z1.s, #0x1
 ; CHECK-NEXT: and z3.s, z3.s, #0x1
-; CHECK-NEXT: cmpne p2.s, p0/z, z2.s, #0
-; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0, x9, lsl #2]
-; CHECK-NEXT: mov z1.s, p4/m, #0 // =0x0
+; CHECK-NEXT: cmpne p4.s, p0/z, z2.s, #0
+; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0]
+; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
+; CHECK-NEXT: cmpne p2.s, p0/z, z1.s, #0
 ; CHECK-NEXT: cmpne p3.s, p0/z, z3.s, #0
-; CHECK-NEXT: ld1w { z3.s }, p0/z, [x0, x10, lsl #2]
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z3.s }, p0/z, [x0, x9, lsl #2]
+; CHECK-NEXT: mov z2.s, p4/m, #0 // =0x0
 ; CHECK-NEXT: mov z0.s, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z2.s, p2/m, #0 // =0x0
-; CHECK-NEXT: st1w { z1.s }, p0, [x0]
-; CHECK-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
+; CHECK-NEXT: mov z1.s, p2/m, #0 // =0x0
 ; CHECK-NEXT: mov z3.s, p3/m, #0 // =0x0
-; CHECK-NEXT: st1w { z2.s }, p0, [x0, x9, lsl #2]
-; CHECK-NEXT: st1w { z3.s }, p0, [x0, x10, lsl #2]
+; CHECK-NEXT: st1w { z2.s }, p0, [x0]
+; CHECK-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
+; CHECK-NEXT: st1w { z1.s }, p0, [x0, #1, mul vl]
+; CHECK-NEXT: st1w { z3.s }, p0, [x0, x9, lsl #2]
 ; CHECK-NEXT: .LBB1_2: // %exit
 ; CHECK-NEXT: ret
 %broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer

>From 114d8cda4971542067c81cc8d7ee1d39e7c636d5 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <r...@nvidia.com>
Date: Mon, 10 Mar 2025 04:05:23 -0700
Subject: [PATCH 2/4] Add tests

---
 .../AArch64/sve-fixed-length-offsets.ll       | 117 +++++++++++++++++-
 1 file changed, 113 insertions(+), 4 deletions(-)

diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
index 700bbe4f060ca..8b2026e85155f 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
@@ -285,8 +285,8 @@ define void @nxv2f32(ptr %ldptr, ptr %stptr) {
 ; CHECK-2048-NEXT: ret
   %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 64
   %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 64
-  %x = load <vscale x 2 x float>, ptr %ldoff, align 1
-  store <vscale x 2 x float> %x, ptr %stoff, align 1
+  %x = load <vscale x 2 x float>, ptr %ldoff, align 4
+  store <vscale x 2 x float> %x, ptr %stoff, align 4
   ret void
 }
 
@@ -356,7 +356,116 @@ define void @nxv4f64(ptr %ldptr, ptr %stptr) {
 ; CHECK-2048-NEXT: ret
   %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 128
   %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 128
-  %x = load <vscale x 4 x double>, ptr %ldoff, align 1
-  store <vscale x 4 x double> %x, ptr %stoff, align 1
+  %x = load <vscale x 4 x double>, ptr %ldoff, align 8
+  store <vscale x 4 x double> %x, ptr %stoff, align 8
+  ret void
+}
+
+define void @v8i32(ptr %ldptr, ptr %stptr) {
+; CHECK-LABEL: v8i32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: ldp q0, q1, [x0, #64]
+; CHECK-NEXT: ldp q3, q2, [x0, #32]
+; CHECK-NEXT: stp q0, q1, [x1, #64]
+; CHECK-NEXT: stp q3, q2, [x1, #32]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: v8i32:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: ldp q0, q1, [x0, #64]
+; CHECK-128-NEXT: ldp q3, q2, [x0, #32]
+; CHECK-128-NEXT: stp q0, q1, [x1, #64]
+; CHECK-128-NEXT: stp q3, q2, [x1, #32]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: v8i32:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: ptrue p0.s
+; CHECK-256-NEXT: mov x8, #8 // =0x8
+; CHECK-256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-256-NEXT: ld1w { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-256-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-256-NEXT: st1w { z1.s }, p0, [x1, #1, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: v8i32:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: ptrue p0.s
+; CHECK-512-NEXT: mov x8, #8 // =0x8
+; CHECK-512-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-512-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: v8i32:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: ptrue p0.s, vl16
+; CHECK-1024-NEXT: mov x8, #8 // =0x8
+; CHECK-1024-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-1024-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: v8i32:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: ptrue p0.s, vl16
+; CHECK-2048-NEXT: mov x8, #8 // =0x8
+; CHECK-2048-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-2048-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
+; CHECK-2048-NEXT: ret
+  %ldoff = getelementptr inbounds nuw i8, ptr %ldptr, i64 32
+  %stoff = getelementptr inbounds nuw i8, ptr %stptr, i64 32
+  %x = load <16 x i32>, ptr %ldoff, align 4
+  store <16 x i32> %x, ptr %stoff, align 4
+  ret void
+}
+
+; FIXME: This is wrong for VLS.
+define void @v8i32_vscale(ptr %0) {
+; CHECK-LABEL: v8i32_vscale:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movi v0.4s, #1
+; CHECK-NEXT: rdvl x8, #2
+; CHECK-NEXT: add x8, x0, x8
+; CHECK-NEXT: stp q0, q0, [x8]
+; CHECK-NEXT: ret
+;
+; CHECK-128-LABEL: v8i32_vscale:
+; CHECK-128: // %bb.0:
+; CHECK-128-NEXT: movi v0.4s, #1
+; CHECK-128-NEXT: rdvl x8, #2
+; CHECK-128-NEXT: add x8, x0, x8
+; CHECK-128-NEXT: stp q0, q0, [x8]
+; CHECK-128-NEXT: ret
+;
+; CHECK-256-LABEL: v8i32_vscale:
+; CHECK-256: // %bb.0:
+; CHECK-256-NEXT: mov z0.s, #1 // =0x1
+; CHECK-256-NEXT: ptrue p0.s
+; CHECK-256-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-256-NEXT: ret
+;
+; CHECK-512-LABEL: v8i32_vscale:
+; CHECK-512: // %bb.0:
+; CHECK-512-NEXT: mov z0.s, #1 // =0x1
+; CHECK-512-NEXT: ptrue p0.s, vl8
+; CHECK-512-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-512-NEXT: ret
+;
+; CHECK-1024-LABEL: v8i32_vscale:
+; CHECK-1024: // %bb.0:
+; CHECK-1024-NEXT: mov z0.s, #1 // =0x1
+; CHECK-1024-NEXT: ptrue p0.s, vl8
+; CHECK-1024-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-1024-NEXT: ret
+;
+; CHECK-2048-LABEL: v8i32_vscale:
+; CHECK-2048: // %bb.0:
+; CHECK-2048-NEXT: mov z0.s, #1 // =0x1
+; CHECK-2048-NEXT: ptrue p0.s, vl8
+; CHECK-2048-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-2048-NEXT: ret
+  %vl = call i64 @llvm.vscale()
+  %vlx = shl i64 %vl, 5
+  %2 = getelementptr inbounds nuw i8, ptr %0, i64 %vlx
+  store <8 x i32> splat (i32 1), ptr %2, align 4
   ret void
 }

>From 2a1ed4e2fbbf42bec09dbad51077091960798334 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <r...@nvidia.com>
Date: Mon, 10 Mar 2025 04:41:40 -0700
Subject: [PATCH 3/4] Bail out if MemVT is a fixed-length vector

---
 .../Target/AArch64/AArch64ISelDAGToDAG.cpp    |  3 +-
 .../AArch64/sve-fixed-length-offsets.ll       |  7 +-
 .../AArch64/sve-fixed-length-shuffles.ll      | 90 +++++++++----------
 3 files changed, 51 insertions(+), 49 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 07bcd802962fa..d338c22267885 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -7388,7 +7388,8 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
     const auto KnownVScale =
         Subtarget->getSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
 
-    if (!KnownVScale || ByteOffset % KnownVScale != 0)
+    if (!KnownVScale || ByteOffset % KnownVScale != 0 ||
+        !MemVT.isScalableVector())
       return false;
 
     MulImm = ByteOffset / KnownVScale;
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
index 8b2026e85155f..84ab5493b03ee 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
@@ -381,11 +381,12 @@ define void @v8i32(ptr %ldptr, ptr %stptr) {
 ; CHECK-256-LABEL: v8i32:
 ; CHECK-256: // %bb.0:
 ; CHECK-256-NEXT: ptrue p0.s
-; CHECK-256-NEXT: mov x8, #8 // =0x8
+; CHECK-256-NEXT: mov x8, #16 // =0x10
+; CHECK-256-NEXT: mov x9, #8 // =0x8
 ; CHECK-256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; CHECK-256-NEXT: ld1w { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-256-NEXT: ld1w { z1.s }, p0/z, [x0, x9, lsl #2]
 ; CHECK-256-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
-; CHECK-256-NEXT: st1w { z1.s }, p0, [x1, #1, mul vl]
+; CHECK-256-NEXT: st1w { z1.s }, p0, [x1, x9, lsl #2]
 ; CHECK-256-NEXT: ret
 ;
 ; CHECK-512-LABEL: v8i32:
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
index 2d4cdfa7278b9..e33bc8da97c05 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
@@ -30,64 +30,64 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang
 ; CHECK-NEXT: // %bb.1: // %vector.body
 ; CHECK-NEXT: mov z0.b, #0 // =0x0
 ; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov x9, #24 // =0x18
+; CHECK-NEXT: mov x9, #8 // =0x8
+; CHECK-NEXT: mov x10, #24 // =0x18
 ; CHECK-NEXT: umov w8, v0.b[8]
-; CHECK-NEXT: mov v2.16b, v0.16b
-; CHECK-NEXT: mov z3.d, z0.d
-; CHECK-NEXT: mov v2.b[1], v0.b[1]
-; CHECK-NEXT: ext z3.b, z3.b, z0.b, #16
-; CHECK-NEXT: fmov s1, w8
-; CHECK-NEXT: mov x8, #8 // =0x8
-; CHECK-NEXT: ext v4.16b, v3.16b, v3.16b, #8
-; CHECK-NEXT: mov v1.b[1], v0.b[9]
-; CHECK-NEXT: mov v2.b[2], v0.b[2]
-; CHECK-NEXT: mov v1.b[2], v0.b[10]
-; CHECK-NEXT: mov v2.b[3], v0.b[3]
-; CHECK-NEXT: mov v1.b[3], v0.b[11]
-; CHECK-NEXT: mov v2.b[4], v0.b[4]
-; CHECK-NEXT: mov v1.b[4], v0.b[12]
-; CHECK-NEXT: mov v2.b[5], v0.b[5]
-; CHECK-NEXT: mov v1.b[5], v0.b[13]
-; CHECK-NEXT: mov v2.b[6], v0.b[6]
-; CHECK-NEXT: mov v1.b[6], v0.b[14]
-; CHECK-NEXT: mov v2.b[7], v0.b[7]
-; CHECK-NEXT: mov v1.b[7], v0.b[15]
+; CHECK-NEXT: mov v1.16b, v0.16b
+; CHECK-NEXT: mov v1.b[1], v0.b[1]
+; CHECK-NEXT: fmov s2, w8
+; CHECK-NEXT: mov x8, #16 // =0x10
+; CHECK-NEXT: mov v2.b[1], v0.b[9]
+; CHECK-NEXT: mov v1.b[2], v0.b[2]
+; CHECK-NEXT: mov v2.b[2], v0.b[10]
+; CHECK-NEXT: mov v1.b[3], v0.b[3]
+; CHECK-NEXT: mov v2.b[3], v0.b[11]
+; CHECK-NEXT: mov v1.b[4], v0.b[4]
+; CHECK-NEXT: mov v2.b[4], v0.b[12]
+; CHECK-NEXT: mov v1.b[5], v0.b[5]
+; CHECK-NEXT: mov v2.b[5], v0.b[13]
+; CHECK-NEXT: mov v1.b[6], v0.b[6]
+; CHECK-NEXT: mov v2.b[6], v0.b[14]
+; CHECK-NEXT: mov v1.b[7], v0.b[7]
+; CHECK-NEXT: mov v2.b[7], v0.b[15]
+; CHECK-NEXT: ext z0.b, z0.b, z0.b, #16
+; CHECK-NEXT: uunpklo z1.h, z1.b
+; CHECK-NEXT: ext v3.16b, v0.16b, v0.16b, #8
+; CHECK-NEXT: uunpklo z0.h, z0.b
 ; CHECK-NEXT: uunpklo z2.h, z2.b
-; CHECK-NEXT: uunpklo z0.h, z1.b
-; CHECK-NEXT: uunpklo z1.h, z3.b
-; CHECK-NEXT: uunpklo z3.h, z4.b
-; CHECK-NEXT: uunpklo z2.s, z2.h
-; CHECK-NEXT: uunpklo z0.s, z0.h
 ; CHECK-NEXT: uunpklo z1.s, z1.h
+; CHECK-NEXT: uunpklo z3.h, z3.b
+; CHECK-NEXT: uunpklo z0.s, z0.h
+; CHECK-NEXT: uunpklo z2.s, z2.h
+; CHECK-NEXT: lsl z1.s, z1.s, #31
 ; CHECK-NEXT: uunpklo z3.s, z3.h
-; CHECK-NEXT: lsl z2.s, z2.s, #31
 ; CHECK-NEXT: lsl z0.s, z0.s, #31
-; CHECK-NEXT: lsl z1.s, z1.s, #31
+; CHECK-NEXT: asr z1.s, z1.s, #31
+; CHECK-NEXT: lsl z2.s, z2.s, #31
+; CHECK-NEXT: asr z0.s, z0.s, #31
+; CHECK-NEXT: and z1.s, z1.s, #0x1
 ; CHECK-NEXT: lsl z3.s, z3.s, #31
 ; CHECK-NEXT: asr z2.s, z2.s, #31
-; CHECK-NEXT: asr z0.s, z0.s, #31
-; CHECK-NEXT: asr z1.s, z1.s, #31
+; CHECK-NEXT: and z0.s, z0.s, #0x1
+; CHECK-NEXT: cmpne p4.s, p0/z, z1.s, #0
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
 ; CHECK-NEXT: asr z3.s, z3.s, #31
 ; CHECK-NEXT: and z2.s, z2.s, #0x1
-; CHECK-NEXT: and z0.s, z0.s, #0x1
-; CHECK-NEXT: and z1.s, z1.s, #0x1
-; CHECK-NEXT: and z3.s, z3.s, #0x1
-; CHECK-NEXT: cmpne p4.s, p0/z, z2.s, #0
-; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0]
 ; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
-; CHECK-NEXT: cmpne p2.s, p0/z, z1.s, #0
-; CHECK-NEXT: cmpne p3.s, p0/z, z3.s, #0
 ; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ld1w { z3.s }, p0/z, [x0, x9, lsl #2]
-; CHECK-NEXT: mov z2.s, p4/m, #0 // =0x0
+; CHECK-NEXT: and z3.s, z3.s, #0x1
+; CHECK-NEXT: cmpne p2.s, p0/z, z2.s, #0
+; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0, x9, lsl #2]
+; CHECK-NEXT: mov z1.s, p4/m, #0 // =0x0
+; CHECK-NEXT: cmpne p3.s, p0/z, z3.s, #0
+; CHECK-NEXT: ld1w { z3.s }, p0/z, [x0, x10, lsl #2]
 ; CHECK-NEXT: mov z0.s, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z1.s, p2/m, #0 // =0x0
-; CHECK-NEXT: mov z3.s, p3/m, #0 // =0x0
-; CHECK-NEXT: st1w { z2.s }, p0, [x0]
+; CHECK-NEXT: mov z2.s, p2/m, #0 // =0x0
+; CHECK-NEXT: st1w { z1.s }, p0, [x0]
 ; CHECK-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; CHECK-NEXT: st1w { z1.s }, p0, [x0, #1, mul vl]
-; CHECK-NEXT: st1w { z3.s }, p0, [x0, x9, lsl #2]
+; CHECK-NEXT: mov z3.s, p3/m, #0 // =0x0
+; CHECK-NEXT: st1w { z2.s }, p0, [x0, x9, lsl #2]
+; CHECK-NEXT: st1w { z3.s }, p0, [x0, x10, lsl #2]
 ; CHECK-NEXT: .LBB1_2: // %exit
 ; CHECK-NEXT: ret
 %broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer

>From 8de8e9570321df8b166d306423c873908aa8b6a8 Mon Sep 17 00:00:00 2001
From: Ricardo Jesus <r...@nvidia.com>
Date: Thu, 13 Mar 2025 02:35:48 -0700
Subject: [PATCH 4/4] Fix getMemVTFromNode for fixed length vectors..

---
 .../Target/AArch64/AArch64ISelDAGToDAG.cpp    | 26 ++++++++---
 .../AArch64/sve-fixed-length-offsets.ll       | 19 ++++----
 .../sve-fixed-length-permute-zip-uzp-trn.ll   | 43 +++++++++----------
 .../AArch64/sve-fixed-length-shuffles.ll      | 27 ++++++------
 llvm/test/CodeGen/AArch64/sve-vscale-attr.ll  | 13 +++---
 5 files changed, 66 insertions(+), 62 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index d338c22267885..22083460b400a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -7275,11 +7275,26 @@ static EVT getPackedVectorTypeFromPredicateType(LLVMContext &Ctx, EVT PredVT,
 /// Return the EVT of the data associated to a memory operation in \p
 /// Root. If such EVT cannot be retrived, it returns an invalid EVT.
 static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
-  if (isa<MemSDNode>(Root))
-    return cast<MemSDNode>(Root)->getMemoryVT();
+  if (auto *MemIntr = dyn_cast<MemIntrinsicSDNode>(Root))
+    return MemIntr->getMemoryVT();
+
+  if (isa<MemSDNode>(Root)) {
+    EVT MemVT = cast<MemSDNode>(Root)->getMemoryVT();
+
+    EVT DataVT;
+    if (auto *Load = dyn_cast<LoadSDNode>(Root))
+      DataVT = Load->getValueType(0);
+    else if (auto *Load = dyn_cast<MaskedLoadSDNode>(Root))
+      DataVT = Load->getValueType(0);
+    else if (auto *Store = dyn_cast<StoreSDNode>(Root))
+      DataVT = Store->getValue().getValueType();
+    else if (auto *Store = dyn_cast<MaskedStoreSDNode>(Root))
+      DataVT = Store->getValue().getValueType();
+    else
+      llvm_unreachable("Unexpected MemSDNode!");
 
-  if (isa<MemIntrinsicSDNode>(Root))
-    return cast<MemIntrinsicSDNode>(Root)->getMemoryVT();
+    return DataVT.changeVectorElementType(MemVT.getVectorElementType());
+  }
 
   const unsigned Opcode = Root->getOpcode();
   // For custom ISD nodes, we have to look at them individually to extract the
@@ -7388,8 +7403,7 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexedSVE(SDNode *Root, SDValue N,
     const auto KnownVScale =
         Subtarget->getSVEVectorSizeInBits() / AArch64::SVEBitsPerBlock;
 
-    if (!KnownVScale || ByteOffset % KnownVScale != 0 ||
-        !MemVT.isScalableVector())
+    if (!KnownVScale || ByteOffset % KnownVScale != 0)
       return false;
 
     MulImm = ByteOffset / KnownVScale;
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
index 84ab5493b03ee..d7b67d73a671e 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-offsets.ll
@@ -381,12 +381,10 @@ define void @v8i32(ptr %ldptr, ptr %stptr) {
 ; CHECK-256-LABEL: v8i32:
 ; CHECK-256: // %bb.0:
 ; CHECK-256-NEXT: ptrue p0.s
-; CHECK-256-NEXT: mov x8, #16 // =0x10
-; CHECK-256-NEXT: mov x9, #8 // =0x8
-; CHECK-256-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; CHECK-256-NEXT: ld1w { z1.s }, p0/z, [x0, x9, lsl #2]
-; CHECK-256-NEXT: st1w { z0.s }, p0, [x1, x8, lsl #2]
-; CHECK-256-NEXT: st1w { z1.s }, p0, [x1, x9, lsl #2]
+; CHECK-256-NEXT: ld1w { z0.s }, p0/z, [x0, #2, mul vl]
+; CHECK-256-NEXT: ld1w { z1.s }, p0/z, [x0, #1, mul vl]
+; CHECK-256-NEXT: st1w { z0.s }, p0, [x1, #2, mul vl]
+; CHECK-256-NEXT: st1w { z1.s }, p0, [x1, #1, mul vl]
 ; CHECK-256-NEXT: ret
 ;
 ; CHECK-512-LABEL: v8i32:
@@ -419,7 +417,6 @@ define void @v8i32(ptr %ldptr, ptr %stptr) {
   ret void
 }
 
-; FIXME: This is wrong for VLS.
 define void @v8i32_vscale(ptr %0) {
 ; CHECK-LABEL: v8i32_vscale:
 ; CHECK: // %bb.0:
@@ -441,28 +438,28 @@ define void @v8i32_vscale(ptr %0) {
 ; CHECK-256: // %bb.0:
 ; CHECK-256-NEXT: mov z0.s, #1 // =0x1
 ; CHECK-256-NEXT: ptrue p0.s
-; CHECK-256-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-256-NEXT: st1w { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-256-NEXT: ret
 ;
 ; CHECK-512-LABEL: v8i32_vscale:
 ; CHECK-512: // %bb.0:
 ; CHECK-512-NEXT: mov z0.s, #1 // =0x1
 ; CHECK-512-NEXT: ptrue p0.s, vl8
-; CHECK-512-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-512-NEXT: st1w { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-512-NEXT: ret
 ;
 ; CHECK-1024-LABEL: v8i32_vscale:
 ; CHECK-1024: // %bb.0:
 ; CHECK-1024-NEXT: mov z0.s, #1 // =0x1
 ; CHECK-1024-NEXT: ptrue p0.s, vl8
-; CHECK-1024-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-1024-NEXT: st1w { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-1024-NEXT: ret
 ;
 ; CHECK-2048-LABEL: v8i32_vscale:
 ; CHECK-2048: // %bb.0:
 ; CHECK-2048-NEXT: mov z0.s, #1 // =0x1
 ; CHECK-2048-NEXT: ptrue p0.s, vl8
-; CHECK-2048-NEXT: st1w { z0.s }, p0, [x0, #1, mul vl]
+; CHECK-2048-NEXT: st1w { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-2048-NEXT: ret
   %vl = call i64 @llvm.vscale()
   %vlx = shl i64 %vl, 5
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
index 52a4a5ff7cc4a..25143837285b0 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-permute-zip-uzp-trn.ll
@@ -33,19 +33,18 @@ define void @zip_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-LABEL: zip_v32i16:
 ; VBITS_EQ_256: // %bb.0:
 ; VBITS_EQ_256-NEXT: ptrue p0.h
-; VBITS_EQ_256-NEXT: mov x8, #16 // =0x10
-; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
+; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0, #1, mul vl]
 ; VBITS_EQ_256-NEXT: ld1h { z1.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x1, x8, lsl #1]
+; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x1, #1, mul vl]
 ; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
-; VBITS_EQ_256-NEXT: zip2 z5.h, z0.h, z2.h
-; VBITS_EQ_256-NEXT: zip1 z0.h, z0.h, z2.h
-; VBITS_EQ_256-NEXT: zip2 z4.h, z1.h, z3.h
-; VBITS_EQ_256-NEXT: zip1 z1.h, z1.h, z3.h
+; VBITS_EQ_256-NEXT: zip1 z5.h, z0.h, z2.h
+; VBITS_EQ_256-NEXT: zip2 z0.h, z0.h, z2.h
+; VBITS_EQ_256-NEXT: zip1 z4.h, z1.h, z3.h
+; VBITS_EQ_256-NEXT: zip2 z1.h, z1.h, z3.h
 ; VBITS_EQ_256-NEXT: add z2.h, z4.h, z5.h
 ; VBITS_EQ_256-NEXT: add z0.h, z1.h, z0.h
-; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z2.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, #1, mul vl]
 ; VBITS_EQ_256-NEXT: ret
 ;
 ; VBITS_EQ_512-LABEL: zip_v32i16:
@@ -241,19 +240,18 @@ define void @trn_v32i16(ptr %a, ptr %b) #0 {
 ; VBITS_EQ_256-LABEL: trn_v32i16:
 ; VBITS_EQ_256: // %bb.0:
 ; VBITS_EQ_256-NEXT: ptrue p0.h
-; VBITS_EQ_256-NEXT: mov x8, #16 // =0x10
-; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: ld1h { z1.h }, p0/z, [x1, x8, lsl #1]
-; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0]
-; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1]
+; VBITS_EQ_256-NEXT: ld1h { z0.h }, p0/z, [x0]
+; VBITS_EQ_256-NEXT: ld1h { z1.h }, p0/z, [x1]
+; VBITS_EQ_256-NEXT: ld1h { z2.h }, p0/z, [x0, #1, mul vl]
+; VBITS_EQ_256-NEXT: ld1h { z3.h }, p0/z, [x1, #1, mul vl]
 ; VBITS_EQ_256-NEXT: trn1 z4.h, z0.h, z1.h
 ; VBITS_EQ_256-NEXT: trn2 z0.h, z0.h, z1.h
 ; VBITS_EQ_256-NEXT: trn1 z1.h, z2.h, z3.h
 ; VBITS_EQ_256-NEXT: trn2 z2.h, z2.h, z3.h
 ; VBITS_EQ_256-NEXT: add z0.h, z4.h, z0.h
 ; VBITS_EQ_256-NEXT: add z1.h, z1.h, z2.h
-; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z0.h }, p0, [x0]
+; VBITS_EQ_256-NEXT: st1h { z1.h }, p0, [x0, #1, mul vl]
 ; VBITS_EQ_256-NEXT: ret
 ;
 ; VBITS_EQ_512-LABEL: trn_v32i16:
@@ -514,19 +512,18 @@ define void @uzp_v32i16(ptr %a, ptr %b) #1 {
 ; CHECK-LABEL: uzp_v32i16:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.h
-; CHECK-NEXT: mov x8, #16 // =0x10
-; CHECK-NEXT: ld1h { z0.h }, p0/z, [x1, x8, lsl #1]
-; CHECK-NEXT: ld1h { z1.h }, p0/z, [x1]
-; CHECK-NEXT: ld1h { z2.h }, p0/z, [x0, x8, lsl #1]
-; CHECK-NEXT: ld1h { z3.h }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1h { z1.h }, p0/z, [x0]
+; CHECK-NEXT: ld1h { z2.h }, p0/z, [x1, #1, mul vl]
+; CHECK-NEXT: ld1h { z3.h }, p0/z, [x1]
 ; CHECK-NEXT: uzp1 z4.h, z1.h, z0.h
 ; CHECK-NEXT: uzp2 z0.h, z1.h, z0.h
 ; CHECK-NEXT: uzp1 z1.h, z3.h, z2.h
 ; CHECK-NEXT: uzp2 z2.h, z3.h, z2.h
 ; CHECK-NEXT: add z0.h, z4.h, z0.h
 ; CHECK-NEXT: add z1.h, z1.h, z2.h
-; CHECK-NEXT: st1h { z0.h }, p0, [x0, x8, lsl #1]
-; CHECK-NEXT: st1h { z1.h }, p0, [x0]
+; CHECK-NEXT: st1h { z0.h }, p0, [x0]
+; CHECK-NEXT: st1h { z1.h }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT: ret
 %tmp1 = load <32 x i16>, ptr %a
 %tmp2 = load <32 x i16>, ptr %b
diff --git a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
index e33bc8da97c05..3fa7eca02c351 100644
--- a/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
+++ b/llvm/test/CodeGen/AArch64/sve-fixed-length-shuffles.ll
@@ -30,13 +30,10 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang
 ; CHECK-NEXT: // %bb.1: // %vector.body
 ; CHECK-NEXT: mov z0.b, #0 // =0x0
 ; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov x9, #8 // =0x8
-; CHECK-NEXT: mov x10, #24 // =0x18
 ; CHECK-NEXT: umov w8, v0.b[8]
 ; CHECK-NEXT: mov v1.16b, v0.16b
 ; CHECK-NEXT: mov v1.b[1], v0.b[1]
 ; CHECK-NEXT: fmov s2, w8
-; CHECK-NEXT: mov x8, #16 // =0x10
 ; CHECK-NEXT: mov v2.b[1], v0.b[9]
 ; CHECK-NEXT: mov v1.b[2], v0.b[2]
 ; CHECK-NEXT: mov v2.b[2], v0.b[10]
@@ -69,25 +66,25 @@ define void @crash_when_lowering_extract_shuffle(ptr %dst, i1 %cond) vscale_rang
 ; CHECK-NEXT: lsl z3.s, z3.s, #31
 ; CHECK-NEXT: asr z2.s, z2.s, #31
 ; CHECK-NEXT: and z0.s, z0.s, #0x1
-; CHECK-NEXT: cmpne p4.s, p0/z, z1.s, #0
+; CHECK-NEXT: cmpne p1.s, p0/z, z1.s, #0
 ; CHECK-NEXT: ld1w { z1.s }, p0/z, [x0]
 ; CHECK-NEXT: asr z3.s, z3.s, #31
 ; CHECK-NEXT: and z2.s, z2.s, #0x1
-; CHECK-NEXT: cmpne p1.s, p0/z, z0.s, #0
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
+; CHECK-NEXT: cmpne p2.s, p0/z, z0.s, #0
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, #2, mul vl]
 ; CHECK-NEXT: and z3.s, z3.s, #0x1
-; CHECK-NEXT: cmpne p2.s, p0/z, z2.s, #0
-; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0, x9, lsl #2]
-; CHECK-NEXT: mov z1.s, p4/m, #0 // =0x0
+; CHECK-NEXT: cmpne p4.s, p0/z, z2.s, #0
+; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: mov z1.s, p1/m, #0 // =0x0
 ; CHECK-NEXT: cmpne p3.s, p0/z, z3.s, #0
-; CHECK-NEXT: ld1w { z3.s }, p0/z, [x0, x10, lsl #2]
-; CHECK-NEXT: mov z0.s, p1/m, #0 // =0x0
-; CHECK-NEXT: mov z2.s, p2/m, #0 // =0x0
+; CHECK-NEXT: ld1w { z3.s }, p0/z, [x0, #3, mul vl]
+; CHECK-NEXT: mov z0.s, p2/m, #0 // =0x0
+; CHECK-NEXT: mov z2.s, p4/m, #0 // =0x0
 ; CHECK-NEXT: st1w { z1.s }, p0, [x0]
-; CHECK-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
+; CHECK-NEXT: st1w { z0.s }, p0, [x0, #2, mul vl]
 ; CHECK-NEXT: mov z3.s, p3/m, #0 // =0x0
-; CHECK-NEXT: st1w { z2.s }, p0, [x0, x9, lsl #2]
-; CHECK-NEXT: st1w { z3.s }, p0, [x0, x10, lsl #2]
+; CHECK-NEXT: st1w { z2.s }, p0, [x0, #1, mul vl]
+; CHECK-NEXT: st1w { z3.s }, p0, [x0, #3, mul vl]
 ; CHECK-NEXT: .LBB1_2: // %exit
 ; CHECK-NEXT: ret
 %broadcast.splat = shufflevector <32 x i1> zeroinitializer, <32 x i1> zeroinitializer, <32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll b/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
index 5f746861d868e..5d5aa4b1c0e92 100644
--- a/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
+++ b/llvm/test/CodeGen/AArch64/sve-vscale-attr.ll
@@ -63,15 +63,14 @@ define void @func_vscale2_2(ptr %a, ptr %b) #2 {
 ; CHECK-LABEL: func_vscale2_2:
 ; CHECK: // %bb.0:
 ; CHECK-NEXT: ptrue p0.s
-; CHECK-NEXT: mov x8, #8 // =0x8
-; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0, x8, lsl #2]
-; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1, x8, lsl #2]
-; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0]
-; CHECK-NEXT: ld1w { z3.s }, p0/z, [x1]
+; CHECK-NEXT: ld1w { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ld1w { z1.s }, p0/z, [x1]
+; CHECK-NEXT: ld1w { z2.s }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ld1w { z3.s }, p0/z, [x1, #1, mul vl]
 ; CHECK-NEXT: add z0.s, z0.s, z1.s
 ; CHECK-NEXT: add z1.s, z2.s, z3.s
-; CHECK-NEXT: st1w { z0.s }, p0, [x0, x8, lsl #2]
-; CHECK-NEXT: st1w { z1.s }, p0, [x0]
+; CHECK-NEXT: st1w { z0.s }, p0, [x0]
+; CHECK-NEXT: st1w { z1.s }, p0, [x0, #1, mul vl]
 ; CHECK-NEXT: ret
 %op1 = load <16 x i32>, ptr %a
 %op2 = load <16 x i32>, ptr %b

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits