paulwalker-arm created this revision.
Herald added a project: All.
paulwalker-arm requested review of this revision.
Herald added a project: clang.
Herald added a subscriber: cfe-commits.

Fixes https://github.com/llvm/llvm-project/issues/63223


Repository:
  rG LLVM Github Monorepo

https://reviews.llvm.org/D153560

Files:
  clang/lib/CodeGen/CGExprScalar.cpp
  clang/test/CodeGen/aarch64-sve.cpp

Index: clang/test/CodeGen/aarch64-sve.cpp
===================================================================
--- /dev/null
+++ clang/test/CodeGen/aarch64-sve.cpp
@@ -0,0 +1,169 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -emit-llvm -o - %s | FileCheck %s
+
+// CHECK-LABEL: define dso_local void @_Z11test_localsv
+// CHECK-SAME: () #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT:  entry:
+// CHECK-NEXT:    [[S8:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    [[S16:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    [[S32:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[S64:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    [[U8:%.*]] = alloca <vscale x 16 x i8>, align 16
+// CHECK-NEXT:    [[U16:%.*]] = alloca <vscale x 8 x i16>, align 16
+// CHECK-NEXT:    [[U32:%.*]] = alloca <vscale x 4 x i32>, align 16
+// CHECK-NEXT:    [[U64:%.*]] = alloca <vscale x 2 x i64>, align 16
+// CHECK-NEXT:    [[F16:%.*]] = alloca <vscale x 8 x half>, align 16
+// CHECK-NEXT:    [[F32:%.*]] = alloca <vscale x 4 x float>, align 16
+// CHECK-NEXT:    [[F64:%.*]] = alloca <vscale x 2 x double>, align 16
+// CHECK-NEXT:    [[BF16:%.*]] = alloca <vscale x 8 x bfloat>, align 16
+// CHECK-NEXT:    [[S8X2:%.*]] = alloca <vscale x 32 x i8>, align 16
+// CHECK-NEXT:    [[S16X2:%.*]] = alloca <vscale x 16 x i16>, align 16
+// CHECK-NEXT:    [[S32X2:%.*]] = alloca <vscale x 8 x i32>, align 16
+// CHECK-NEXT:    [[S64X2:%.*]] = alloca <vscale x 4 x i64>, align 16
+// CHECK-NEXT:    [[U8X2:%.*]] = alloca <vscale x 32 x i8>, align 16
+// CHECK-NEXT:    [[U16X2:%.*]] = alloca <vscale x 16 x i16>, align 16
+// CHECK-NEXT:    [[U32X2:%.*]] = alloca <vscale x 8 x i32>, align 16
+// CHECK-NEXT:    [[U64X2:%.*]] = alloca <vscale x 4 x i64>, align 16
+// CHECK-NEXT:    [[F16X2:%.*]] = alloca <vscale x 16 x half>, align 16
+// CHECK-NEXT:    [[F32X2:%.*]] = alloca <vscale x 8 x float>, align 16
+// CHECK-NEXT:    [[F64X2:%.*]] = alloca <vscale x 4 x double>, align 16
+// CHECK-NEXT:    [[BF16X2:%.*]] = alloca <vscale x 16 x bfloat>, align 16
+// CHECK-NEXT:    [[S8X3:%.*]] = alloca <vscale x 48 x i8>, align 16
+// CHECK-NEXT:    [[S16X3:%.*]] = alloca <vscale x 24 x i16>, align 16
+// CHECK-NEXT:    [[S32X3:%.*]] = alloca <vscale x 12 x i32>, align 16
+// CHECK-NEXT:    [[S64X3:%.*]] = alloca <vscale x 6 x i64>, align 16
+// CHECK-NEXT:    [[U8X3:%.*]] = alloca <vscale x 48 x i8>, align 16
+// CHECK-NEXT:    [[U16X3:%.*]] = alloca <vscale x 24 x i16>, align 16
+// CHECK-NEXT:    [[U32X3:%.*]] = alloca <vscale x 12 x i32>, align 16
+// CHECK-NEXT:    [[U64X3:%.*]] = alloca <vscale x 6 x i64>, align 16
+// CHECK-NEXT:    [[F16X3:%.*]] = alloca <vscale x 24 x half>, align 16
+// CHECK-NEXT:    [[F32X3:%.*]] = alloca <vscale x 12 x float>, align 16
+// CHECK-NEXT:    [[F64X3:%.*]] = alloca <vscale x 6 x double>, align 16
+// CHECK-NEXT:    [[BF16X3:%.*]] = alloca <vscale x 24 x bfloat>, align 16
+// CHECK-NEXT:    [[S8X4:%.*]] = alloca <vscale x 64 x i8>, align 16
+// CHECK-NEXT:    [[S16X4:%.*]] = alloca <vscale x 32 x i16>, align 16
+// CHECK-NEXT:    [[S32X4:%.*]] = alloca <vscale x 16 x i32>, align 16
+// CHECK-NEXT:    [[S64X4:%.*]] = alloca <vscale x 8 x i64>, align 16
+// CHECK-NEXT:    [[U8X4:%.*]] = alloca <vscale x 64 x i8>, align 16
+// CHECK-NEXT:    [[U16X4:%.*]] = alloca <vscale x 32 x i16>, align 16
+// CHECK-NEXT:    [[U32X4:%.*]] = alloca <vscale x 16 x i32>, align 16
+// CHECK-NEXT:    [[U64X4:%.*]] = alloca <vscale x 8 x i64>, align 16
+// CHECK-NEXT:    [[F16X4:%.*]] = alloca <vscale x 32 x half>, align 16
+// CHECK-NEXT:    [[F32X4:%.*]] = alloca <vscale x 16 x float>, align 16
+// CHECK-NEXT:    [[F64X4:%.*]] = alloca <vscale x 8 x double>, align 16
+// CHECK-NEXT:    [[BF16X4:%.*]] = alloca <vscale x 32 x bfloat>, align 16
+// CHECK-NEXT:    [[B8:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-NEXT:    [[B8X2:%.*]] = alloca <vscale x 32 x i1>, align 2
+// CHECK-NEXT:    [[B8X4:%.*]] = alloca <vscale x 64 x i1>, align 2
+// CHECK-NEXT:    store <vscale x 16 x i8> zeroinitializer, ptr [[S8]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> zeroinitializer, ptr [[S16]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[S32]], align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> zeroinitializer, ptr [[S64]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i8> zeroinitializer, ptr [[U8]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i16> zeroinitializer, ptr [[U16]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[U32]], align 16
+// CHECK-NEXT:    store <vscale x 2 x i64> zeroinitializer, ptr [[U64]], align 16
+// CHECK-NEXT:    store <vscale x 8 x half> zeroinitializer, ptr [[F16]], align 16
+// CHECK-NEXT:    store <vscale x 4 x float> zeroinitializer, ptr [[F32]], align 16
+// CHECK-NEXT:    store <vscale x 2 x double> zeroinitializer, ptr [[F64]], align 16
+// CHECK-NEXT:    store <vscale x 8 x bfloat> zeroinitializer, ptr [[BF16]], align 16
+// CHECK-NEXT:    store <vscale x 32 x i8> zeroinitializer, ptr [[S8X2]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i16> zeroinitializer, ptr [[S16X2]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[S32X2]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i64> zeroinitializer, ptr [[S64X2]], align 16
+// CHECK-NEXT:    store <vscale x 32 x i8> zeroinitializer, ptr [[U8X2]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i16> zeroinitializer, ptr [[U16X2]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[U32X2]], align 16
+// CHECK-NEXT:    store <vscale x 4 x i64> zeroinitializer, ptr [[U64X2]], align 16
+// CHECK-NEXT:    store <vscale x 16 x half> zeroinitializer, ptr [[F16X2]], align 16
+// CHECK-NEXT:    store <vscale x 8 x float> zeroinitializer, ptr [[F32X2]], align 16
+// CHECK-NEXT:    store <vscale x 4 x double> zeroinitializer, ptr [[F64X2]], align 16
+// CHECK-NEXT:    store <vscale x 16 x bfloat> zeroinitializer, ptr [[BF16X2]], align 16
+// CHECK-NEXT:    store <vscale x 48 x i8> zeroinitializer, ptr [[S8X3]], align 16
+// CHECK-NEXT:    store <vscale x 24 x i16> zeroinitializer, ptr [[S16X3]], align 16
+// CHECK-NEXT:    store <vscale x 12 x i32> zeroinitializer, ptr [[S32X3]], align 16
+// CHECK-NEXT:    store <vscale x 6 x i64> zeroinitializer, ptr [[S64X3]], align 16
+// CHECK-NEXT:    store <vscale x 48 x i8> zeroinitializer, ptr [[U8X3]], align 16
+// CHECK-NEXT:    store <vscale x 24 x i16> zeroinitializer, ptr [[U16X3]], align 16
+// CHECK-NEXT:    store <vscale x 12 x i32> zeroinitializer, ptr [[U32X3]], align 16
+// CHECK-NEXT:    store <vscale x 6 x i64> zeroinitializer, ptr [[U64X3]], align 16
+// CHECK-NEXT:    store <vscale x 24 x half> zeroinitializer, ptr [[F16X3]], align 16
+// CHECK-NEXT:    store <vscale x 12 x float> zeroinitializer, ptr [[F32X3]], align 16
+// CHECK-NEXT:    store <vscale x 6 x double> zeroinitializer, ptr [[F64X3]], align 16
+// CHECK-NEXT:    store <vscale x 24 x bfloat> zeroinitializer, ptr [[BF16X3]], align 16
+// CHECK-NEXT:    store <vscale x 64 x i8> zeroinitializer, ptr [[S8X4]], align 16
+// CHECK-NEXT:    store <vscale x 32 x i16> zeroinitializer, ptr [[S16X4]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[S32X4]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i64> zeroinitializer, ptr [[S64X4]], align 16
+// CHECK-NEXT:    store <vscale x 64 x i8> zeroinitializer, ptr [[U8X4]], align 16
+// CHECK-NEXT:    store <vscale x 32 x i16> zeroinitializer, ptr [[U16X4]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[U32X4]], align 16
+// CHECK-NEXT:    store <vscale x 8 x i64> zeroinitializer, ptr [[U64X4]], align 16
+// CHECK-NEXT:    store <vscale x 32 x half> zeroinitializer, ptr [[F16X4]], align 16
+// CHECK-NEXT:    store <vscale x 16 x float> zeroinitializer, ptr [[F32X4]], align 16
+// CHECK-NEXT:    store <vscale x 8 x double> zeroinitializer, ptr [[F64X4]], align 16
+// CHECK-NEXT:    store <vscale x 32 x bfloat> zeroinitializer, ptr [[BF16X4]], align 16
+// CHECK-NEXT:    store <vscale x 16 x i1> zeroinitializer, ptr [[B8]], align 2
+// CHECK-NEXT:    store <vscale x 32 x i1> zeroinitializer, ptr [[B8X2]], align 2
+// CHECK-NEXT:    store <vscale x 64 x i1> zeroinitializer, ptr [[B8X4]], align 2
+// CHECK-NEXT:    ret void
+//
+// Value-initialization ({}) of every SVE sizeless builtin type must lower to a
+// zeroinitializer store rather than crashing CodeGen (llvm-project issue #63223).
+void test_locals(void) {
+  __SVInt8_t s8{};
+  __SVInt16_t s16{};
+  __SVInt32_t s32{};
+  __SVInt64_t s64{};
+  __SVUint8_t u8{};
+  __SVUint16_t u16{};
+  __SVUint32_t u32{};
+  __SVUint64_t u64{};
+  __SVFloat16_t f16{};
+  __SVFloat32_t f32{};
+  __SVFloat64_t f64{};
+  __SVBFloat16_t bf16{};
+
+  __clang_svint8x2_t s8x2{};
+  __clang_svint16x2_t s16x2{};
+  __clang_svint32x2_t s32x2{};
+  __clang_svint64x2_t s64x2{};
+  __clang_svuint8x2_t u8x2{};
+  __clang_svuint16x2_t u16x2{};
+  __clang_svuint32x2_t u32x2{};
+  __clang_svuint64x2_t u64x2{};
+  __clang_svfloat16x2_t f16x2{};
+  __clang_svfloat32x2_t f32x2{};
+  __clang_svfloat64x2_t f64x2{};
+  __clang_svbfloat16x2_t bf16x2{};
+
+  __clang_svint8x3_t s8x3{};
+  __clang_svint16x3_t s16x3{};
+  __clang_svint32x3_t s32x3{};
+  __clang_svint64x3_t s64x3{};
+  __clang_svuint8x3_t u8x3{};
+  __clang_svuint16x3_t u16x3{};
+  __clang_svuint32x3_t u32x3{};
+  __clang_svuint64x3_t u64x3{};
+  __clang_svfloat16x3_t f16x3{};
+  __clang_svfloat32x3_t f32x3{};
+  __clang_svfloat64x3_t f64x3{};
+  __clang_svbfloat16x3_t bf16x3{};
+
+  __clang_svint8x4_t s8x4{};
+  __clang_svint16x4_t s16x4{};
+  __clang_svint32x4_t s32x4{};
+  __clang_svint64x4_t s64x4{};
+  __clang_svuint8x4_t u8x4{};
+  __clang_svuint16x4_t u16x4{};
+  __clang_svuint32x4_t u32x4{};
+  __clang_svuint64x4_t u64x4{};
+  __clang_svfloat16x4_t f16x4{};
+  __clang_svfloat32x4_t f32x4{};
+  __clang_svfloat64x4_t f64x4{};
+  __clang_svbfloat16x4_t bf16x4{};
+
+  __SVBool_t b8{};
+  __clang_svboolx2_t b8x2{};
+  __clang_svboolx4_t b8x4{};
+}
Index: clang/lib/CodeGen/CGExprScalar.cpp
===================================================================
--- clang/lib/CodeGen/CGExprScalar.cpp
+++ clang/lib/CodeGen/CGExprScalar.cpp
@@ -1869,6 +1869,15 @@
     return Visit(E->getInit(0));
   }
 
+  if (isa<llvm::ScalableVectorType>(VType)) {
+    if (NumInitElements == 0) {
+      // C++11 value-initialization for the vector.
+      return EmitNullValue(E->getType());
+    }
+
+    llvm_unreachable("Unexpected initialization of a scalable vector!");
+  }
+
   unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();
 
   // Loop over initializers collecting the Value for each, and remembering
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to