Author: Andy Kaylor
Date: 2025-06-27T09:43:26-07:00
New Revision: 74cabdb806aea341f6bdcc57e2377882f08a4684

URL: 
https://github.com/llvm/llvm-project/commit/74cabdb806aea341f6bdcc57e2377882f08a4684
DIFF: 
https://github.com/llvm/llvm-project/commit/74cabdb806aea341f6bdcc57e2377882f08a4684.diff

LOG: [CIR] Add basic support for operator new (#145802)

This adds the code to handle operator new expressions in ClangIR.
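
As a rough illustration (mirroring the cases in the new test,
clang/test/CIR/CodeGen/new.cpp), the non-array forms that now lower through
ClangIR look like this; the enclosing function below is a hypothetical
driver, not code from the patch:

    struct S2 {
      S2();
      S2(int, int);
      int a;
      int b;
    };

    void example() {
      int *pn = new int{2};     // scalar new with a brace-initializer
      double *pd = new double;  // scalar new with no initializer
      S2 *ps = new S2(1, 2);    // new with a constructor call
    }

Each case emits a cir.call to the sized allocation function (_Znwm), a
bitcast of the returned pointer to the element pointer type, and then the
initialization or constructor call. Array new, placement new, and
allocations that need a null check still report errorNYI.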

Added: 
    clang/test/CIR/CodeGen/new.cpp

Modified: 
    clang/include/clang/CIR/MissingFeatures.h
    clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
    clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
    clang/lib/CIR/CodeGen/CIRGenFunction.h
    clang/lib/CIR/CodeGen/CIRGenModule.cpp
    clang/lib/CIR/CodeGen/CIRGenTypeCache.h

Removed: 
    


################################################################################
diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index 7009d6d7d702a..06dc1ab714dd7 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -110,6 +110,9 @@ struct MissingFeatures {
   static bool opCallLandingPad() { return false; }
   static bool opCallContinueBlock() { return false; }
 
+  // CXXNewExpr
+  static bool exprNewNullCheck() { return false; }
+
   // FnInfoOpts -- This is used to track whether calls are chain calls or
   // instance methods. Classic codegen uses chain call to track an extra free
   // register for x86 and uses instance method as a condition for a thunk
@@ -170,6 +173,7 @@ struct MissingFeatures {
   static bool armComputeVolatileBitfields() { return false; }
   static bool asmLabelAttr() { return false; }
   static bool astVarDeclInterface() { return false; }
+  static bool attributeBuiltin() { return false; }
   static bool attributeNoBuiltin() { return false; }
   static bool bitfields() { return false; }
   static bool builtinCall() { return false; }

diff --git a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
index d3888baea5d5e..e0d30ad22d271 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCXXExpr.cpp
@@ -197,3 +197,195 @@ RValue CIRGenFunction::emitCXXMemberOrOperatorCall(
   assert(!cir::MissingFeatures::opCallMustTail());
   return emitCall(fnInfo, callee, returnValue, args, nullptr, loc);
 }
+
+static mlir::Value emitCXXNewAllocSize(CIRGenFunction &cgf, const CXXNewExpr *e,
+                                       unsigned minElements,
+                                       mlir::Value &numElements,
+                                       mlir::Value &sizeWithoutCookie) {
+  QualType type = e->getAllocatedType();
+  mlir::Location loc = cgf.getLoc(e->getSourceRange());
+
+  if (!e->isArray()) {
+    CharUnits typeSize = cgf.getContext().getTypeSizeInChars(type);
+    sizeWithoutCookie = cgf.getBuilder().getConstant(
+        loc, cir::IntAttr::get(cgf.SizeTy, typeSize.getQuantity()));
+    return sizeWithoutCookie;
+  }
+
+  cgf.cgm.errorNYI(e->getSourceRange(), "emitCXXNewAllocSize: array");
+  return {};
+}
+
+static void storeAnyExprIntoOneUnit(CIRGenFunction &cgf, const Expr *init,
+                                    QualType allocType, Address newPtr,
+                                    AggValueSlot::Overlap_t mayOverlap) {
+  // FIXME: Refactor with emitExprAsInit.
+  switch (cgf.getEvaluationKind(allocType)) {
+  case cir::TEK_Scalar:
+    cgf.emitScalarInit(init, cgf.getLoc(init->getSourceRange()),
+                       cgf.makeAddrLValue(newPtr, allocType), false);
+    return;
+  case cir::TEK_Complex:
+    cgf.cgm.errorNYI(init->getSourceRange(),
+                     "storeAnyExprIntoOneUnit: complex");
+    return;
+  case cir::TEK_Aggregate: {
+    assert(!cir::MissingFeatures::aggValueSlotGC());
+    assert(!cir::MissingFeatures::sanitizers());
+    AggValueSlot slot = AggValueSlot::forAddr(
+        newPtr, allocType.getQualifiers(), AggValueSlot::IsDestructed,
+        AggValueSlot::IsNotAliased, mayOverlap, AggValueSlot::IsNotZeroed);
+    cgf.emitAggExpr(init, slot);
+    return;
+  }
+  }
+  llvm_unreachable("bad evaluation kind");
+}
+
+static void emitNewInitializer(CIRGenFunction &cgf, const CXXNewExpr *e,
+                               QualType elementType, mlir::Type elementTy,
+                               Address newPtr, mlir::Value numElements,
+                               mlir::Value allocSizeWithoutCookie) {
+  assert(!cir::MissingFeatures::generateDebugInfo());
+  if (e->isArray()) {
+    cgf.cgm.errorNYI(e->getSourceRange(), "emitNewInitializer: array");
+  } else if (const Expr *init = e->getInitializer()) {
+    storeAnyExprIntoOneUnit(cgf, init, e->getAllocatedType(), newPtr,
+                            AggValueSlot::DoesNotOverlap);
+  }
+}
+
+/// Emit a call to an operator new or operator delete function, as implicitly
+/// created by new-expressions and delete-expressions.
+static RValue emitNewDeleteCall(CIRGenFunction &cgf,
+                                const FunctionDecl *calleeDecl,
+                                const FunctionProtoType *calleeType,
+                                const CallArgList &args) {
+  cir::CIRCallOpInterface callOrTryCall;
+  cir::FuncOp calleePtr = cgf.cgm.getAddrOfFunction(calleeDecl);
+  CIRGenCallee callee =
+      CIRGenCallee::forDirect(calleePtr, GlobalDecl(calleeDecl));
+  RValue rv =
+      cgf.emitCall(cgf.cgm.getTypes().arrangeFreeFunctionCall(args, calleeType),
+                   callee, ReturnValueSlot(), args, &callOrTryCall);
+
+  /// C++1y [expr.new]p10:
+  ///   [In a new-expression,] an implementation is allowed to omit a call
+  ///   to a replaceable global allocation function.
+  ///
+  /// We model such elidable calls with the 'builtin' attribute.
+  assert(!cir::MissingFeatures::attributeBuiltin());
+  return rv;
+}
+
+mlir::Value CIRGenFunction::emitCXXNewExpr(const CXXNewExpr *e) {
+  // The element type being allocated.
+  QualType allocType = getContext().getBaseElementType(e->getAllocatedType());
+
+  // 1. Build a call to the allocation function.
+  FunctionDecl *allocator = e->getOperatorNew();
+
+  // If there is a brace-initializer, cannot allocate fewer elements than inits.
+  unsigned minElements = 0;
+  if (e->isArray() && e->hasInitializer()) {
+    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array initializer");
+  }
+
+  mlir::Value numElements = nullptr;
+  mlir::Value allocSizeWithoutCookie = nullptr;
+  mlir::Value allocSize = emitCXXNewAllocSize(
+      *this, e, minElements, numElements, allocSizeWithoutCookie);
+  CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
+
+  // Emit the allocation call.
+  Address allocation = Address::invalid();
+  CallArgList allocatorArgs;
+  if (allocator->isReservedGlobalPlacementOperator()) {
+    cgm.errorNYI(e->getSourceRange(),
+                 "emitCXXNewExpr: reserved global placement operator");
+  } else {
+    const FunctionProtoType *allocatorType =
+        allocator->getType()->castAs<FunctionProtoType>();
+    unsigned paramsToSkip = 0;
+
+    // The allocation size is the first argument.
+    QualType sizeType = getContext().getSizeType();
+    allocatorArgs.add(RValue::get(allocSize), sizeType);
+    ++paramsToSkip;
+
+    if (allocSize != allocSizeWithoutCookie) {
+      CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
+      allocAlign = std::max(allocAlign, cookieAlign);
+    }
+
+    // The allocation alignment may be passed as the second argument.
+    if (e->passAlignment()) {
+      cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: pass alignment");
+    }
+
+    // FIXME: Why do we not pass a CalleeDecl here?
+    emitCallArgs(allocatorArgs, allocatorType, e->placement_arguments(),
+                 AbstractCallee(), paramsToSkip);
+    RValue rv =
+        emitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
+
+    // Set !heapallocsite metadata on the call to operator new.
+    assert(!cir::MissingFeatures::generateDebugInfo());
+
+    // If this was a call to a global replaceable allocation function that does
+    // not take an alignment argument, the allocator is known to produce storage
+    // that's suitably aligned for any object that fits, up to a known
+    // threshold. Otherwise assume it's suitably aligned for the allocated type.
+    CharUnits allocationAlign = allocAlign;
+    if (!e->passAlignment() &&
+        allocator->isReplaceableGlobalAllocationFunction()) {
+      const TargetInfo &target = cgm.getASTContext().getTargetInfo();
+      unsigned allocatorAlign = llvm::bit_floor(std::min<uint64_t>(
+          target.getNewAlign(), getContext().getTypeSize(allocType)));
+      allocationAlign = std::max(
+          allocationAlign, getContext().toCharUnitsFromBits(allocatorAlign));
+    }
+
+    mlir::Value allocPtr = rv.getValue();
+    allocation = Address(
+        allocPtr, mlir::cast<cir::PointerType>(allocPtr.getType()).getPointee(),
+        allocationAlign);
+  }
+
+  // Emit a null check on the allocation result if the allocation
+  // function is allowed to return null (because it has a non-throwing
+  // exception spec or is the reserved placement new) and we have an
+  // interesting initializer or will be running sanitizers on the
+  // initialization.
+  bool nullCheck = e->shouldNullCheckAllocation() &&
+                   (!allocType.isPODType(getContext()) || e->hasInitializer());
+  assert(!cir::MissingFeatures::exprNewNullCheck());
+  if (nullCheck)
+    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: null check");
+
+  // If there's an operator delete, enter a cleanup to call it if an
+  // exception is thrown.
+  if (e->getOperatorDelete() &&
+      !e->getOperatorDelete()->isReservedGlobalPlacementOperator())
+    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: operator delete");
+
+  if (allocSize != allocSizeWithoutCookie)
+    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: array with cookies");
+
+  mlir::Type elementTy = convertTypeForMem(allocType);
+  Address result = builder.createElementBitCast(getLoc(e->getSourceRange()),
+                                                allocation, elementTy);
+
+  // Passing pointer through launder.invariant.group to avoid propagation of
+  // vptrs information which may be included in previous type.
+  // To not break LTO with different optimization levels, we do it regardless
+  // of optimization level.
+  if (cgm.getCodeGenOpts().StrictVTablePointers &&
+      allocator->isReservedGlobalPlacementOperator())
+    cgm.errorNYI(e->getSourceRange(), "emitCXXNewExpr: strict vtable 
pointers");
+
+  assert(!cir::MissingFeatures::sanitizers());
+
+  emitNewInitializer(*this, e, allocType, elementTy, result, numElements,
+                     allocSizeWithoutCookie);
+  return result.getPointer();
+}

diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index b6270ea36fe05..955bb5ffc4395 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -609,6 +609,10 @@ class ScalarExprEmitter : public StmtVisitor<ScalarExprEmitter, mlir::Value> {
 
   mlir::Value VisitCXXThisExpr(CXXThisExpr *te) { return cgf.loadCXXThis(); }
 
+  mlir::Value VisitCXXNewExpr(const CXXNewExpr *e) {
+    return cgf.emitCXXNewExpr(e);
+  }
+
   /// Emit a conversion from the specified type to the specified destination
   /// type, both of which are CIR scalar types.
   /// TODO: do we need ScalarConversionOpts here? Should be done in another

diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 2e54243f18cff..7e6fdf130cca1 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -770,6 +770,15 @@ class CIRGenFunction : public CIRGenTypeCache {
                   const CIRGenCallee &callee, ReturnValueSlot returnValue,
                   const CallArgList &args, cir::CIRCallOpInterface *callOp,
                   mlir::Location loc);
+  RValue emitCall(const CIRGenFunctionInfo &funcInfo,
+                  const CIRGenCallee &callee, ReturnValueSlot returnValue,
+                  const CallArgList &args,
+                  cir::CIRCallOpInterface *callOrTryCall = nullptr) {
+    assert(currSrcLoc && "source location must have been set");
+    return emitCall(funcInfo, callee, returnValue, args, callOrTryCall,
+                    *currSrcLoc);
+  }
+
   RValue emitCall(clang::QualType calleeTy, const CIRGenCallee &callee,
                   const clang::CallExpr *e, ReturnValueSlot returnValue);
   void emitCallArg(CallArgList &args, const clang::Expr *e,
@@ -836,6 +845,8 @@ class CIRGenFunction : public CIRGenTypeCache {
       clang::NestedNameSpecifier *qualifier, bool isArrow,
       const clang::Expr *base);
 
+  mlir::Value emitCXXNewExpr(const CXXNewExpr *e);
+
   RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e,
                                        const CXXMethodDecl *md,
                                        ReturnValueSlot returnValue);

diff --git a/clang/lib/CIR/CodeGen/CIRGenModule.cpp b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
index 63cd55aef06e7..0f38d9aee5e72 100644
--- a/clang/lib/CIR/CodeGen/CIRGenModule.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenModule.cpp
@@ -95,6 +95,7 @@ CIRGenModule::CIRGenModule(mlir::MLIRContext &mlirContext,
   // TODO(CIR): Should be updated once TypeSizeInfoAttr is upstreamed
   const unsigned sizeTypeSize =
       astContext.getTypeSize(astContext.getSignedSizeType());
+  SizeAlignInBytes = astContext.toCharUnitsFromBits(sizeTypeSize).getQuantity();
   // In CIRGenTypeCache, UIntPtrTy and SizeType are fields of the same union
   UIntPtrTy =
       cir::IntType::get(&getMLIRContext(), sizeTypeSize, /*isSigned=*/false);

diff --git a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
index 12dbc3297a072..1d081d53ad15c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
+++ b/clang/lib/CIR/CodeGen/CIRGenTypeCache.h
@@ -66,6 +66,13 @@ struct CIRGenTypeCache {
     unsigned char PointerSizeInBytes;
   };
 
+  /// The alignment of size_t.
+  unsigned char SizeAlignInBytes;
+
+  clang::CharUnits getSizeAlign() const {
+    return clang::CharUnits::fromQuantity(SizeAlignInBytes);
+  }
+
   clang::CharUnits getPointerAlign() const {
     return clang::CharUnits::fromQuantity(PointerAlignInBytes);
   }

diff --git a/clang/test/CIR/CodeGen/new.cpp b/clang/test/CIR/CodeGen/new.cpp
new file mode 100644
index 0000000000000..4f88addc6116c
--- /dev/null
+++ b/clang/test/CIR/CodeGen/new.cpp
@@ -0,0 +1,158 @@
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -fclangir -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t-cir.ll %s
+// RUN: %clang_cc1 -std=c++20 -triple x86_64-unknown-linux-gnu -emit-llvm %s -o %t.ll
+// RUN: FileCheck --check-prefix=OGCG --input-file=%t.ll %s
+
+struct S {
+  int a;
+  int b;
+};
+
+void test_basic_new() {
+  S *ps = new S;
+  int *pn = new int;
+  double *pd = new double;
+}
+
+// CHECK: cir.func{{.*}} @_Z14test_basic_newv
+// CHECK:   %[[PS_ADDR:.*]] = cir.alloca !cir.ptr<!rec_S>, !cir.ptr<!cir.ptr<!rec_S>>, ["ps", init]
+// CHECK:   %[[PN_ADDR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["pn", init]
+// CHECK:   %[[PD_ADDR:.*]] = cir.alloca !cir.ptr<!cir.double>, !cir.ptr<!cir.ptr<!cir.double>>, ["pd", init]
+// CHECK:   %[[EIGHT:.*]] = cir.const #cir.int<8>
+// CHECK:   %[[NEW_S:.*]] = cir.call @_Znwm(%[[EIGHT]])
+// CHECK:   %[[NEW_S_PTR:.*]] = cir.cast(bitcast, %[[NEW_S]]
+// CHECK:   cir.call @_ZN1SC1Ev(%[[NEW_S_PTR]])
+// CHECK:   cir.store{{.*}} %[[NEW_S_PTR]], %[[PS_ADDR]]
+// CHECK:   %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK:   %[[NEW_INT:.*]] = cir.call @_Znwm(%[[FOUR]])
+// CHECK:   %[[NEW_INT_PTR:.*]] = cir.cast(bitcast, %[[NEW_INT]]
+// CHECK:   cir.store{{.*}} %[[NEW_INT_PTR]], %[[PN_ADDR]]
+// CHECK:   %[[EIGHT:.*]] = cir.const #cir.int<8>
+// CHECK:   %[[NEW_DOUBLE:.*]] = cir.call @_Znwm(%[[EIGHT]])
+// CHECK:   %[[NEW_DOUBLE_PTR:.*]] = cir.cast(bitcast, %[[NEW_DOUBLE]]
+// CHECK:   cir.store{{.*}} %[[NEW_DOUBLE_PTR]], %[[PD_ADDR]]
+// CHECK:   cir.return
+
+// LLVM: define{{.*}} void @_Z14test_basic_newv
+// LLVM:   %[[PS_ADDR:.*]] = alloca ptr, i64 1, align 8
+// LLVM:   %[[PN_ADDR:.*]] = alloca ptr, i64 1, align 8
+// LLVM:   %[[PD_ADDR:.*]] = alloca ptr, i64 1, align 8
+// LLVM:   %[[NEW_S:.*]] = call{{.*}} ptr @_Znwm(i64 8)
+// LLVM:   call{{.*}} void @_ZN1SC1Ev(ptr %[[NEW_S]])
+// LLVM:   store ptr %[[NEW_S]], ptr %[[PS_ADDR]], align 8
+// LLVM:   %[[NEW_INT:.*]] = call{{.*}} ptr @_Znwm(i64 4)
+// LLVM:   store ptr %[[NEW_INT]], ptr %[[PN_ADDR]], align 8
+// LLVM:   %[[NEW_DOUBLE:.*]] = call{{.*}} ptr @_Znwm(i64 8)
+// LLVM:   store ptr %[[NEW_DOUBLE]], ptr %[[PD_ADDR]], align 8
+// LLVM:   ret void
+
+// NOTE: OGCG elides the constructor call here, but CIR does not.
+
+// OGCG: define{{.*}} void @_Z14test_basic_newv
+// OGCG:   %[[PS_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[PN_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[PD_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[NEW_S:.*]] = call{{.*}} ptr @_Znwm(i64 noundef 8)
+// OGCG:   store ptr %[[NEW_S]], ptr %[[PS_ADDR]], align 8
+// OGCG:   %[[NEW_INT:.*]] = call{{.*}} ptr @_Znwm(i64 noundef 4)
+// OGCG:   store ptr %[[NEW_INT]], ptr %[[PN_ADDR]], align 8
+// OGCG:   %[[NEW_DOUBLE:.*]] = call{{.*}} ptr @_Znwm(i64 noundef 8)
+// OGCG:   store ptr %[[NEW_DOUBLE]], ptr %[[PD_ADDR]], align 8
+// OGCG:   ret void
+
+void test_new_with_init() {
+  int *pn = new int{2};
+  double *pd = new double{3.0};
+}
+
+// CHECK: cir.func{{.*}} @_Z18test_new_with_initv
+// CHECK:   %[[PN_ADDR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["pn", init]
+// CHECK:   %[[PD_ADDR:.*]] = cir.alloca !cir.ptr<!cir.double>, !cir.ptr<!cir.ptr<!cir.double>>, ["pd", init]
+// CHECK:   %[[FOUR:.*]] = cir.const #cir.int<4>
+// CHECK:   %[[NEW_INT:.*]] = cir.call @_Znwm(%[[FOUR]])
+// CHECK:   %[[NEW_INT_PTR:.*]] = cir.cast(bitcast, %[[NEW_INT]]
+// CHECK:   %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK:   cir.store{{.*}} %[[TWO]], %[[NEW_INT_PTR]]
+// CHECK:   cir.store{{.*}} %[[NEW_INT_PTR]], %[[PN_ADDR]]
+// CHECK:   %[[EIGHT:.*]] = cir.const #cir.int<8>
+// CHECK:   %[[NEW_DOUBLE:.*]] = cir.call @_Znwm(%[[EIGHT]])
+// CHECK:   %[[NEW_DOUBLE_PTR:.*]] = cir.cast(bitcast, %[[NEW_DOUBLE]]
+// CHECK:   %[[THREE:.*]] = cir.const #cir.fp<3.000000e+00>
+// CHECK:   cir.store{{.*}} %[[THREE]], %[[NEW_DOUBLE_PTR]]
+// CHECK:   cir.store{{.*}} %[[NEW_DOUBLE_PTR]], %[[PD_ADDR]]
+// CHECK:   cir.return
+
+// LLVM: define{{.*}} void @_Z18test_new_with_initv
+// LLVM:   %[[PN_ADDR:.*]] = alloca ptr, i64 1, align 8
+// LLVM:   %[[PD_ADDR:.*]] = alloca ptr, i64 1, align 8
+// LLVM:   %[[NEW_INT:.*]] = call{{.*}} ptr @_Znwm(i64 4)
+// LLVM:   store i32 2, ptr %[[NEW_INT]], align 4
+// LLVM:   store ptr %[[NEW_INT]], ptr %[[PN_ADDR]], align 8
+// LLVM:   %[[NEW_DOUBLE:.*]] = call{{.*}} ptr @_Znwm(i64 8)
+// LLVM:   store double 3.000000e+00, ptr %[[NEW_DOUBLE]], align 8
+// LLVM:   store ptr %[[NEW_DOUBLE]], ptr %[[PD_ADDR]], align 8
+// LLVM:   ret void
+
+// OGCG: define{{.*}} void @_Z18test_new_with_initv
+// OGCG:   %[[PN_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[PD_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[NEW_INT:.*]] = call{{.*}} ptr @_Znwm(i64 noundef 4)
+// OGCG:   store i32 2, ptr %[[NEW_INT]], align 4
+// OGCG:   store ptr %[[NEW_INT]], ptr %[[PN_ADDR]], align 8
+// OGCG:   %[[NEW_DOUBLE:.*]] = call{{.*}} ptr @_Znwm(i64 noundef 8)
+// OGCG:   store double 3.000000e+00, ptr %[[NEW_DOUBLE]], align 8
+// OGCG:   store ptr %[[NEW_DOUBLE]], ptr %[[PD_ADDR]], align 8
+// OGCG:   ret void
+
+struct S2 {
+  S2();
+  S2(int, int);
+  int a;
+  int b;
+};
+
+void test_new_with_ctor() {
+  S2 *ps2 = new S2;
+  S2 *ps2_2 = new S2(1, 2);
+}
+
+// CHECK: cir.func{{.*}} @_Z18test_new_with_ctorv
+// CHECK:   %[[PS2_ADDR:.*]] = cir.alloca !cir.ptr<!rec_S2>, !cir.ptr<!cir.ptr<!rec_S2>>, ["ps2", init]
+// CHECK:   %[[PS2_2_ADDR:.*]] = cir.alloca !cir.ptr<!rec_S2>, !cir.ptr<!cir.ptr<!rec_S2>>, ["ps2_2", init]
+// CHECK:   %[[EIGHT:.*]] = cir.const #cir.int<8>
+// CHECK:   %[[NEW_S2:.*]] = cir.call @_Znwm(%[[EIGHT]])
+// CHECK:   %[[NEW_S2_PTR:.*]] = cir.cast(bitcast, %[[NEW_S2]]
+// CHECK:   cir.call @_ZN2S2C1Ev(%[[NEW_S2_PTR]])
+// CHECK:   cir.store{{.*}} %[[NEW_S2_PTR]], %[[PS2_ADDR]]
+// CHECK:   %[[EIGHT:.*]] = cir.const #cir.int<8>
+// CHECK:   %[[NEW_S2_2:.*]] = cir.call @_Znwm(%[[EIGHT]])
+// CHECK:   %[[NEW_S2_2_PTR:.*]] = cir.cast(bitcast, %[[NEW_S2_2]]
+// CHECK:   %[[ONE:.*]] = cir.const #cir.int<1>
+// CHECK:   %[[TWO:.*]] = cir.const #cir.int<2>
+// CHECK:   cir.call @_ZN2S2C1Eii(%[[NEW_S2_2_PTR]], %[[ONE]], %[[TWO]])
+// CHECK:   cir.store{{.*}} %[[NEW_S2_2_PTR]], %[[PS2_2_ADDR]]
+// CHECK:   cir.return
+
+// LLVM: define{{.*}} void @_Z18test_new_with_ctorv
+// LLVM:   %[[PS2_ADDR:.*]] = alloca ptr, i64 1, align 8
+// LLVM:   %[[PS2_2_ADDR:.*]] = alloca ptr, i64 1, align 8
+// LLVM:   %[[NEW_S2:.*]] = call{{.*}} ptr @_Znwm(i64 8)
+// LLVM:   call{{.*}} void @_ZN2S2C1Ev(ptr %[[NEW_S2]])
+// LLVM:   store ptr %[[NEW_S2]], ptr %[[PS2_ADDR]], align 8
+// LLVM:   %[[NEW_S2_2:.*]] = call{{.*}} ptr @_Znwm(i64 8)
+// LLVM:   call{{.*}} void @_ZN2S2C1Eii(ptr %[[NEW_S2_2]], i32 1, i32 2)
+// LLVM:   store ptr %[[NEW_S2_2]], ptr %[[PS2_2_ADDR]], align 8
+// LLVM:   ret void
+
+// OGCG: define{{.*}} void @_Z18test_new_with_ctorv
+// OGCG:   %[[PS2_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[PS2_2_ADDR:.*]] = alloca ptr, align 8
+// OGCG:   %[[NEW_S2:.*]] = call{{.*}} ptr @_Znwm(i64 noundef 8)
+// OGCG:   call{{.*}} void @_ZN2S2C1Ev(ptr {{.*}} %[[NEW_S2]])
+// OGCG:   store ptr %[[NEW_S2]], ptr %[[PS2_ADDR]], align 8
+// OGCG:   %[[NEW_S2_2:.*]] = call{{.*}} ptr @_Znwm(i64 noundef 8)
+// OGCG:   call{{.*}} void @_ZN2S2C1Eii(ptr {{.*}} %[[NEW_S2_2]], i32 noundef 1, i32 noundef 2)
+// OGCG:   store ptr %[[NEW_S2_2]], ptr %[[PS2_2_ADDR]], align 8
+// OGCG:   ret void
