Author: Andy Kaylor
Date: 2025-05-01T14:47:20-07:00
New Revision: a76936f1c01c7cadbce8ea6553af758d0f614b6a

URL: 
https://github.com/llvm/llvm-project/commit/a76936f1c01c7cadbce8ea6553af758d0f614b6a
DIFF: 
https://github.com/llvm/llvm-project/commit/a76936f1c01c7cadbce8ea6553af758d0f614b6a.diff

LOG: [CIR] Upstream support for range-based for loops (#138176)

This upstreams the code needed to handle CXXForRangeStmt.

Added: 
    

Modified: 
    clang/lib/CIR/CodeGen/CIRGenExpr.cpp
    clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
    clang/lib/CIR/CodeGen/CIRGenFunction.h
    clang/lib/CIR/CodeGen/CIRGenStmt.cpp
    clang/test/CIR/CodeGen/loop.cpp

Removed: 
    


################################################################################
diff  --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index da5a0b97a395e..471c8b3975d96 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -948,6 +948,41 @@ void CIRGenFunction::emitIgnoredExpr(const Expr *e) {
   emitLValue(e);
 }
 
+Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e) {
+  assert(e->getType()->isArrayType() &&
+         "Array to pointer decay must have array source type!");
+
+  // Expressions of array type can't be bitfields or vector elements.
+  LValue lv = emitLValue(e);
+  Address addr = lv.getAddress();
+
+  // If the array type was an incomplete type, we need to make sure
+  // the decay ends up being the right type.
+  auto lvalueAddrTy = 
mlir::cast<cir::PointerType>(addr.getPointer().getType());
+
+  if (e->getType()->isVariableArrayType())
+    return addr;
+
+  auto pointeeTy = mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
+
+  mlir::Type arrayTy = convertType(e->getType());
+  assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
+  assert(pointeeTy == arrayTy);
+
+  // The result of this decay conversion points to an array element within the
+  // base lvalue. However, since TBAA currently does not support representing
+  // accesses to elements of member arrays, we conservatively represent 
accesses
+  // to the pointee object as if it had no base lvalue specified.
+  // TODO: Support TBAA for member arrays.
+  QualType eltType = e->getType()->castAsArrayTypeUnsafe()->getElementType();
+  assert(!cir::MissingFeatures::opTBAA());
+
+  mlir::Value ptr = builder.maybeBuildArrayDecay(
+      cgm.getLoc(e->getSourceRange()), addr.getPointer(),
+      convertTypeForMem(eltType));
+  return Address(ptr, addr.getAlignment());
+}
+
 /// Emit an `if` on a boolean condition, filling `then` and `else` into
 /// appropriated regions.
 mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,

diff  --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 78eb3cbd430bc..423cddd374e8f 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1567,6 +1567,9 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr 
*ce) {
     return v;
   }
 
+  case CK_ArrayToPointerDecay:
+    return cgf.emitArrayToPointerDecay(subExpr).getPointer();
+
   case CK_NullToPointer: {
     if (mustVisitNullValue(subExpr))
       cgf.emitIgnoredExpr(subExpr);

diff  --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h 
b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index d50abfcfbc867..ac5d39fc61795 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -449,6 +449,8 @@ class CIRGenFunction : public CIRGenTypeCache {
 
   LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e);
 
+  Address emitArrayToPointerDecay(const Expr *array);
+
   AutoVarEmission emitAutoVarAlloca(const clang::VarDecl &d);
 
   /// Emit code and set up symbol table for a variable declaration with auto,
@@ -485,6 +487,10 @@ class CIRGenFunction : public CIRGenTypeCache {
   LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e);
 
   mlir::LogicalResult emitContinueStmt(const clang::ContinueStmt &s);
+
+  mlir::LogicalResult emitCXXForRangeStmt(const CXXForRangeStmt &s,
+                                          llvm::ArrayRef<const Attr *> attrs);
+
   mlir::LogicalResult emitDoStmt(const clang::DoStmt &s);
 
   /// Emit an expression as an initializer for an object (variable, field, 
etc.)

diff  --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp 
b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index dffa71046df1d..ee4dcc861a1f2 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -97,6 +97,8 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
     return emitWhileStmt(cast<WhileStmt>(*s));
   case Stmt::DoStmtClass:
     return emitDoStmt(cast<DoStmt>(*s));
+  case Stmt::CXXForRangeStmtClass:
+    return emitCXXForRangeStmt(cast<CXXForRangeStmt>(*s), attr);
   case Stmt::OpenACCComputeConstructClass:
     return emitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*s));
   case Stmt::OpenACCLoopConstructClass:
@@ -137,7 +139,6 @@ mlir::LogicalResult CIRGenFunction::emitStmt(const Stmt *s,
   case Stmt::CoroutineBodyStmtClass:
   case Stmt::CoreturnStmtClass:
   case Stmt::CXXTryStmtClass:
-  case Stmt::CXXForRangeStmtClass:
   case Stmt::IndirectGotoStmtClass:
   case Stmt::GCCAsmStmtClass:
   case Stmt::MSAsmStmtClass:
@@ -547,6 +548,83 @@ mlir::LogicalResult CIRGenFunction::emitSwitchCase(const 
SwitchCase &s,
   llvm_unreachable("expect case or default stmt");
 }
 
+mlir::LogicalResult
+CIRGenFunction::emitCXXForRangeStmt(const CXXForRangeStmt &s,
+                                    ArrayRef<const Attr *> forAttrs) {
+  cir::ForOp forOp;
+
+  // TODO(cir): pass in array of attributes.
+  auto forStmtBuilder = [&]() -> mlir::LogicalResult {
+    mlir::LogicalResult loopRes = mlir::success();
+    // Evaluate the first pieces before the loop.
+    if (s.getInit())
+      if (emitStmt(s.getInit(), /*useCurrentScope=*/true).failed())
+        return mlir::failure();
+    if (emitStmt(s.getRangeStmt(), /*useCurrentScope=*/true).failed())
+      return mlir::failure();
+    if (emitStmt(s.getBeginStmt(), /*useCurrentScope=*/true).failed())
+      return mlir::failure();
+    if (emitStmt(s.getEndStmt(), /*useCurrentScope=*/true).failed())
+      return mlir::failure();
+
+    assert(!cir::MissingFeatures::loopInfoStack());
+    // From LLVM: if there are any cleanups between here and the loop-exit
+    // scope, create a block to stage a loop exit along.
+    // We probably already do the right thing because of ScopeOp, but make
+    // sure we handle all cases.
+    assert(!cir::MissingFeatures::requiresCleanups());
+
+    forOp = builder.createFor(
+        getLoc(s.getSourceRange()),
+        /*condBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          assert(!cir::MissingFeatures::createProfileWeightsForLoop());
+          
assert(!cir::MissingFeatures::emitCondLikelihoodViaExpectIntrinsic());
+          mlir::Value condVal = evaluateExprAsBool(s.getCond());
+          builder.createCondition(condVal);
+        },
+        /*bodyBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          // https://en.cppreference.com/w/cpp/language/for
+          // In C++ the scope of the init-statement and the scope of the
+          // statement are one and the same.
+          bool useCurrentScope = true;
+          if (emitStmt(s.getLoopVarStmt(), useCurrentScope).failed())
+            loopRes = mlir::failure();
+          if (emitStmt(s.getBody(), useCurrentScope).failed())
+            loopRes = mlir::failure();
+          emitStopPoint(&s);
+        },
+        /*stepBuilder=*/
+        [&](mlir::OpBuilder &b, mlir::Location loc) {
+          if (s.getInc())
+            if (emitStmt(s.getInc(), /*useCurrentScope=*/true).failed())
+              loopRes = mlir::failure();
+          builder.createYield(loc);
+        });
+    return loopRes;
+  };
+
+  mlir::LogicalResult res = mlir::success();
+  mlir::Location scopeLoc = getLoc(s.getSourceRange());
+  builder.create<cir::ScopeOp>(scopeLoc, /*scopeBuilder=*/
+                               [&](mlir::OpBuilder &b, mlir::Location loc) {
+                                 // Create a cleanup scope for the condition
+                                 // variable cleanups. Logical equivalent from
+                                 // LLVM codegen for LexicalScope
+                                 // ConditionScope(*this, 
S.getSourceRange())...
+                                 LexicalScope lexScope{
+                                     *this, loc, builder.getInsertionBlock()};
+                                 res = forStmtBuilder();
+                               });
+
+  if (res.failed())
+    return res;
+
+  terminateBody(builder, forOp.getBody(), getLoc(s.getEndLoc()));
+  return mlir::success();
+}
+
 mlir::LogicalResult CIRGenFunction::emitForStmt(const ForStmt &s) {
   cir::ForOp forOp;
 

diff  --git a/clang/test/CIR/CodeGen/loop.cpp b/clang/test/CIR/CodeGen/loop.cpp
index c69d5097bbdf7..e0165c91d0a75 100644
--- a/clang/test/CIR/CodeGen/loop.cpp
+++ b/clang/test/CIR/CodeGen/loop.cpp
@@ -190,6 +190,250 @@ void l3() {
 // OGCG:   store i32 0, ptr %[[I]], align 4
 // OGCG:   br label %[[FOR_COND]]
 
+void l4() {
+  int a[10];
+  for (int n : a)
+    ;
+}
+
+// CIR: cir.func @_Z2l4v
+// CIR:   %[[A_ADDR:.*]] = cir.alloca {{.*}} ["a"]
+// CIR:   cir.scope {
+// CIR:     %[[RANGE_ADDR:.*]] = cir.alloca {{.*}} ["__range1", init, const]
+// CIR:     %[[BEGIN_ADDR:.*]] = cir.alloca {{.*}} ["__begin1", init]
+// CIR:     %[[END_ADDR:.*]] = cir.alloca {{.*}} ["__end1", init]
+// CIR:     %[[N_ADDR:.*]] = cir.alloca {{.*}} ["n", init]
+// CIR:     cir.store %[[A_ADDR]], %[[RANGE_ADDR]]
+// CIR:     %[[RANGE_LOAD:.*]] = cir.load %[[RANGE_ADDR]]
+// CIR:     %[[RANGE_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[RANGE_LOAD]] : 
{{.*}})
+// CIR:     cir.store %[[RANGE_CAST]], %[[BEGIN_ADDR]]
+// CIR:     %[[BEGIN:.*]] = cir.load %[[RANGE_ADDR]]
+// CIR:     %[[BEGIN_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[BEGIN]] : 
{{.*}})
+// CIR:     %[[TEN:.*]] = cir.const #cir.int<10>
+// CIR:     %[[END_PTR:.*]] = cir.ptr_stride(%[[BEGIN_CAST]] : {{.*}}, 
%[[TEN]] : {{.*}})
+// CIR:     cir.store %[[END_PTR]], %[[END_ADDR]]
+// CIR:     cir.for : cond {
+// CIR:       %[[CUR:.*]] = cir.load %[[BEGIN_ADDR]]
+// CIR:       %[[END:.*]] = cir.load %[[END_ADDR]]
+// CIR:       %[[CMP:.*]] = cir.cmp(ne, %[[CUR]], %[[END]])
+// CIR:       cir.condition(%[[CMP]])
+// CIR:     } body {
+// CIR:       %[[CUR:.*]] = cir.load deref %[[BEGIN_ADDR]]
+// CIR:       %[[N:.*]] = cir.load %[[CUR]]
+// CIR:       cir.store %[[N]], %[[N_ADDR]]
+// CIR:       cir.yield
+// CIR:     } step {
+// CIR:       %[[CUR:.*]] = cir.load %[[BEGIN_ADDR]]
+// CIR:       %[[ONE:.*]] = cir.const #cir.int<1>
+// CIR:       %[[NEXT:.*]] = cir.ptr_stride(%[[CUR]] : {{.*}}, %[[ONE]] : 
{{.*}})
+// CIR:       cir.store %[[NEXT]], %[[BEGIN_ADDR]]
+// CIR:       cir.yield
+// CIR:     }
+// CIR:   }
+
+// LLVM: define void @_Z2l4v() {
+// LLVM:   %[[RANGE_ADDR:.*]] = alloca ptr
+// LLVM:   %[[BEGIN_ADDR:.*]] = alloca ptr
+// LLVM:   %[[END_ADDR:.*]] = alloca ptr
+// LLVM:   %[[N_ADDR:.*]] = alloca i32
+// LLVM:   %[[A_ADDR:.*]] = alloca [10 x i32]
+// LLVM:   br label %[[SETUP:.*]]
+// LLVM: [[SETUP]]:
+// LLVM:   store ptr %[[A_ADDR]], ptr %[[RANGE_ADDR]]
+// LLVM:   %[[BEGIN:.*]] = load ptr, ptr %[[RANGE_ADDR]]
+// LLVM:   %[[BEGIN_CAST:.*]] = getelementptr i32, ptr %[[BEGIN]], i32 0
+// LLVM:   store ptr %[[BEGIN_CAST]], ptr %[[BEGIN_ADDR]]
+// LLVM:   %[[RANGE:.*]] = load ptr, ptr %[[RANGE_ADDR]]
+// LLVM:   %[[RANGE_CAST:.*]] = getelementptr i32, ptr %[[RANGE]], i32 0
+// LLVM:   %[[END_PTR:.*]] = getelementptr i32, ptr %[[RANGE_CAST]], i64 10
+// LLVM:   store ptr %[[END_PTR]], ptr %[[END_ADDR]]
+// LLVM:   br label %[[COND:.*]]
+// LLVM: [[COND]]:
+// LLVM:   %[[BEGIN:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// LLVM:   %[[END:.*]] = load ptr, ptr %[[END_ADDR]]
+// LLVM:   %[[CMP:.*]] = icmp ne ptr %[[BEGIN]], %[[END]]
+// LLVM:   br i1 %[[CMP]], label %[[BODY:.*]], label %[[END:.*]]
+// LLVM: [[BODY]]:
+// LLVM:   %[[CUR:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// LLVM:   %[[A_CUR:.*]] = load i32, ptr %[[CUR]]
+// LLVM:   store i32 %[[A_CUR]], ptr %[[N_ADDR]]
+// LLVM:   br label %[[STEP:.*]]
+// LLVM: [[STEP]]:
+// LLVM:   %[[BEGIN:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// LLVM:   %[[NEXT:.*]] = getelementptr i32, ptr %[[BEGIN]], i64 1
+// LLVM:   store ptr %[[NEXT]], ptr %[[BEGIN_ADDR]]
+// LLVM:   br label %[[COND]]
+// LLVM: [[END]]:
+// LLVM:   br label %[[EXIT:.*]]
+// LLVM: [[EXIT]]:
+// LLVM:   ret void
+
+// OGCG: define{{.*}} void @_Z2l4v()
+// OGCG:   %[[A_ADDR:.*]] = alloca [10 x i32]
+// OGCG:   %[[RANGE_ADDR:.*]] = alloca ptr
+// OGCG:   %[[BEGIN_ADDR:.*]] = alloca ptr
+// OGCG:   %[[END_ADDR:.*]] = alloca ptr
+// OGCG:   %[[N_ADDR:.*]] = alloca i32
+// OGCG:   store ptr %[[A_ADDR]], ptr %[[RANGE_ADDR]]
+// OGCG:   %[[BEGIN:.*]] = load ptr, ptr %[[RANGE_ADDR]]
+// OGCG:   %[[BEGIN_CAST:.*]] = getelementptr inbounds [10 x i32], ptr 
%[[BEGIN]], i64 0, i64 0
+// OGCG:   store ptr %[[BEGIN_CAST]], ptr %[[BEGIN_ADDR]]
+// OGCG:   %[[RANGE:.*]] = load ptr, ptr %[[RANGE_ADDR]]
+// OGCG:   %[[RANGE_CAST:.*]] = getelementptr inbounds [10 x i32], ptr 
%[[RANGE]], i64 0, i64 0
+// OGCG:   %[[END_PTR:.*]] = getelementptr inbounds i32, ptr %[[RANGE_CAST]], 
i64 10
+// OGCG:   store ptr %[[END_PTR]], ptr %[[END_ADDR]]
+// OGCG:   br label %[[COND:.*]]
+// OGCG: [[COND]]:
+// OGCG:   %[[BEGIN:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// OGCG:   %[[END:.*]] = load ptr, ptr %[[END_ADDR]]
+// OGCG:   %[[CMP:.*]] = icmp ne ptr %[[BEGIN]], %[[END]]
+// OGCG:   br i1 %[[CMP]], label %[[BODY:.*]], label %[[END:.*]]
+// OGCG: [[BODY]]:
+// OGCG:   %[[CUR:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// OGCG:   %[[A_CUR:.*]] = load i32, ptr %[[CUR]]
+// OGCG:   store i32 %[[A_CUR]], ptr %[[N_ADDR]]
+// OGCG:   br label %[[STEP:.*]]
+// OGCG: [[STEP]]:
+// OGCG:   %[[BEGIN:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// OGCG:   %[[NEXT:.*]] = getelementptr inbounds nuw i32, ptr %[[BEGIN]], i32 1
+// OGCG:   store ptr %[[NEXT]], ptr %[[BEGIN_ADDR]]
+// OGCG:   br label %[[COND]]
+// OGCG: [[END]]:
+// OGCG:   ret void
+
+void l5() {
+  for (int arr[]{1,2,3,4}; auto x : arr) {} 
+}
+
+// CIR: cir.func @_Z2l5v
+// CIR:   cir.scope {
+// CIR:     %[[ARR_ADDR:.*]] = cir.alloca {{.*}} ["arr", init]
+// CIR:     %[[RANGE_ADDR:.*]] = cir.alloca {{.*}} ["__range1", init, const]
+// CIR:     %[[BEGIN_ADDR:.*]] = cir.alloca {{.*}} ["__begin1", init]
+// CIR:     %[[END_ADDR:.*]] = cir.alloca {{.*}} ["__end1", init]
+// CIR:     %[[X_ADDR:.*]] = cir.alloca {{.*}} ["x", init]
+// CIR:     %[[ARR_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_ADDR]] : 
{{.*}})
+// CIR:     %[[ONE:.*]] = cir.const #cir.int<1> : !s32i
+// CIR:     cir.store %[[ONE]], %[[ARR_CAST]]
+// CIR:     %[[OFFSET1:.*]] = cir.const #cir.int<1> : !s64i
+// CIR:     %[[STRIDE:.*]] = cir.ptr_stride(%[[ARR_CAST]] : {{.*}}, 
%[[OFFSET1]] : {{.*}})
+// CIR:     %[[TWO:.*]] = cir.const #cir.int<2> : !s32i
+// CIR:     cir.store %[[TWO]], %[[STRIDE]]
+// CIR:     %[[OFFSET2:.*]] = cir.const #cir.int<2> : !s64i
+// CIR:     %[[STRIDE2:.*]] = cir.ptr_stride(%[[ARR_CAST]] : {{.*}}, 
%[[OFFSET2]] : {{.*}})
+// CIR:     %[[THREE:.*]] = cir.const #cir.int<3> : !s32i
+// CIR:     cir.store %[[THREE]], %[[STRIDE2]]
+// CIR:     %[[OFFSET3:.*]] = cir.const #cir.int<3> : !s64i
+// CIR:     %[[STRIDE3:.*]] = cir.ptr_stride(%[[ARR_CAST]] : {{.*}}, 
%[[OFFSET3]] : {{.*}})
+// CIR:     %[[FOUR:.*]] = cir.const #cir.int<4> : !s32i
+// CIR:     cir.store %[[FOUR]], %[[STRIDE3]]
+// CIR:     cir.store %[[ARR_ADDR]], %[[RANGE_ADDR]]
+// CIR:     %[[RANGE_LOAD:.*]] = cir.load %[[RANGE_ADDR]]
+// CIR:     %[[RANGE_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[RANGE_LOAD]] : 
{{.*}})
+// CIR:     cir.store %[[RANGE_CAST]], %[[BEGIN_ADDR]]
+// CIR:     %[[BEGIN:.*]] = cir.load %[[RANGE_ADDR]]
+// CIR:     %[[BEGIN_CAST:.*]] = cir.cast(array_to_ptrdecay, %[[BEGIN]] : 
{{.*}})
+// CIR:     %[[FOUR:.*]] = cir.const #cir.int<4> : !s64i
+// CIR:     %[[END_PTR:.*]] = cir.ptr_stride(%[[BEGIN_CAST]] : {{.*}}, 
%[[FOUR]] : {{.*}})
+// CIR:     cir.store %[[END_PTR]], %[[END_ADDR]]
+// CIR:     cir.for : cond {
+// CIR:       %[[CUR:.*]] = cir.load %[[BEGIN_ADDR]]
+// CIR:       %[[END:.*]] = cir.load %[[END_ADDR]]
+// CIR:       %[[CMP:.*]] = cir.cmp(ne, %[[CUR]], %[[END]])
+// CIR:       cir.condition(%[[CMP]])
+// CIR:     } body {
+// CIR:       %[[CUR:.*]] = cir.load deref %[[BEGIN_ADDR]]
+// CIR:       %[[X:.*]] = cir.load %[[CUR]]
+// CIR:       cir.store %[[X]], %[[X_ADDR]]
+// CIR:       cir.yield
+// CIR:     } step {
+// CIR:       %[[CUR:.*]] = cir.load %[[BEGIN_ADDR]]
+// CIR:       %[[ONE:.*]] = cir.const #cir.int<1>
+// CIR:       %[[NEXT:.*]] = cir.ptr_stride(%[[CUR]] : {{.*}}, %[[ONE]] : 
{{.*}})
+// CIR:       cir.store %[[NEXT]], %[[BEGIN_ADDR]]
+// CIR:       cir.yield
+// CIR:     }
+// CIR:   }
+
+// LLVM: define void @_Z2l5v() {
+// LLVM:   %[[ARR_ADDR:.*]] = alloca [4 x i32]
+// LLVM:   %[[RANGE_ADDR:.*]] = alloca ptr
+// LLVM:   %[[BEGIN_ADDR:.*]] = alloca ptr
+// LLVM:   %[[END_ADDR:.*]] = alloca ptr
+// LLVM:   %[[X_ADDR:.*]] = alloca i32
+// LLVM:   br label %[[SETUP:.*]]
+// LLVM: [[SETUP]]:
+// LLVM:   %[[ARR_0:.*]] = getelementptr i32, ptr %[[ARR_ADDR]], i32 0
+// LLVM:   store i32 1, ptr %[[ARR_0]]
+// LLVM:   %[[ARR_1:.*]] = getelementptr i32, ptr %[[ARR_0]], i64 1
+// LLVM:   store i32 2, ptr %[[ARR_1]]
+// LLVM:   %[[ARR_2:.*]] = getelementptr i32, ptr %[[ARR_0]], i64 2
+// LLVM:   store i32 3, ptr %[[ARR_2]]
+// LLVM:   %[[ARR_3:.*]] = getelementptr i32, ptr %[[ARR_0]], i64 3
+// LLVM:   store i32 4, ptr %[[ARR_3]]
+// LLVM:   store ptr %[[ARR_ADDR]], ptr %[[RANGE_ADDR]]
+// LLVM:   %[[BEGIN:.*]] = load ptr, ptr %[[RANGE_ADDR]]
+// LLVM:   %[[BEGIN_CAST:.*]] = getelementptr i32, ptr %[[BEGIN]], i32 0
+// LLVM:   store ptr %[[BEGIN_CAST]], ptr %[[BEGIN_ADDR]]
+// LLVM:   %[[RANGE:.*]] = load ptr, ptr %[[RANGE_ADDR]]
+// LLVM:   %[[RANGE_CAST:.*]] = getelementptr i32, ptr %[[RANGE]], i32 0
+// LLVM:   %[[END_PTR:.*]] = getelementptr i32, ptr %[[RANGE_CAST]], i64 4
+// LLVM:   store ptr %[[END_PTR]], ptr %[[END_ADDR]]
+// LLVM:   br label %[[COND:.*]]
+// LLVM: [[COND]]:
+// LLVM:   %[[BEGIN:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// LLVM:   %[[END:.*]] = load ptr, ptr %[[END_ADDR]]
+// LLVM:   %[[CMP:.*]] = icmp ne ptr %[[BEGIN]], %[[END]]
+// LLVM:   br i1 %[[CMP]], label %[[BODY:.*]], label %[[END:.*]]
+// LLVM: [[BODY]]:
+// LLVM:   %[[CUR:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// LLVM:   %[[ARR_CUR:.*]] = load i32, ptr %[[CUR]]
+// LLVM:   store i32 %[[ARR_CUR]], ptr %[[X_ADDR]]
+// LLVM:   br label %[[STEP:.*]]
+// LLVM: [[STEP]]:
+// LLVM:   %[[BEGIN:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// LLVM:   %[[NEXT:.*]] = getelementptr i32, ptr %[[BEGIN]], i64 1
+// LLVM:   store ptr %[[NEXT]], ptr %[[BEGIN_ADDR]]
+// LLVM:   br label %[[COND]]
+// LLVM: [[END]]:
+// LLVM:   br label %[[EXIT:.*]]
+// LLVM: [[EXIT]]:
+// LLVM:   ret void
+
+// OGCG: define{{.*}} void @_Z2l5v()
+// OGCG:   %[[ARR_ADDR:.*]] = alloca [4 x i32]
+// OGCG:   %[[RANGE_ADDR:.*]] = alloca ptr
+// OGCG:   %[[BEGIN_ADDR:.*]] = alloca ptr
+// OGCG:   %[[END_ADDR:.*]] = alloca ptr
+// OGCG:   %[[X_ADDR:.*]] = alloca i32
+// OGCG:   call void @llvm.memcpy.p0.p0.i64
+// OGCG:   store ptr %[[ARR_ADDR]], ptr %[[RANGE_ADDR]]
+// OGCG:   %[[BEGIN:.*]] = load ptr, ptr %[[RANGE_ADDR]]
+// OGCG:   %[[BEGIN_CAST:.*]] = getelementptr inbounds [4 x i32], ptr 
%[[BEGIN]], i64 0, i64 0
+// OGCG:   store ptr %[[BEGIN_CAST]], ptr %[[BEGIN_ADDR]]
+// OGCG:   %[[RANGE:.*]] = load ptr, ptr %[[RANGE_ADDR]]
+// OGCG:   %[[RANGE_CAST:.*]] = getelementptr inbounds [4 x i32], ptr 
%[[RANGE]], i64 0, i64 0
+// OGCG:   %[[END_PTR:.*]] = getelementptr inbounds i32, ptr %[[RANGE_CAST]], 
i64 4
+// OGCG:   store ptr %[[END_PTR]], ptr %[[END_ADDR]]
+// OGCG:   br label %[[COND:.*]]
+// OGCG: [[COND]]:
+// OGCG:   %[[BEGIN:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// OGCG:   %[[END:.*]] = load ptr, ptr %[[END_ADDR]]
+// OGCG:   %[[CMP:.*]] = icmp ne ptr %[[BEGIN]], %[[END]]
+// OGCG:   br i1 %[[CMP]], label %[[BODY:.*]], label %[[END:.*]]
+// OGCG: [[BODY]]:
+// OGCG:   %[[CUR:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// OGCG:   %[[ARR_CUR:.*]] = load i32, ptr %[[CUR]]
+// OGCG:   store i32 %[[ARR_CUR]], ptr %[[X_ADDR]]
+// OGCG:   br label %[[STEP:.*]]
+// OGCG: [[STEP]]:
+// OGCG:   %[[BEGIN:.*]] = load ptr, ptr %[[BEGIN_ADDR]]
+// OGCG:   %[[NEXT:.*]] = getelementptr inbounds nuw i32, ptr %[[BEGIN]], i32 1
+// OGCG:   store ptr %[[NEXT]], ptr %[[BEGIN_ADDR]]
+// OGCG:   br label %[[COND]]
+// OGCG: [[END]]:
+// OGCG:   ret void
+
 void test_do_while_false() {
   do {
   } while (0);


        
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to