Author: Amr Hesham
Date: 2025-04-15T19:52:26+02:00
New Revision: 30d13e359190f7a0e2122292ec4a4fc1a6c71acc

URL: 
https://github.com/llvm/llvm-project/commit/30d13e359190f7a0e2122292ec4a4fc1a6c71acc
DIFF: 
https://github.com/llvm/llvm-project/commit/30d13e359190f7a0e2122292ec4a4fc1a6c71acc.diff

LOG: [CIR] Upstream ArraySubscriptExpr from function parameter with pointer 
base (#135493)

This change adds support for emitting an ArraySubscriptExpr whose base is a
function parameter of pointer type.

Issue https://github.com/llvm/llvm-project/issues/130197

Added: 
    

Modified: 
    clang/lib/CIR/CodeGen/CIRGenExpr.cpp
    clang/test/CIR/CodeGen/array.cpp

Removed: 
    


################################################################################
diff  --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index f0732a8ea60af..cffe5c5cd1ec3 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -552,7 +552,19 @@ CIRGenFunction::emitArraySubscriptExpr(const 
clang::ArraySubscriptExpr *e) {
   // in lexical order (this complexity is, sadly, required by C++17).
   assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
          "index was neither LHS nor RHS");
-  const mlir::Value idx = emitScalarExpr(e->getIdx());
+
+  auto emitIdxAfterBase = [&]() -> mlir::Value {
+    const mlir::Value idx = emitScalarExpr(e->getIdx());
+
+    // Extend or truncate the index type to 32 or 64-bits.
+    auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
+    if (ptrTy && mlir::isa<cir::IntType>(ptrTy.getPointee()))
+      cgm.errorNYI(e->getSourceRange(),
+                   "emitArraySubscriptExpr: index type cast");
+    return idx;
+  };
+
+  const mlir::Value idx = emitIdxAfterBase();
   if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
     LValue arrayLV;
     if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
@@ -566,13 +578,34 @@ CIRGenFunction::emitArraySubscriptExpr(const 
clang::ArraySubscriptExpr *e) {
         arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
         /*shouldDecay=*/true);
 
-    return LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
+    const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
+
+    if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
+      cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with 
GC");
+    }
+
+    return lv;
   }
 
   // The base must be a pointer; emit it with an estimate of its alignment.
-  cgm.errorNYI(e->getSourceRange(),
-               "emitArraySubscriptExpr: The base must be a pointer");
-  return {};
+  assert(e->getBase()->getType()->isPointerType() &&
+         "The base must be a pointer");
+
+  LValueBaseInfo eltBaseInfo;
+  const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
+  // Propagate the alignment from the array itself to the result.
+  const Address addxr = emitArraySubscriptPtr(
+      *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
+      e->getType(), idx, cgm.getLoc(e->getExprLoc()),
+      /*shouldDecay=*/false);
+
+  const LValue lv = LValue::makeAddr(addxr, e->getType(), eltBaseInfo);
+
+  if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
+    cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
+  }
+
+  return lv;
 }
 
 LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {

diff  --git a/clang/test/CIR/CodeGen/array.cpp 
b/clang/test/CIR/CodeGen/array.cpp
index 5cda061cdbf12..08f6d730f161a 100644
--- a/clang/test/CIR/CodeGen/array.cpp
+++ b/clang/test/CIR/CodeGen/array.cpp
@@ -350,20 +350,118 @@ void func7() {
 // OGCG: %[[ARR:.*]] = alloca [1 x ptr], align 8
 // OGCG: call void @llvm.memset.p0.i64(ptr align 8 %[[ARR]], i8 0, i64 8, i1 
false)
 
-void func8(int p[10]) {}
-// CIR: cir.func @func8(%arg0: !cir.ptr<!s32i>
-// CIR: cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, ["p", init]
+void func8(int arr[10]) {
+  int e = arr[0];
+  int e2 = arr[1];
+}
 
-// LLVM: define void @func8(ptr {{%.*}})
-// LLVM-NEXT: alloca ptr, i64 1, align 8
+// CIR: cir.func @func8(%[[ARG:.*]]: !cir.ptr<!s32i>
+// CIR:  %[[ARR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, 
["arr", init]
+// CIR:  %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
+// CIR:  %[[INIT_2:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e2", init]
+// CIR:  cir.store %[[ARG]], %[[ARR]] : !cir.ptr<!s32i>, 
!cir.ptr<!cir.ptr<!s32i>>
+// CIR:  %[[IDX:.*]] = cir.const #cir.int<0> : !s32i
+// CIR:  %[[TMP_1:.*]] = cir.load %[[ARR]] : !cir.ptr<!cir.ptr<!s32i>>, 
!cir.ptr<!s32i>
+// CIR:  %[[ELE_0:.*]] = cir.ptr_stride(%[[TMP_1]] : !cir.ptr<!s32i>, %[[IDX]] 
: !s32i), !cir.ptr<!s32i>
+// CIR:  %[[TMP_2:.*]] = cir.load %[[ELE_0]] : !cir.ptr<!s32i>, !s32i
+// CIR:  cir.store %[[TMP_2]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
+// CIR:  %[[IDX_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR:  %[[TMP_3:.*]] = cir.load %[[ARR]] : !cir.ptr<!cir.ptr<!s32i>>, 
!cir.ptr<!s32i>
+// CIR:  %[[ELE_1:.*]] = cir.ptr_stride(%[[TMP_3]] : !cir.ptr<!s32i>, 
%[[IDX_1]] : !s32i), !cir.ptr<!s32i>
+// CIR:  %[[TMP_4:.*]] = cir.load %[[ELE_1]] : !cir.ptr<!s32i>, !s32i
+// CIR:  cir.store %[[TMP_4]], %[[INIT_2]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM: define void @func8(ptr %[[ARG:.*]])
+// LLVM:  %[[ARR:.*]] = alloca ptr, i64 1, align 8
+// LLVM:  %[[INIT:.*]] = alloca i32, i64 1, align 4
+// LLVM:  %[[INIT_2:.*]] = alloca i32, i64 1, align 4
+// LLVM:  store ptr %[[ARG]], ptr %[[ARR]], align 8
+// LLVM:  %[[TMP_1:.*]] = load ptr, ptr %[[ARR]], align 8
+// LLVM:  %[[ELE_0:.*]] = getelementptr i32, ptr %[[TMP_1]], i64 0
+// LLVM:  %[[TMP_2:.*]] = load i32, ptr %[[ELE_0]], align 4
+// LLVM:  store i32 %[[TMP_2]], ptr %[[INIT]], align 4
+// LLVM:  %[[TMP_3:.*]] = load ptr, ptr %[[ARR]], align 8
+// LLVM:  %[[ELE_1:.*]] = getelementptr i32, ptr %[[TMP_3]], i64 1
+// LLVM:  %[[TMP_4:.*]] = load i32, ptr %[[ELE_1]], align 4
+// LLVM:  store i32 %[[TMP_4]], ptr %[[INIT_2]], align 4
+
+// OGCG: %[[ARR:.*]] = alloca ptr, align 8
+// OGCG: %[[INIT:.*]] = alloca i32, align 4
+// OGCG: %[[INIT_2:.*]] = alloca i32, align 4
+// OGCG: store ptr {{%.*}}, ptr %[[ARR]], align 8
+// OGCG: %[[TMP_1:.*]] = load ptr, ptr %[[ARR]], align 8
+// OGCG: %[[ELE_0:.*]] = getelementptr inbounds i32, ptr %[[TMP_1]], i64 0
+// OGCG: %[[TMP_2:.*]] = load i32, ptr %[[ELE_0]], align 4
+// OGCG: store i32 %[[TMP_2]], ptr %[[INIT]], align 4
+// OGCG: %[[TMP_3:.*]] = load ptr, ptr %[[ARR]], align 8
+// OGCG: %[[ELE_1:.*]] = getelementptr inbounds i32, ptr %[[TMP_3]], i64 1
+// OGCG: %[[TMP_2:.*]] = load i32, ptr %[[ELE_1]], align 4
+// OGCG: store i32 %[[TMP_2]], ptr %[[INIT_2]], align 4
 
-// OGCG: alloca ptr, align 8
+void func9(int arr[10][5]) {
+  int e = arr[1][2];
+}
 
-void func9(int pp[10][5]) {}
-// CIR: cir.func @func9(%arg0: !cir.ptr<!cir.array<!s32i x 5>>
-// CIR: cir.alloca !cir.ptr<!cir.array<!s32i x 5>>, 
!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+// CIR: cir.func @func9(%[[ARG:.*]]: !cir.ptr<!cir.array<!s32i x 5>>
+// CIR:  %[[ARR:.*]] = cir.alloca !cir.ptr<!cir.array<!s32i x 5>>, 
!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, ["arr", init]
+// CIR:  %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
+// CIR:  cir.store %[[ARG]], %[[ARR]] : !cir.ptr<!cir.array<!s32i x 5>>, 
!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>
+// CIR:  %[[IDX:.*]] = cir.const #cir.int<2> : !s32i
+// CIR:  %[[IDX_1:.*]] = cir.const #cir.int<1> : !s32i
+// CIR:  %[[TMP_1:.*]] = cir.load %[[ARR]] : 
!cir.ptr<!cir.ptr<!cir.array<!s32i x 5>>>, !cir.ptr<!cir.array<!s32i x 5>>
+// CIR:  %[[ARR_1:.*]] = cir.ptr_stride(%[[TMP_1]] : !cir.ptr<!cir.array<!s32i 
x 5>>, %[[IDX_1]] : !s32i), !cir.ptr<!cir.array<!s32i x 5>>
+// CIR:  %[[ARR_1_PTR:.*]] = cir.cast(array_to_ptrdecay, %[[ARR_1]] : 
!cir.ptr<!cir.array<!s32i x 5>>), !cir.ptr<!s32i>
+// CIR:  %[[ARR_1_2:.*]] = cir.ptr_stride(%[[ARR_1_PTR]] : !cir.ptr<!s32i>, 
%[[IDX]] : !s32i), !cir.ptr<!s32i>
+// CIR:  %[[TMP_2:.*]] = cir.load %[[ARR_1_2]] : !cir.ptr<!s32i>, !s32i
+// CIR:  cir.store %[[TMP_2]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM: define void @func9(ptr %[[ARG:.*]])
+// LLVM:  %[[ARR:.*]] = alloca ptr, i64 1, align 8
+// LLVM:  %[[INIT:.*]] = alloca i32, i64 1, align 4
+// LLVM:  store ptr %[[ARG]], ptr %[[ARR]], align 8
+// LLVM:  %[[TMP_1:.*]] = load ptr, ptr %[[ARR]], align 8
+// LLVM:  %[[ARR_1:.*]] = getelementptr [5 x i32], ptr %[[TMP_1]], i64 1
+// LLVM:  %[[ARR_1_PTR:.*]] = getelementptr i32, ptr %[[ARR_1]], i32 0
+// LLVM:  %[[ARR_1_2:.*]] = getelementptr i32, ptr %[[ARR_1_PTR]], i64 2
+// LLVM:  %[[TMP_2:.*]] = load i32, ptr %[[ARR_1_2]], align 4
+// LLVM:  store i32 %[[TMP_2]], ptr %[[INIT]], align 4
+
+// OGCG: %[[ARR:.*]] = alloca ptr, align 8
+// OGCG: %[[INIT:.*]] = alloca i32, align 4
+// OGCG: store ptr {{%.*}}, ptr %[[ARR]], align 8
+// OGCG: %[[TMP_1:.*]] = load ptr, ptr %[[ARR]], align 8
+// OGCG: %[[ARR_1:.*]] = getelementptr inbounds [5 x i32], ptr %[[TMP_1]], i64 
1
+// OGCG: %[[ARR_1_2:.*]] = getelementptr inbounds [5 x i32], ptr %[[ARR_1]], 
i64 0, i64 2
+// OGCG: %[[TMP_2:.*]] = load i32, ptr %[[ARR_1_2]], align 4
+// OGCG: store i32 %[[TMP_2]], ptr %[[INIT]], align 4
+
+void func10(int *a) {
+  int e = a[5];
+}
 
-// LLVM: define void @func9(ptr {{%.*}})
-// LLVM-NEXT: alloca ptr, i64 1, align 8
+// CIR: cir.func @func10(%[[ARG:.*]]: !cir.ptr<!s32i>
+// CIR: %[[ARR:.*]] = cir.alloca !cir.ptr<!s32i>, !cir.ptr<!cir.ptr<!s32i>>, 
["a", init]
+// CIR: %[[INIT:.*]] = cir.alloca !s32i, !cir.ptr<!s32i>, ["e", init]
+// CIR: cir.store %[[ARG]], %[[ARR]] : !cir.ptr<!s32i>, 
!cir.ptr<!cir.ptr<!s32i>>
+// CIR: %[[IDX:.*]] = cir.const #cir.int<5> : !s32i
+// CIR: %[[TMP_1:.*]] = cir.load %[[ARR]] : !cir.ptr<!cir.ptr<!s32i>>, 
!cir.ptr<!s32i>
+// CIR: %[[ELE:.*]] = cir.ptr_stride(%[[TMP_1]] : !cir.ptr<!s32i>, %[[IDX]] : 
!s32i), !cir.ptr<!s32i>
+// CIR: %[[TMP_2:.*]] = cir.load %[[ELE]] : !cir.ptr<!s32i>, !s32i
+// CIR: cir.store %[[TMP_2]], %[[INIT]] : !s32i, !cir.ptr<!s32i>
+
+// LLVM: define void @func10(ptr %[[ARG:.*]]) {
+// LLVM:  %[[ARR:.*]] = alloca ptr, i64 1, align 8
+// LLVM:  %[[INIT:.*]] = alloca i32, i64 1, align 4
+// LLVM:  store ptr %[[ARG]], ptr %[[ARR]], align 8
+// LLVM:  %[[TMP_1:.*]] = load ptr, ptr %[[ARR]], align 8
+// LLVM:  %[[ELE:.*]] = getelementptr i32, ptr %[[TMP_1]], i64 5
+// LLVM:  %[[TMP_2:.*]] = load i32, ptr %[[ELE]], align 4
+// LLVM:  store i32 %[[TMP_2]], ptr %[[INIT]], align 4
 
-// OGCG: alloca ptr, align 8
+// OGCG:  %[[ARR:.*]] = alloca ptr, align 8
+// OGCG:  %[[INIT:.*]] = alloca i32, align 4
+// OGCG:  store ptr {{%.*}}, ptr %[[ARR]], align 8
+// OGCG:  %[[TMP_1:.*]] = load ptr, ptr %[[ARR]], align 8
+// OGCG:  %[[ELE:.*]] = getelementptr inbounds i32, ptr %[[TMP_1]], i64 5
+// OGCG:  %[[TMP_2:.*]] = load i32, ptr %[[ELE]], align 4
+// OGCG:  store i32 %[[TMP_2]], ptr %[[INIT]], align 4


        
_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to