https://github.com/AmrDeveloper created https://github.com/llvm/llvm-project/pull/150759
This change adds support for plus (+=) and minus (-=) compound assignment on ComplexType, part of https://github.com/llvm/llvm-project/issues/141365. A minimal source-level sketch of the new behavior follows the patch.

From cc8134454578899c228634799f6150c03b0fb403 Mon Sep 17 00:00:00 2001
From: AmrDeveloper <am...@programmer.net>
Date: Sat, 26 Jul 2025 14:43:06 +0200
Subject: [PATCH] [CIR] Plus & Minus CompoundAssignment support for ComplexType

---
 clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp   | 167 ++++++++++
 clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp    |  23 ++
 clang/lib/CIR/CodeGen/CIRGenFunction.cpp      |   5 +-
 clang/lib/CIR/CodeGen/CIRGenFunction.h        |   6 +
 clang/test/CIR/CodeGen/complex-arithmetic.cpp | 288 ++++++++++++------
 5 files changed, 386 insertions(+), 103 deletions(-)

diff --git a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
index 02685a3d64121..f80ced3e7924e 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprComplex.cpp
@@ -108,6 +108,15 @@ class ComplexExprEmitter : public StmtVisitor<ComplexExprEmitter, mlir::Value> {

   mlir::Value emitPromotedComplexOperand(const Expr *e, QualType promotionTy);

+  LValue emitCompoundAssignLValue(
+      const CompoundAssignOperator *e,
+      mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &),
+      RValue &value);
+
+  mlir::Value emitCompoundAssign(
+      const CompoundAssignOperator *e,
+      mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &));
+
   mlir::Value emitBinAdd(const BinOpInfo &op);
   mlir::Value emitBinSub(const BinOpInfo &op);
@@ -143,6 +152,15 @@ class ComplexExprEmitter : public StmtVisitor<ComplexExprEmitter, mlir::Value> {
   HANDLEBINOP(Add)
   HANDLEBINOP(Sub)
 #undef HANDLEBINOP
+
+  // Compound assignments.
+  mlir::Value VisitBinAddAssign(const CompoundAssignOperator *e) {
+    return emitCompoundAssign(e, &ComplexExprEmitter::emitBinAdd);
+  }
+
+  mlir::Value VisitBinSubAssign(const CompoundAssignOperator *e) {
+    return emitCompoundAssign(e, &ComplexExprEmitter::emitBinSub);
+  }
 };

 } // namespace
@@ -153,6 +171,12 @@ static const ComplexType *getComplexType(QualType type) {
   return cast<ComplexType>(cast<AtomicType>(type)->getValueType());
 }

+static mlir::Value createComplexFromReal(CIRGenBuilderTy &builder,
+                                         mlir::Location loc, mlir::Value real) {
+  mlir::Value imag = builder.getNullValue(real.getType(), loc);
+  return builder.createComplexCreate(loc, real, imag);
+}
+
 LValue ComplexExprEmitter::emitBinAssignLValue(const BinaryOperator *e,
                                                mlir::Value &value) {
   assert(cgf.getContext().hasSameUnqualifiedType(e->getLHS()->getType(),
@@ -570,6 +594,124 @@ ComplexExprEmitter::emitBinOps(const BinaryOperator *e, QualType promotionTy) {
   return binOpInfo;
 }

+LValue ComplexExprEmitter::emitCompoundAssignLValue(
+    const CompoundAssignOperator *e,
+    mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &), RValue &value) {
+  QualType lhsTy = e->getLHS()->getType();
+  QualType rhsTy = e->getRHS()->getType();
+  SourceLocation exprLoc = e->getExprLoc();
+  mlir::Location loc = cgf.getLoc(exprLoc);
+
+  if (const AtomicType *atomicTy = lhsTy->getAs<AtomicType>())
+    lhsTy = atomicTy->getValueType();
+
+  BinOpInfo opInfo{loc};
+  opInfo.fpFeatures = e->getFPFeaturesInEffect(cgf.getLangOpts());
+
+  assert(!cir::MissingFeatures::cgFPOptionsRAII());
+
+  // Load the RHS and LHS operands.
+  // __block variables need to have the rhs evaluated first, plus this should
+  // improve codegen a little.
+  QualType promotionTypeCR = getPromotionType(e->getComputationResultType());
+  opInfo.ty = promotionTypeCR.isNull() ? e->getComputationResultType()
+                                       : promotionTypeCR;
+
+  QualType complexElementTy =
+      opInfo.ty->castAs<ComplexType>()->getElementType();
+  QualType promotionTypeRHS = getPromotionType(rhsTy);
+
+  // The RHS should have been converted to the computation type.
+  if (e->getRHS()->getType()->isRealFloatingType()) {
+    if (!promotionTypeRHS.isNull())
+      opInfo.rhs = createComplexFromReal(
+          cgf.getBuilder(), loc,
+          cgf.emitPromotedScalarExpr(e->getRHS(), promotionTypeRHS));
+    else {
+      assert(cgf.getContext().hasSameUnqualifiedType(complexElementTy, rhsTy));
+      opInfo.rhs = createComplexFromReal(cgf.getBuilder(), loc,
+                                         cgf.emitScalarExpr(e->getRHS()));
+    }
+  } else {
+    if (!promotionTypeRHS.isNull()) {
+      // The promoted RHS is already a complex value, so use it directly
+      // instead of wrapping it in another cir.complex.create.
+      opInfo.rhs = cgf.emitPromotedComplexExpr(e->getRHS(), promotionTypeRHS);
+    } else {
+      assert(cgf.getContext().hasSameUnqualifiedType(opInfo.ty, rhsTy));
+      opInfo.rhs = Visit(e->getRHS());
+    }
+  }
+
+  LValue lhs = cgf.emitLValue(e->getLHS());
+
+  // Load from the l-value and convert it.
+  QualType promotionTypeLHS = getPromotionType(e->getComputationLHSType());
+  if (lhsTy->isAnyComplexType()) {
+    mlir::Value lhsValue = emitLoadOfLValue(lhs, exprLoc);
+    QualType destTy = promotionTypeLHS.isNull() ? opInfo.ty : promotionTypeLHS;
+    opInfo.lhs = emitComplexToComplexCast(lhsValue, lhsTy, destTy, exprLoc);
+  } else {
+    mlir::Value lhsValue = cgf.emitLoadOfScalar(lhs, exprLoc);
+    // For floating point real operands we can directly pass the scalar form
+    // to the binary operator emission and potentially get more efficient code.
+    if (lhsTy->isRealFloatingType()) {
+      QualType promotedComplexElementTy;
+      if (!promotionTypeLHS.isNull()) {
+        promotedComplexElementTy =
+            cast<ComplexType>(promotionTypeLHS)->getElementType();
+        if (!cgf.getContext().hasSameUnqualifiedType(promotedComplexElementTy,
+                                                     promotionTypeLHS))
+          lhsValue = cgf.emitScalarConversion(
+              lhsValue, lhsTy, promotedComplexElementTy, exprLoc);
+      } else {
+        if (!cgf.getContext().hasSameUnqualifiedType(complexElementTy, lhsTy))
+          lhsValue = cgf.emitScalarConversion(lhsValue, lhsTy, complexElementTy,
+                                              exprLoc);
+      }
+      opInfo.lhs = createComplexFromReal(cgf.getBuilder(), loc, lhsValue);
+    } else {
+      opInfo.lhs = emitScalarToComplexCast(lhsValue, lhsTy, opInfo.ty, exprLoc);
+    }
+  }
+
+  // Expand the binary operator.
+  mlir::Value result = (this->*func)(opInfo);
+
+  // Truncate the result and store it into the LHS lvalue.
+  if (lhsTy->isAnyComplexType()) {
+    mlir::Value resultValue =
+        emitComplexToComplexCast(result, opInfo.ty, lhsTy, exprLoc);
+    emitStoreOfComplex(loc, resultValue, lhs, /*isInit=*/false);
+    value = RValue::getComplex(resultValue);
+  } else {
+    mlir::Value resultValue =
+        cgf.emitComplexToScalarConversion(result, opInfo.ty, lhsTy, exprLoc);
+    cgf.emitStoreOfScalar(resultValue, lhs, /*isInit=*/false);
+    value = RValue::get(resultValue);
+  }
+
+  return lhs;
+}
+
+mlir::Value ComplexExprEmitter::emitCompoundAssign(
+    const CompoundAssignOperator *e,
+    mlir::Value (ComplexExprEmitter::*func)(const BinOpInfo &)) {
+  RValue val;
+  LValue lv = emitCompoundAssignLValue(e, func, val);
+
+  // The result of an assignment in C is the assigned r-value.
+  if (!cgf.getLangOpts().CPlusPlus)
+    return val.getComplexValue();
+
+  // If the lvalue is non-volatile, return the computed value of the assignment.
+  if (!lv.isVolatileQualified())
+    return val.getComplexValue();
+
+  return emitLoadOfLValue(lv, e->getExprLoc());
+}
+
 mlir::Value ComplexExprEmitter::emitBinAdd(const BinOpInfo &op) {
   assert(!cir::MissingFeatures::fastMathFlags());
   assert(!cir::MissingFeatures::cgFPOptionsRAII());
@@ -600,6 +742,31 @@ mlir::Value CIRGenFunction::emitComplexExpr(const Expr *e) {
   return ComplexExprEmitter(*this).Visit(const_cast<Expr *>(e));
 }

+using CompoundFunc =
+    mlir::Value (ComplexExprEmitter::*)(const ComplexExprEmitter::BinOpInfo &);
+
+static CompoundFunc getComplexOp(BinaryOperatorKind op) {
+  switch (op) {
+  case BO_MulAssign:
+    llvm_unreachable("getComplexOp: BO_MulAssign");
+  case BO_DivAssign:
+    llvm_unreachable("getComplexOp: BO_DivAssign");
+  case BO_SubAssign:
+    return &ComplexExprEmitter::emitBinSub;
+  case BO_AddAssign:
+    return &ComplexExprEmitter::emitBinAdd;
+  default:
+    llvm_unreachable("unexpected complex compound assignment");
+  }
+}
+
+LValue CIRGenFunction::emitComplexCompoundAssignmentLValue(
+    const CompoundAssignOperator *e) {
+  CompoundFunc op = getComplexOp(e->getOpcode());
+  RValue val;
+  return ComplexExprEmitter(*this).emitCompoundAssignLValue(e, op, val);
+}
+
 mlir::Value CIRGenFunction::emitComplexPrePostIncDec(const UnaryOperator *e,
                                                      LValue lv,
                                                      cir::UnaryOpKind op,
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index 2523b0ff33787..688d74d2174f2 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -1931,6 +1931,29 @@ mlir::Value CIRGenFunction::emitScalarConversion(mlir::Value src,
       .emitScalarConversion(src, srcTy, dstTy, loc);
 }

+mlir::Value CIRGenFunction::emitComplexToScalarConversion(mlir::Value src,
+                                                          QualType srcTy,
+                                                          QualType dstTy,
+                                                          SourceLocation loc) {
+  assert(srcTy->isAnyComplexType() && hasScalarEvaluationKind(dstTy) &&
+         "Invalid complex -> scalar conversion");
+
+  QualType complexElemTy = srcTy->castAs<ComplexType>()->getElementType();
+  if (dstTy->isBooleanType()) {
+    auto kind = complexElemTy->isFloatingType()
+                    ? cir::CastKind::float_complex_to_bool
+                    : cir::CastKind::int_complex_to_bool;
+    return builder.createCast(getLoc(loc), kind, src, convertType(dstTy));
+  }
+
+  auto kind = complexElemTy->isFloatingType()
+                  ? cir::CastKind::float_complex_to_real
+                  : cir::CastKind::int_complex_to_real;
+  mlir::Value real =
+      builder.createCast(getLoc(loc), kind, src, convertType(complexElemTy));
+  return emitScalarConversion(real, complexElemTy, dstTy, loc);
+}
+
 mlir::Value ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *e) {
   // Perform vector logical not on comparison with zero vector.
   if (e->getType()->isVectorType() &&
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index b4b95d627c619..2193092eea174 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -783,9 +783,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
     }
     if (!ty->isAnyComplexType())
       return emitCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
-    cgm.errorNYI(e->getSourceRange(),
-                 "CompoundAssignOperator with ComplexType");
-    return LValue();
+
+    return emitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(e));
   }
   case Expr::CallExprClass:
   case Expr::CXXMemberCallExprClass:
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 4891c7496588f..915efa552fc3c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -918,6 +918,11 @@ class CIRGenFunction : public CIRGenTypeCache {
   /// sanitizer is enabled, a runtime check is also emitted.
   mlir::Value emitCheckedArgForAssume(const Expr *e);

+  /// Emit a conversion from the specified complex type to the specified
+  /// destination type, where the destination type is an LLVM scalar type.
+  mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy,
+                                            QualType dstTy, SourceLocation loc);
+
   LValue emitCompoundAssignmentLValue(const clang::CompoundAssignOperator *e);

   LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e);
@@ -1050,6 +1055,7 @@ class CIRGenFunction : public CIRGenTypeCache {
                                        cir::UnaryOpKind op, bool isPre);

   LValue emitComplexAssignmentLValue(const BinaryOperator *e);
+  LValue emitComplexCompoundAssignmentLValue(const CompoundAssignOperator *e);

   void emitCompoundStmt(const clang::CompoundStmt &s);
diff --git a/clang/test/CIR/CodeGen/complex-arithmetic.cpp b/clang/test/CIR/CodeGen/complex-arithmetic.cpp
index 5e384cd34ebfd..657b9dafd7b9d 100644
--- a/clang/test/CIR/CodeGen/complex-arithmetic.cpp
+++ b/clang/test/CIR/CodeGen/complex-arithmetic.cpp
@@ -11,16 +11,16 @@ void foo() {
   int _Complex c = a + b;
 }

-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
 // CIR: %[[ADD:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!s32i>

-// LLVM: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
@@ -30,16 +30,16 @@ void foo() {
 // LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } poison, i32 %[[ADD_REAL]], 0
 // LLVM: %[[RESULT_2:.*]] = insertvalue { i32, i32 } %[[RESULT]], i32 %[[ADD_IMAG]], 1

-// OGCG: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { i32, i32 }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { i32, i32 }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[ADD_REAL:.*]] = add i32 %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[ADD_IMAG:.*]] = add i32 %[[A_IMAG]], %[[B_IMAG]]
@@ -54,16 +54,16 @@ void foo2() {
   float _Complex c = a + b;
 }

-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[ADD:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>

-// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
@@ -73,16 +73,16 @@ void foo2() {
 // LLVM: %[[RESULT:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL]], 0
 // LLVM: %[[RESULT_2:.*]] = insertvalue { float, float } %[[RESULT]], float %[[ADD_IMAG]], 1

-// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[ADD_REAL:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[ADD_IMAG:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
@@ -98,23 +98,23 @@ void foo3() {
   float _Complex d = (a + b) + c;
 }

-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
-// CIR: %[[COMPLEX_C:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c"]
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c"]
 // CIR: %[[RESULT:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["d", init]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[ADD_A_B:.*]] = cir.complex.add %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>
-// CIR: %[[TMP_C:.*]] = cir.load{{.*}} %[[COMPLEX_C]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_C:.*]] = cir.load{{.*}} %[[C_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[ADD_A_B_C:.*]] = cir.complex.add %[[ADD_A_B]], %[[TMP_C]] : !cir.complex<!cir.float>
 // CIR: cir.store{{.*}} %[[ADD_A_B_C]], %[[RESULT]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>

-// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_C:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
 // LLVM: %[[RESULT:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
@@ -123,7 +123,7 @@ void foo3() {
 // LLVM: %[[ADD_IMAG_A_B:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
 // LLVM: %[[A_B:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL_A_B]], 0
 // LLVM: %[[TMP_A_B:.*]] = insertvalue { float, float } %[[A_B]], float %[[ADD_IMAG_A_B]], 1
-// LLVM: %[[TMP_C:.*]] = load { float, float }, ptr %[[COMPLEX_C]], align 4
+// LLVM: %[[TMP_C:.*]] = load { float, float }, ptr %[[C_ADDR]], align 4
 // LLVM: %[[A_B_REAL:.*]] = extractvalue { float, float } %[[TMP_A_B]], 0
 // LLVM: %[[A_B_IMAG:.*]] = extractvalue { float, float } %[[TMP_A_B]], 1
 // LLVM: %[[C_REAL:.*]] = extractvalue { float, float } %[[TMP_C]], 0
@@ -134,23 +134,23 @@ void foo3() {
 // LLVM: %[[TMP_A_B_C:.*]] = insertvalue { float, float } %[[A_B_C]], float %[[ADD_IMAG_A_B_C]], 1
 // LLVM: store { float, float } %[[TMP_A_B_C]], ptr %[[RESULT]], align 4

-// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_C:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[C_ADDR:.*]] = alloca { float, float }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[ADD_REAL_A_B:.*]] = fadd float %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[ADD_IMAG_A_B:.*]] = fadd float %[[A_IMAG]], %[[B_IMAG]]
-// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 0
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
 // OGCG: %[[C_REAL:.*]] = load float, ptr %[[C_REAL_PTR]], align 4
-// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 1
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
 // OGCG: %[[C_IMAG:.*]] = load float, ptr %[[C_IMAG_PTR]], align 4
 // OGCG: %[[ADD_REAL_A_B_C:.*]] = fadd float %[[ADD_REAL_A_B]], %[[C_REAL]]
 // OGCG: %[[ADD_IMAG_A_B_C:.*]] = fadd float %[[ADD_IMAG_A_B]], %[[C_IMAG]]
@@ -165,17 +165,17 @@ void foo4() {
   int _Complex c = a - b;
 }

-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!s32i>, !cir.ptr<!cir.complex<!s32i>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!s32i>>, !cir.complex<!s32i>
 // CIR: %[[SUB:.*]] = cir.complex.sub %[[TMP_A]], %[[TMP_B]] : !cir.complex<!s32i>

-// LLVM: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, i64 1, align 4
-// LLVM: %[[COMPLEX_C:.*]] = alloca { i32, i32 }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[C_ADDR:.*]] = alloca { i32, i32 }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { i32, i32 }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { i32, i32 }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { i32, i32 } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { i32, i32 } %[[TMP_B]], 0
@@ -184,18 +184,18 @@ void foo4() {
 // LLVM: %[[SUB_IMAG:.*]] = sub i32 %[[A_IMAG]], %[[B_IMAG]]
 // LLVM: %[[RESULT:.*]] = insertvalue { i32, i32 } poison, i32 %[[SUB_REAL]], 0
 // LLVM: %[[RESULT_2:.*]] = insertvalue { i32, i32 } %[[RESULT]], i32 %[[SUB_IMAG]], 1
-// LLVM: store { i32, i32 } %[[RESULT_2]], ptr %[[COMPLEX_C]], align 4
+// LLVM: store { i32, i32 } %[[RESULT_2]], ptr %[[C_ADDR]], align 4

-// OGCG: %[[COMPLEX_A:.*]] = alloca { i32, i32 }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { i32, i32 }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { i32, i32 }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { i32, i32 }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load i32, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load i32, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load i32, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { i32, i32 }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load i32, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[SUB_REAL:.*]] = sub i32 %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[SUB_IMAG:.*]] = sub i32 %[[A_IMAG]], %[[B_IMAG]]
@@ -210,16 +210,16 @@ void foo5() {
   float _Complex c = a - b;
 }

-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[SUB:.*]] = cir.complex.sub %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>

-// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
@@ -229,16 +229,16 @@ void foo5() {
 // LLVM: %[[RESULT:.*]] = insertvalue { float, float } poison, float %[[SUB_REAL]], 0
 // LLVM: %[[RESULT_2:.*]] = insertvalue { float, float } %[[RESULT]], float %[[SUB_IMAG]], 1

-// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[SUB_REAL:.*]] = fsub float %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[SUB_IMAG:.*]] = fsub float %[[A_IMAG]], %[[B_IMAG]]
@@ -254,23 +254,23 @@ void foo6() {
   float _Complex d = (a - b) - c;
 }

-// CIR: %[[COMPLEX_A:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
-// CIR: %[[COMPLEX_B:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
-// CIR: %[[COMPLEX_C:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c"]
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[C_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["c"]
 // CIR: %[[RESULT:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["d", init]
-// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[COMPLEX_A]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
-// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[COMPLEX_B]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[SUB_A_B:.*]] = cir.complex.sub %[[TMP_A]], %[[TMP_B]] : !cir.complex<!cir.float>
-// CIR: %[[TMP_C:.*]] = cir.load{{.*}} %[[COMPLEX_C]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_C:.*]] = cir.load{{.*}} %[[C_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
 // CIR: %[[SUB_A_B_C:.*]] = cir.complex.sub %[[SUB_A_B]], %[[TMP_C]] : !cir.complex<!cir.float>
 // CIR: cir.store{{.*}} %[[SUB_A_B_C]], %[[RESULT]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>

-// LLVM: %[[COMPLEX_A:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_B:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[COMPLEX_C:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[C_ADDR:.*]] = alloca { float, float }, i64 1, align 4
 // LLVM: %[[RESULT:.*]] = alloca { float, float }, i64 1, align 4
-// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[COMPLEX_A]], align 4
-// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[COMPLEX_B]], align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
 // LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
 // LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
 // LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
@@ -279,7 +279,7 @@ void foo6() {
 // LLVM: %[[SUB_IMAG_A_B:.*]] = fsub float %[[A_IMAG]], %[[B_IMAG]]
 // LLVM: %[[A_B:.*]] = insertvalue { float, float } poison, float %[[SUB_REAL_A_B]], 0
 // LLVM: %[[TMP_A_B:.*]] = insertvalue { float, float } %[[A_B]], float %[[SUB_IMAG_A_B]], 1
-// LLVM: %[[TMP_C:.*]] = load { float, float }, ptr %[[COMPLEX_C]], align 4
+// LLVM: %[[TMP_C:.*]] = load { float, float }, ptr %[[C_ADDR]], align 4
 // LLVM: %[[A_B_REAL:.*]] = extractvalue { float, float } %[[TMP_A_B]], 0
 // LLVM: %[[A_B_IMAG:.*]] = extractvalue { float, float } %[[TMP_A_B]], 1
 // LLVM: %[[C_REAL:.*]] = extractvalue { float, float } %[[TMP_C]], 0
@@ -290,23 +290,23 @@ void foo6() {
 // LLVM: %[[TMP_A_B_C:.*]] = insertvalue { float, float } %[[A_B_C]], float %[[SUB_IMAG_A_B_C]], 1
 // LLVM: store { float, float } %[[TMP_A_B_C]], ptr %[[RESULT]], align 4

-// OGCG: %[[COMPLEX_A:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_B:.*]] = alloca { float, float }, align 4
-// OGCG: %[[COMPLEX_C:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[C_ADDR:.*]] = alloca { float, float }, align 4
 // OGCG: %[[RESULT:.*]] = alloca { float, float }, align 4
-// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 0
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
 // OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
-// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_A]], i32 0, i32 1
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
 // OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
-// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 0
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
 // OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
-// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_B]], i32 0, i32 1
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
 // OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
 // OGCG: %[[SUB_REAL_A_B:.*]] = fsub float %[[A_REAL]], %[[B_REAL]]
 // OGCG: %[[SUB_IMAG_A_B:.*]] = fsub float %[[A_IMAG]], %[[B_IMAG]]
-// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 0
+// OGCG: %[[C_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 0
 // OGCG: %[[C_REAL:.*]] = load float, ptr %[[C_REAL_PTR]], align 4
-// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[COMPLEX_C]], i32 0, i32 1
+// OGCG: %[[C_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[C_ADDR]], i32 0, i32 1
 // OGCG: %[[C_IMAG:.*]] = load float, ptr %[[C_IMAG_PTR]], align 4
 // OGCG: %[[SUB_REAL_A_B_C:.*]] = fsub float %[[SUB_REAL_A_B]], %[[C_REAL]]
 // OGCG: %[[SUB_IMAG_A_B_C:.*]] = fsub float %[[SUB_IMAG_A_B]], %[[C_IMAG]]
@@ -314,3 +314,91 @@ void foo6() {
 // OGCG: %[[RESULT_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[RESULT]], i32 0, i32 1
 // OGCG: store float %[[SUB_REAL_A_B_C]], ptr %[[RESULT_REAL_PTR]], align 4
 // OGCG: store float %[[SUB_IMAG_A_B_C]], ptr %[[RESULT_IMAG_PTR]], align 4
+
+void foo7() {
+  float _Complex a;
+  float _Complex b;
+  b += a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[RESULT:.*]] = cir.complex.add %[[TMP_B]], %[[TMP_A]] : !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[ADD_REAL_A_B:.*]] = fadd float %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[ADD_IMAG_A_B:.*]] = fadd float %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[ADD_A_B:.*]] = insertvalue { float, float } poison, float %[[ADD_REAL_A_B]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[ADD_A_B]], float %[[ADD_IMAG_A_B]], 1
+// LLVM: store { float, float } %[[RESULT]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[ADD_REAL:.*]] = fadd float %[[B_REAL]], %[[A_REAL]]
+// OGCG: %[[ADD_IMAG:.*]] = fadd float %[[B_IMAG]], %[[A_IMAG]]
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[ADD_REAL]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store float %[[ADD_IMAG]], ptr %[[B_IMAG_PTR]], align 4
+
+void foo8() {
+  float _Complex a;
+  float _Complex b;
+  b -= a;
+}
+
+// CIR: %[[A_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["a"]
+// CIR: %[[B_ADDR:.*]] = cir.alloca !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>, ["b"]
+// CIR: %[[TMP_A:.*]] = cir.load{{.*}} %[[A_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[TMP_B:.*]] = cir.load{{.*}} %[[B_ADDR]] : !cir.ptr<!cir.complex<!cir.float>>, !cir.complex<!cir.float>
+// CIR: %[[RESULT:.*]] = cir.complex.sub %[[TMP_B]], %[[TMP_A]] : !cir.complex<!cir.float>
+// CIR: cir.store{{.*}} %[[RESULT]], %[[B_ADDR]] : !cir.complex<!cir.float>, !cir.ptr<!cir.complex<!cir.float>>
+
+// LLVM: %[[A_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[B_ADDR:.*]] = alloca { float, float }, i64 1, align 4
+// LLVM: %[[TMP_A:.*]] = load { float, float }, ptr %[[A_ADDR]], align 4
+// LLVM: %[[TMP_B:.*]] = load { float, float }, ptr %[[B_ADDR]], align 4
+// LLVM: %[[B_REAL:.*]] = extractvalue { float, float } %[[TMP_B]], 0
+// LLVM: %[[B_IMAG:.*]] = extractvalue { float, float } %[[TMP_B]], 1
+// LLVM: %[[A_REAL:.*]] = extractvalue { float, float } %[[TMP_A]], 0
+// LLVM: %[[A_IMAG:.*]] = extractvalue { float, float } %[[TMP_A]], 1
+// LLVM: %[[SUB_REAL_A_B:.*]] = fsub float %[[B_REAL]], %[[A_REAL]]
+// LLVM: %[[SUB_IMAG_A_B:.*]] = fsub float %[[B_IMAG]], %[[A_IMAG]]
+// LLVM: %[[SUB_A_B:.*]] = insertvalue { float, float } poison, float %[[SUB_REAL_A_B]], 0
+// LLVM: %[[RESULT:.*]] = insertvalue { float, float } %[[SUB_A_B]], float %[[SUB_IMAG_A_B]], 1
+// LLVM: store { float, float } %[[RESULT]], ptr %[[B_ADDR]], align 4
+
+// OGCG: %[[A_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[B_ADDR:.*]] = alloca { float, float }, align 4
+// OGCG: %[[A_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 0
+// OGCG: %[[A_REAL:.*]] = load float, ptr %[[A_REAL_PTR]], align 4
+// OGCG: %[[A_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[A_ADDR]], i32 0, i32 1
+// OGCG: %[[A_IMAG:.*]] = load float, ptr %[[A_IMAG_PTR]], align 4
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_REAL:.*]] = load float, ptr %[[B_REAL_PTR]], align 4
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: %[[B_IMAG:.*]] = load float, ptr %[[B_IMAG_PTR]], align 4
+// OGCG: %[[SUB_REAL:.*]] = fsub float %[[B_REAL]], %[[A_REAL]]
+// OGCG: %[[SUB_IMAG:.*]] = fsub float %[[B_IMAG]], %[[A_IMAG]]
+// OGCG: %[[B_REAL_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 0
+// OGCG: %[[B_IMAG_PTR:.*]] = getelementptr inbounds nuw { float, float }, ptr %[[B_ADDR]], i32 0, i32 1
+// OGCG: store float %[[SUB_REAL]], ptr %[[B_REAL_PTR]], align 4
+// OGCG: store float %[[SUB_IMAG]], ptr %[[B_IMAG_PTR]], align 4
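For reference, a minimal source-level sketch of the behavior this patch enables (the function names below are illustrative, not taken from the patch; the CIR operation names come from the tests above):

// Complex LHS: += and -= now lower to cir.complex.add / cir.complex.sub
// instead of hitting the "CompoundAssignOperator with ComplexType" NYI path.
void complex_lhs() {
  float _Complex a;
  float _Complex b;
  b += a; // real and imaginary parts are added component-wise
  b -= a; // real and imaginary parts are subtracted component-wise
}

// Scalar LHS: the scalar is widened to a complex value with a zero imaginary
// part via cir.complex.create, the operation is performed in the complex
// computation type, and emitComplexToScalarConversion truncates the result
// back to the scalar type, dropping the imaginary part.
void scalar_lhs(float _Complex a, float b) {
  b += a; // under C semantics, equivalent to b = crealf(b + a)
}

Only BO_AddAssign and BO_SubAssign are mapped in getComplexOp; the BO_MulAssign and BO_DivAssign cases remain llvm_unreachable, so *= and /= on complex operands are left for a follow-up.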