https://github.com/andykaylor created 
https://github.com/llvm/llvm-project/pull/131369

This adds support for the cir.unary operation.

>From d51bb762224f70b2b879198e2466ca0d258f9eae Mon Sep 17 00:00:00 2001
From: Andy Kaylor <akay...@nvidia.com>
Date: Mon, 10 Mar 2025 15:07:50 -0700
Subject: [PATCH] [CIR] Upstream initial support for unary op

This adds support for the cir.unary operation.
---
 clang/include/clang/CIR/Dialect/IR/CIROps.td  |  49 +++
 clang/include/clang/CIR/MissingFeatures.h     |   9 +
 clang/lib/CIR/CodeGen/CIRGenExpr.cpp          |  48 +++
 clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp    | 223 ++++++++++
 clang/lib/CIR/CodeGen/CIRGenFunction.cpp      |   2 +
 clang/lib/CIR/CodeGen/CIRGenFunction.h        |   4 +
 clang/lib/CIR/CodeGen/CIRGenValue.h           |   1 +
 clang/lib/CIR/Dialect/IR/CIRDialect.cpp       |  41 ++
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp | 125 +++++-
 .../CIR/Lowering/DirectToLLVM/LowerToLLVM.h   |  10 +
 clang/test/CIR/CodeGen/unary.cpp              | 392 ++++++++++++++++++
 11 files changed, 903 insertions(+), 1 deletion(-)
 create mode 100644 clang/test/CIR/CodeGen/unary.cpp

diff --git a/clang/include/clang/CIR/Dialect/IR/CIROps.td 
b/clang/include/clang/CIR/Dialect/IR/CIROps.td
index 77c43e5ace64a..52c78ffe42647 100644
--- a/clang/include/clang/CIR/Dialect/IR/CIROps.td
+++ b/clang/include/clang/CIR/Dialect/IR/CIROps.td
@@ -468,6 +468,55 @@ def BrOp : CIR_Op<"br",
   }];
 }
 
+//===----------------------------------------------------------------------===//
+// UnaryOp
+//===----------------------------------------------------------------------===//
+
+def UnaryOpKind_Inc   : I32EnumAttrCase<"Inc",   1, "inc">;
+def UnaryOpKind_Dec   : I32EnumAttrCase<"Dec",   2, "dec">;
+def UnaryOpKind_Plus  : I32EnumAttrCase<"Plus",  3, "plus">;
+def UnaryOpKind_Minus : I32EnumAttrCase<"Minus", 4, "minus">;
+def UnaryOpKind_Not   : I32EnumAttrCase<"Not",   5, "not">;
+
+def UnaryOpKind : I32EnumAttr<
+    "UnaryOpKind",
+    "unary operation kind",
+    [UnaryOpKind_Inc,
+     UnaryOpKind_Dec,
+     UnaryOpKind_Plus,
+     UnaryOpKind_Minus,
+     UnaryOpKind_Not,
+     ]> {
+  let cppNamespace = "::cir";
+}
+
+// FIXME: Pure won't work when we add overloading.
+def UnaryOp : CIR_Op<"unary", [Pure, SameOperandsAndResultType]> {
+  let summary = "Unary operations";
+  let description = [{
+    `cir.unary` performs the unary operation according to
+    the specified opcode kind: [inc, dec, plus, minus, not].
+
+    It requires one input operand and has one result, both types
+    should be the same.
+
+    ```mlir
+    %7 = cir.unary(inc, %1) : !s32i, !s32i
+    %8 = cir.unary(dec, %2) : !s32i, !s32i
+    ```
+  }];
+
+  let results = (outs CIR_AnyType:$result);
+  let arguments = (ins Arg<UnaryOpKind, "unary op kind">:$kind, 
Arg<CIR_AnyType>:$input);
+
+  let assemblyFormat = [{
+      `(` $kind `,` $input `)` `:` type($input) `,` type($result) attr-dict
+  }];
+
+  let hasVerifier = 1;
+  let hasFolder = 1;
+}
+
 
//===----------------------------------------------------------------------===//
 // GlobalOp
 
//===----------------------------------------------------------------------===//
diff --git a/clang/include/clang/CIR/MissingFeatures.h 
b/clang/include/clang/CIR/MissingFeatures.h
index 6c3d74cf96c64..fcbb2ae3db6aa 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -72,6 +72,10 @@ struct MissingFeatures {
   static bool opFuncLinkage() { return false; }
   static bool opFuncVisibility() { return false; }
 
+  // Unary operator handling
+  static bool opUnarySignedOverflow() { return false; }
+  static bool opUnaryPromotionType() { return false; }
+
   // Misc
   static bool scalarConversionOpts() { return false; }
   static bool tryEmitAsConstant() { return false; }
@@ -86,6 +90,11 @@ struct MissingFeatures {
   static bool aggValueSlot() { return false; }
 
   static bool unsizedTypes() { return false; }
+  static bool sanitizers() { return false; }
+  static bool CGFPOptionsRAII() { return false; }
+
+  // Missing types
+  static bool vectorType() { return false; }
 };
 
 } // namespace cir
diff --git a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
index 5b81fe172e645..24c0c8a18efd8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExpr.cpp
@@ -165,6 +165,54 @@ LValue CIRGenFunction::emitDeclRefLValue(const DeclRefExpr 
*e) {
   return LValue();
 }
 
+LValue CIRGenFunction::emitUnaryOpLValue(const UnaryOperator *e) {
+  UnaryOperatorKind op = e->getOpcode();
+
+  // __extension__ doesn't affect lvalue-ness.
+  if (op == UO_Extension)
+    return emitLValue(e->getSubExpr());
+
+  switch (op) {
+  case UO_Deref: {
+    cgm.errorNYI(e->getSourceRange(), "UnaryOp dereference");
+    return LValue();
+  }
+  case UO_Real:
+  case UO_Imag: {
+    cgm.errorNYI(e->getSourceRange(), "UnaryOp real/imag");
+    return LValue();
+  }
+  case UO_PreInc:
+  case UO_PreDec: {
+    bool isInc = e->isIncrementOp();
+    LValue lv = emitLValue(e->getSubExpr());
+
+    assert(e->isPrefix() && "Prefix operator in unexpected state!");
+
+    if (e->getType()->isAnyComplexType()) {
+      cgm.errorNYI(e->getSourceRange(), "UnaryOp complex inc/dec");
+      return LValue();
+    } else {
+      emitScalarPrePostIncDec(e, lv, isInc, /*isPre=*/true);
+    }
+
+    return lv;
+  }
+  case UO_Extension:
+    llvm_unreachable("UnaryOperator extension should be handled above!");
+  case UO_Plus:
+  case UO_Minus:
+  case UO_Not:
+  case UO_LNot:
+  case UO_AddrOf:
+  case UO_PostInc:
+  case UO_PostDec:
+  case UO_Coawait:
+    llvm_unreachable("UnaryOperator of non-lvalue kind!");
+  }
+  llvm_unreachable("Unknown unary operator kind!");
+}
+
 /// Emit code to compute the specified expression which
 /// can have any type.  The result is returned as an RValue struct.
 RValue CIRGenFunction::emitAnyExpr(const Expr *e) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp 
b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
index b9e56dc4123d6..b0d644faade17 100644
--- a/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenExprScalar.cpp
@@ -92,6 +92,222 @@ class ScalarExprEmitter : public 
StmtVisitor<ScalarExprEmitter, mlir::Value> {
 
   mlir::Value VisitCastExpr(CastExpr *E);
 
+  // Unary Operators.
+  mlir::Value VisitUnaryPostDec(const UnaryOperator *e) {
+    LValue lv = cgf.emitLValue(e->getSubExpr());
+    return emitScalarPrePostIncDec(e, lv, false, false);
+  }
+  mlir::Value VisitUnaryPostInc(const UnaryOperator *e) {
+    LValue lv = cgf.emitLValue(e->getSubExpr());
+    return emitScalarPrePostIncDec(e, lv, true, false);
+  }
+  mlir::Value VisitUnaryPreDec(const UnaryOperator *e) {
+    LValue lv = cgf.emitLValue(e->getSubExpr());
+    return emitScalarPrePostIncDec(e, lv, false, true);
+  }
+  mlir::Value VisitUnaryPreInc(const UnaryOperator *e) {
+    LValue lv = cgf.emitLValue(e->getSubExpr());
+    return emitScalarPrePostIncDec(e, lv, true, true);
+  }
+  mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
+                                      bool isInc, bool isPre) {
+    if (cgf.getLangOpts().OpenMP)
+      cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec OpenMP");
+
+    QualType type = e->getSubExpr()->getType();
+
+    mlir::Value value;
+    mlir::Value input;
+
+    if (type->getAs<AtomicType>()) {
+      cgf.cgm.errorNYI(e->getSourceRange(), "Atomic inc/dec");
+      // TODO(cir): This is not correct, but it will produce reasonable code
+      // until atomic operations are implemented.
+      value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getScalarVal();
+      input = value;
+    } else {
+      value = cgf.emitLoadOfLValue(lv, e->getExprLoc()).getScalarVal();
+      input = value;
+    }
+
+    // NOTE: When possible, more frequent cases are handled first.
+
+    // Special case of integer increment that we have to check first: bool++.
+    // Due to promotion rules, we get:
+    //   bool++ -> bool = bool + 1
+    //          -> bool = (int)bool + 1
+    //          -> bool = ((int)bool + 1 != 0)
+    // An interesting aspect of this is that increment is always true.
+    // Decrement does not have this property.
+    if (isInc && type->isBooleanType()) {
+      value = builder.create<cir::ConstantOp>(cgf.getLoc(e->getExprLoc()),
+                                              cgf.convertType(type),
+                                              builder.getCIRBoolAttr(true));
+    } else if (type->isIntegerType()) {
+      QualType promotedType;
+      bool canPerformLossyDemotionCheck = false;
+      if (cgf.getContext().isPromotableIntegerType(type)) {
+        promotedType = cgf.getContext().getPromotedIntegerType(type);
+        assert(promotedType != type && "Shouldn't promote to the same type.");
+        canPerformLossyDemotionCheck = true;
+        canPerformLossyDemotionCheck &=
+            cgf.getContext().getCanonicalType(type) !=
+            cgf.getContext().getCanonicalType(promotedType);
+        canPerformLossyDemotionCheck &=
+            type->isIntegerType() && promotedType->isIntegerType();
+
+        // TODO(cir): Currently, we store bitwidths in CIR types only for
+        // integers. This might also be required for other types.
+        auto srcCirTy = mlir::dyn_cast<cir::IntType>(cgf.convertType(type));
+        auto promotedCirTy =
+            mlir::dyn_cast<cir::IntType>(cgf.convertType(promotedType));
+        assert(srcCirTy && promotedCirTy && "Expected integer type");
+
+        assert(
+            (!canPerformLossyDemotionCheck ||
+             type->isSignedIntegerOrEnumerationType() ||
+             promotedType->isSignedIntegerOrEnumerationType() ||
+             srcCirTy.getWidth() == promotedCirTy.getWidth()) &&
+            "The following check expects that if we do promotion to different "
+            "underlying canonical type, at least one of the types (either "
+            "base or promoted) will be signed, or the bitwidths will match.");
+      }
+
+      assert(!cir::MissingFeatures::sanitizers());
+      if (e->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
+        value = emitIncDecConsiderOverflowBehavior(e, value, isInc);
+      } else {
+        cir::UnaryOpKind kind =
+            isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
+        // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
+        value = emitUnaryOp(e, kind, input);
+      }
+    } else if (type->isPointerType()) {
+      cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec pointer");
+      return {};
+    } else if (type->isVectorType()) {
+      cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec vector");
+      return {};
+    } else if (type->isRealFloatingType()) {
+      assert(!cir::MissingFeatures::CGFPOptionsRAII());
+
+      if (type->isHalfType() &&
+          !cgf.getContext().getLangOpts().NativeHalfType) {
+        cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec half");
+        return {};
+      }
+
+      if (mlir::isa<cir::SingleType, cir::DoubleType>(value.getType())) {
+        // Create the inc/dec operation.
+        // NOTE(CIR): clang calls CreateAdd but folds this to a unary op
+        cir::UnaryOpKind kind =
+            (isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec);
+        value = emitUnaryOp(e, kind, value);
+      } else {
+        cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fp type");
+        return {};
+      }
+    } else if (type->isFixedPointType()) {
+      cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec other fixed point");
+      return {};
+    } else {
+      assert(type->castAs<ObjCObjectPointerType>());
+      cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec ObjectiveC 
pointer");
+      return {};
+    }
+
+    CIRGenFunction::SourceLocRAIIObject sourceloc{
+        cgf, cgf.getLoc(e->getSourceRange())};
+
+    // Store the updated result through the lvalue
+    if (lv.isBitField()) {
+      cgf.cgm.errorNYI(e->getSourceRange(), "Unary inc/dec bitfield");
+      return {};
+    } else {
+      cgf.emitStoreThroughLValue(RValue::get(value), lv);
+    }
+
+    // If this is a postinc, return the value read from memory, otherwise use
+    // the updated value.
+    return isPre ? value : input;
+  }
+
+  mlir::Value emitIncDecConsiderOverflowBehavior(const UnaryOperator *e,
+                                                 mlir::Value inVal,
+                                                 bool isInc) {
+    assert(!cir::MissingFeatures::opUnarySignedOverflow());
+    cir::UnaryOpKind kind =
+        isInc ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
+    switch (cgf.getLangOpts().getSignedOverflowBehavior()) {
+    case LangOptions::SOB_Defined:
+      return emitUnaryOp(e, kind, inVal);
+    case LangOptions::SOB_Undefined:
+      assert(!cir::MissingFeatures::sanitizers());
+      return emitUnaryOp(e, kind, inVal);
+      break;
+    case LangOptions::SOB_Trapping:
+      if (!e->canOverflow())
+        return emitUnaryOp(e, kind, inVal);
+      cgf.cgm.errorNYI(e->getSourceRange(), "inc/dec overflow SOB_Trapping");
+      return {};
+    }
+    llvm_unreachable("Unexpected signed overflow behavior kind");
+  }
+
+  mlir::Value VisitUnaryPlus(const UnaryOperator *e,
+                             QualType promotionType = QualType()) {
+    if (!promotionType.isNull())
+      cgf.cgm.errorNYI(e->getSourceRange(), "VisitUnaryPlus: promotionType");
+    assert(!cir::MissingFeatures::opUnaryPromotionType());
+    mlir::Value result = VisitPlus(e);
+    return result;
+  }
+
+  mlir::Value VisitPlus(const UnaryOperator *e) {
+    // This differs from gcc, though, most likely due to a bug in gcc.
+    ignoreResultAssign = false;
+
+    assert(!cir::MissingFeatures::opUnaryPromotionType());
+    mlir::Value operand = Visit(e->getSubExpr());
+
+    return emitUnaryOp(e, cir::UnaryOpKind::Plus, operand);
+  }
+
+  mlir::Value VisitUnaryMinus(const UnaryOperator *e,
+                              QualType promotionType = QualType()) {
+    if (!promotionType.isNull())
+      cgf.cgm.errorNYI(e->getSourceRange(), "VisitUnaryMinus: promotionType");
+    assert(!cir::MissingFeatures::opUnaryPromotionType());
+    mlir::Value result = VisitMinus(e);
+    return result;
+  }
+
+  mlir::Value VisitMinus(const UnaryOperator *e) {
+    ignoreResultAssign = false;
+
+    assert(!cir::MissingFeatures::opUnaryPromotionType());
+    mlir::Value operand = Visit(e->getSubExpr());
+
+    assert(!cir::MissingFeatures::opUnarySignedOverflow());
+
+    // NOTE: LLVM codegen will lower this directly to either a FNeg
+    // or a Sub instruction.  In CIR this will be handled later in LowerToLLVM.
+    return emitUnaryOp(e, cir::UnaryOpKind::Minus, operand);
+  }
+
+  mlir::Value emitUnaryOp(const UnaryOperator *e, cir::UnaryOpKind kind,
+                          mlir::Value input) {
+    return builder.create<cir::UnaryOp>(
+        cgf.getLoc(e->getSourceRange().getBegin()), input.getType(), kind,
+        input);
+  }
+
+  mlir::Value VisitUnaryNot(const UnaryOperator *e) {
+    ignoreResultAssign = false;
+    mlir::Value op = Visit(e->getSubExpr());
+    return emitUnaryOp(e, cir::UnaryOpKind::Not, op);
+  }
+
   /// Emit a conversion from the specified type to the specified destination
   /// type, both of which are CIR scalar types.
   /// TODO: do we need ScalarConversionOpts here? Should be done in another
@@ -148,3 +364,10 @@ mlir::Value ScalarExprEmitter::VisitCastExpr(CastExpr *ce) 
{
   }
   return {};
 }
+
+mlir::Value CIRGenFunction::emitScalarPrePostIncDec(const UnaryOperator *E,
+                                                    LValue LV, bool isInc,
+                                                    bool isPre) {
+  return ScalarExprEmitter(*this, builder)
+      .emitScalarPrePostIncDec(E, LV, isInc, isPre);
+}
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp 
b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 47d296b70d789..2338ec9cd952a 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -305,6 +305,8 @@ LValue CIRGenFunction::emitLValue(const Expr *e) {
                                std::string("l-value not implemented for '") +
                                    e->getStmtClassName() + "'");
     return LValue();
+  case Expr::UnaryOperatorClass:
+    return emitUnaryOpLValue(cast<UnaryOperator>(e));
   case Expr::DeclRefExprClass:
     return emitDeclRefLValue(cast<DeclRefExpr>(e));
   }
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h 
b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 5ab882666f3e0..3542b6cafbc9c 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -206,6 +206,7 @@ class CIRGenFunction : public CIRGenTypeCache {
                       LValue lvalue, bool capturedByInit = false);
 
   LValue emitDeclRefLValue(const clang::DeclRefExpr *e);
+  LValue emitUnaryOpLValue(const clang::UnaryOperator *e);
 
   /// Determine whether the given initializer is trivial in the sense
   /// that it requires no code to be generated.
@@ -305,6 +306,9 @@ class CIRGenFunction : public CIRGenTypeCache {
     // TODO: Add symbol table support
   }
 
+  mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv,
+                                      bool isInc, bool isPre);
+
   /// Emit the computation of the specified expression of scalar type.
   mlir::Value emitScalarExpr(const clang::Expr *e);
   cir::FuncOp generateCode(clang::GlobalDecl gd, cir::FuncOp fn,
diff --git a/clang/lib/CIR/CodeGen/CIRGenValue.h 
b/clang/lib/CIR/CodeGen/CIRGenValue.h
index d29646983fd30..c559e853aad39 100644
--- a/clang/lib/CIR/CodeGen/CIRGenValue.h
+++ b/clang/lib/CIR/CodeGen/CIRGenValue.h
@@ -93,6 +93,7 @@ class LValue {
 
 public:
   bool isSimple() const { return lvType == Simple; }
+  bool isBitField() const { return lvType == BitField; }
 
   // TODO: Add support for volatile
   bool isVolatile() const { return false; }
diff --git a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp 
b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
index d041791770d82..faf8996434f74 100644
--- a/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
+++ b/clang/lib/CIR/Dialect/IR/CIRDialect.cpp
@@ -457,6 +457,47 @@ void cir::FuncOp::print(OpAsmPrinter &p) {
 // been implemented yet.
 mlir::LogicalResult cir::FuncOp::verify() { return success(); }
 
+//===----------------------------------------------------------------------===//
+// UnaryOp
+//===----------------------------------------------------------------------===//
+
+LogicalResult cir::UnaryOp::verify() {
+  switch (getKind()) {
+  case cir::UnaryOpKind::Inc:
+  case cir::UnaryOpKind::Dec:
+  case cir::UnaryOpKind::Plus:
+  case cir::UnaryOpKind::Minus:
+  case cir::UnaryOpKind::Not:
+    // Nothing to verify.
+    return success();
+  }
+
+  llvm_unreachable("Unknown UnaryOp kind?");
+}
+
+static bool isBoolNot(cir::UnaryOp op) {
+  return isa<cir::BoolType>(op.getInput().getType()) &&
+         op.getKind() == cir::UnaryOpKind::Not;
+}
+
+// This folder simplifies the sequential boolean not operations.
+// For instance, the next two unary operations will be eliminated:
+//
+// ```mlir
+// %1 = cir.unary(not, %0) : !cir.bool, !cir.bool
+// %2 = cir.unary(not, %1) : !cir.bool, !cir.bool
+// ```
+//
+// and the argument of the first one (%0) will be used instead.
+OpFoldResult cir::UnaryOp::fold(FoldAdaptor adaptor) {
+  if (isBoolNot(*this))
+    if (auto previous = dyn_cast_or_null<UnaryOp>(getInput().getDefiningOp()))
+      if (isBoolNot(previous))
+        return previous.getInput();
+
+  return {};
+}
+
 
//===----------------------------------------------------------------------===//
 // TableGen'd op method definitions
 
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp 
b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
index 0cd27ecf1a3bd..a126e1a29de13 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.cpp
@@ -568,6 +568,128 @@ mlir::LogicalResult 
CIRToLLVMGlobalOpLowering::matchAndRewrite(
   return mlir::success();
 }
 
+mlir::LogicalResult CIRToLLVMUnaryOpLowering::matchAndRewrite(
+    cir::UnaryOp op, OpAdaptor adaptor,
+    mlir::ConversionPatternRewriter &rewriter) const {
+  assert(op.getType() == op.getInput().getType() &&
+         "Unary operation's operand type and result type are different");
+  mlir::Type type = op.getType();
+  mlir::Type elementType = type;
+  bool isVector = false;
+  assert(!cir::MissingFeatures::vectorType());
+  mlir::Type llvmType = getTypeConverter()->convertType(type);
+  mlir::Location loc = op.getLoc();
+
+  auto createIntConstant = [&](int64_t value) -> mlir::Value {
+    return rewriter.create<mlir::LLVM::ConstantOp>(
+        loc, llvmType, mlir::IntegerAttr::get(llvmType, value));
+  };
+
+  auto createFloatConstant = [&](double value) -> mlir::Value {
+    mlir::FloatAttr attr = rewriter.getFloatAttr(llvmType, value);
+    return rewriter.create<mlir::LLVM::ConstantOp>(loc, llvmType, attr);
+  };
+
+  // Integer unary operations: + - ~ ++ --
+  if (mlir::isa<cir::IntType>(elementType)) {
+    mlir::LLVM::IntegerOverflowFlags maybeNSW =
+        mlir::LLVM::IntegerOverflowFlags::none;
+    if (mlir::cast<cir::IntType>(elementType).isSigned()) {
+      assert(!cir::MissingFeatures::opUnarySignedOverflow());
+      // TODO: For now, assume signed overflow is undefined. We'll need to add
+      // an attribute to the unary op to control this.
+      maybeNSW = mlir::LLVM::IntegerOverflowFlags::nsw;
+    }
+
+    switch (op.getKind()) {
+    case cir::UnaryOpKind::Inc:
+      assert(!isVector && "++ not allowed on vector types");
+      rewriter.replaceOpWithNewOp<mlir::LLVM::AddOp>(
+          op, llvmType, adaptor.getInput(), createIntConstant(1), maybeNSW);
+      return mlir::success();
+    case cir::UnaryOpKind::Dec:
+      assert(!isVector && "-- not allowed on vector types");
+      rewriter.replaceOpWithNewOp<mlir::LLVM::SubOp>(
+          op, llvmType, adaptor.getInput(), createIntConstant(1), maybeNSW);
+      return mlir::success();
+    case cir::UnaryOpKind::Plus:
+      rewriter.replaceOp(op, adaptor.getInput());
+      return mlir::success();
+    case cir::UnaryOpKind::Minus:
+      assert(!isVector &&
+             "Add vector handling when vector types are supported");
+      rewriter.replaceOpWithNewOp<mlir::LLVM::SubOp>(
+          op, llvmType, createIntConstant(0), adaptor.getInput(), maybeNSW);
+      return mlir::success();
+
+    case cir::UnaryOpKind::Not:
+      // bit-wise complement operator, implemented as an XOR with -1.
+      assert(!isVector &&
+             "Add vector handling when vector types are supported");
+      rewriter.replaceOpWithNewOp<mlir::LLVM::XOrOp>(
+          op, llvmType, adaptor.getInput(), createIntConstant(-1));
+      return mlir::success();
+    }
+    llvm_unreachable("Unexpected unary op for int");
+  }
+
+  // Floating point unary operations: + - ++ --
+  if (mlir::isa<cir::CIRFPTypeInterface>(elementType)) {
+    switch (op.getKind()) {
+    case cir::UnaryOpKind::Inc:
+      assert(!isVector && "++ not allowed on vector types");
+      rewriter.replaceOpWithNewOp<mlir::LLVM::FAddOp>(
+          op, llvmType, createFloatConstant(1.0), adaptor.getInput());
+      return mlir::success();
+    case cir::UnaryOpKind::Dec:
+      assert(!isVector && "-- not allowed on vector types");
+      rewriter.replaceOpWithNewOp<mlir::LLVM::FAddOp>(
+          op, llvmType, createFloatConstant(-1.0), adaptor.getInput());
+      return mlir::success();
+    case cir::UnaryOpKind::Plus:
+      rewriter.replaceOp(op, adaptor.getInput());
+      return mlir::success();
+    case cir::UnaryOpKind::Minus:
+      rewriter.replaceOpWithNewOp<mlir::LLVM::FNegOp>(op, llvmType,
+                                                      adaptor.getInput());
+      return mlir::success();
+    case cir::UnaryOpKind::Not:
+      return op.emitError() << "Unary not is invalid for floating-point types";
+    }
+    llvm_unreachable("Unexpected unary op for float");
+  }
+
+  // Boolean unary operations: ! only. (For all others, the operand has
+  // already been promoted to int.)
+  if (mlir::isa<cir::BoolType>(elementType)) {
+    switch (op.getKind()) {
+    case cir::UnaryOpKind::Inc:
+    case cir::UnaryOpKind::Dec:
+    case cir::UnaryOpKind::Plus:
+    case cir::UnaryOpKind::Minus:
+      // Some of these are allowed in source code, but we shouldn't get here
+      // with a boolean type.
+      return op.emitError() << "Unsupported unary operation on boolean type";
+    case cir::UnaryOpKind::Not:
+      assert(!isVector && "NYI: op! on vector mask");
+      rewriter.replaceOpWithNewOp<mlir::LLVM::XOrOp>(
+          op, llvmType, adaptor.getInput(), createIntConstant(1));
+      return mlir::success();
+    }
+    llvm_unreachable("Unexpected unary op for bool");
+  }
+
+  // Pointer unary operations: + only.  (++ and -- of pointers are implemented
+  // with cir.ptr_stride, not cir.unary.)
+  if (mlir::isa<cir::PointerType>(elementType)) {
+    return op.emitError()
+           << "Unary operation on pointer types is not yet implemented";
+  }
+
+  return op.emitError() << "Unary operation has unsupported type: "
+                        << elementType;
+}
+
 static void prepareTypeConverter(mlir::LLVMTypeConverter &converter,
                                  mlir::DataLayout &dataLayout) {
   converter.addConversion([&](cir::PointerType type) -> mlir::Type {
@@ -707,7 +829,8 @@ void ConvertCIRToLLVMPass::runOnOperation() {
       // clang-format off
                CIRToLLVMBrOpLowering,
                CIRToLLVMFuncOpLowering,
-               CIRToLLVMTrapOpLowering
+               CIRToLLVMTrapOpLowering,
+               CIRToLLVMUnaryOpLowering
       // clang-format on
       >(converter, patterns.getContext());
 
diff --git a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h 
b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
index d090bbe4f2e10..60518e55348e1 100644
--- a/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
+++ b/clang/lib/CIR/Lowering/DirectToLLVM/LowerToLLVM.h
@@ -136,6 +136,16 @@ class CIRToLLVMGlobalOpLowering
       cir::GlobalOp op, mlir::ConversionPatternRewriter &rewriter) const;
 };
 
+class CIRToLLVMUnaryOpLowering
+    : public mlir::OpConversionPattern<cir::UnaryOp> {
+public:
+  using mlir::OpConversionPattern<cir::UnaryOp>::OpConversionPattern;
+
+  mlir::LogicalResult
+  matchAndRewrite(cir::UnaryOp op, OpAdaptor,
+                  mlir::ConversionPatternRewriter &) const override;
+};
+
 class CIRToLLVMBrOpLowering : public mlir::OpConversionPattern<cir::BrOp> {
 public:
   using mlir::OpConversionPattern<cir::BrOp>::OpConversionPattern;
diff --git a/clang/test/CIR/CodeGen/unary.cpp b/clang/test/CIR/CodeGen/unary.cpp
new file mode 100644
index 0000000000000..5d93587463562
--- /dev/null
+++ b/clang/test/CIR/CodeGen/unary.cpp
@@ -0,0 +1,392 @@
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir 
-Wno-unused-value -emit-cir %s -o %t.cir
+// RUN: FileCheck --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fclangir 
-Wno-unused-value -emit-llvm %s -o %t-cir.ll
+// RUN: FileCheck --input-file=%t-cir.ll %s --check-prefix=LLVM
+// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -Wno-unused-value 
-emit-llvm %s -o %t.ll
+// RUN: FileCheck --input-file=%t.ll %s --check-prefix=OGCG
+
+unsigned up0() {
+  unsigned a = 1u;
+  return +a;
+}
+
+// CHECK: cir.func @up0() -> !cir.int<u, 32>
+// CHECK:   %[[A:.*]] = cir.alloca !cir.int<u, 32>, !cir.ptr<!cir.int<u, 32>>, 
["a", init]
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[OUTPUT:.*]] = cir.unary(plus, %[[INPUT]])
+
+// LLVM: define i32 @up0()
+// LLVM:   %[[A:.*]] = alloca i32, i64 1, align 4
+// LLVM:   store i32 1, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+
+// OGCG: define{{.*}} i32 @_Z3up0v()
+// OGCG:   %[[A:.*]] = alloca i32, align 4
+// OGCG:   store i32 1, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+
+unsigned um0() {
+  unsigned a = 1u;
+  return -a;
+}
+
+// CHECK: cir.func @um0() -> !cir.int<u, 32>
+// CHECK:   %[[A:.*]] = cir.alloca !cir.int<u, 32>, !cir.ptr<!cir.int<u, 32>>, 
["a", init]
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[OUTPUT:.*]] = cir.unary(minus, %[[INPUT]])
+
+// LLVM: define i32 @um0()
+// LLVM:   %[[A:.*]] = alloca i32, i64 1, align 4
+// LLVM:   store i32 1, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// LLVM:   %[[RESULT:.*]] = sub i32 0, %[[A_LOAD]]
+
+// OGCG: define{{.*}} i32 @_Z3um0v()
+// OGCG:   %[[A:.*]] = alloca i32, align 4
+// OGCG:   store i32 1, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// OGCG:   %[[RESULT:.*]] = sub i32 0, %[[A_LOAD]]
+
+unsigned un0() {
+  unsigned a = 1u;
+  return ~a; // bitwise not: equivalent to a ^ -1
+}
+
+// CHECK: cir.func @un0() -> !cir.int<u, 32>
+// CHECK:   %[[A:.*]] = cir.alloca !cir.int<u, 32>, !cir.ptr<!cir.int<u, 32>>, 
["a", init]
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[OUTPUT:.*]] = cir.unary(not, %[[INPUT]])
+
+// LLVM: define i32 @un0()
+// LLVM:   %[[A:.*]] = alloca i32, i64 1, align 4
+// LLVM:   store i32 1, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// LLVM:   %[[RESULT:.*]] = xor i32 %[[A_LOAD]], -1
+
+// OGCG: define{{.*}} i32 @_Z3un0v()
+// OGCG:   %[[A:.*]] = alloca i32, align 4
+// OGCG:   store i32 1, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// OGCG:   %[[RESULT:.*]] = xor i32 %[[A_LOAD]], -1
+
+int inc0() {
+  int a = 1;
+  ++a;
+  return a;
+}
+
+// CHECK: cir.func @inc0() -> !cir.int<s, 32>
+// CHECK:   %[[A:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, 
["a", init]
+// CHECK:   %[[ATMP:.*]] = cir.const #cir.int<1> : !cir.int<s, 32>
+// CHECK:   cir.store %[[ATMP]], %[[A]] : !cir.int<s, 32>
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[INPUT]])
+// CHECK:   cir.store %[[INCREMENTED]], %[[A]]
+// CHECK:   %[[A_TO_OUTPUT:.*]] = cir.load %[[A]]
+
+// LLVM: define i32 @inc0()
+// LLVM:   %[[A:.*]] = alloca i32, i64 1, align 4
+// LLVM:   store i32 1, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// LLVM:   %[[RESULT:.*]] = add nsw i32 %[[A_LOAD]], 1
+
+// OGCG: define{{.*}} i32 @_Z4inc0v()
+// OGCG:   %[[A:.*]] = alloca i32, align 4
+// OGCG:   store i32 1, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// OGCG:   %[[RESULT:.*]] = add nsw i32 %[[A_LOAD]], 1
+
+int dec0() {
+  int a = 1;
+  --a;
+  return a;
+}
+
+// CHECK: cir.func @dec0() -> !cir.int<s, 32>
+// CHECK:   %[[A:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, 
["a", init]
+// CHECK:   %[[ATMP:.*]] = cir.const #cir.int<1> : !cir.int<s, 32>
+// CHECK:   cir.store %[[ATMP]], %[[A]] : !cir.int<s, 32>
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[DECREMENTED:.*]] = cir.unary(dec, %[[INPUT]])
+// CHECK:   cir.store %[[DECREMENTED]], %[[A]]
+// CHECK:   %[[A_TO_OUTPUT:.*]] = cir.load %[[A]]
+
+// LLVM: define i32 @dec0()
+// LLVM:   %[[A:.*]] = alloca i32, i64 1, align 4
+// LLVM:   store i32 1, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// LLVM:   %[[RESULT:.*]] = sub nsw i32 %[[A_LOAD]], 1
+
+// OGCG: define{{.*}} i32 @_Z4dec0v()
+// OGCG:   %[[A:.*]] = alloca i32, align 4
+// OGCG:   store i32 1, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// OGCG:   %[[RESULT:.*]] = add nsw i32 %[[A_LOAD]], -1
+
+int inc1() {
+  int a = 1;
+  a++;
+  return a;
+}
+
+// CHECK: cir.func @inc1() -> !cir.int<s, 32>
+// CHECK:   %[[A:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, 
["a", init]
+// CHECK:   %[[ATMP:.*]] = cir.const #cir.int<1> : !cir.int<s, 32>
+// CHECK:   cir.store %[[ATMP]], %[[A]] : !cir.int<s, 32>
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[INPUT]])
+// CHECK:   cir.store %[[INCREMENTED]], %[[A]]
+// CHECK:   %[[A_TO_OUTPUT:.*]] = cir.load %[[A]]
+
+// LLVM: define i32 @inc1()
+// LLVM:   %[[A:.*]] = alloca i32, i64 1, align 4
+// LLVM:   store i32 1, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// LLVM:   %[[RESULT:.*]] = add nsw i32 %[[A_LOAD]], 1
+
+// OGCG: define{{.*}} i32 @_Z4inc1v()
+// OGCG:   %[[A:.*]] = alloca i32, align 4
+// OGCG:   store i32 1, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// OGCG:   %[[RESULT:.*]] = add nsw i32 %[[A_LOAD]], 1
+
+int dec1() {
+  int a = 1;
+  a--;
+  return a;
+}
+
+// CHECK: cir.func @dec1() -> !cir.int<s, 32>
+// CHECK:   %[[A:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, 
["a", init]
+// CHECK:   %[[ATMP:.*]] = cir.const #cir.int<1> : !cir.int<s, 32>
+// CHECK:   cir.store %[[ATMP]], %[[A]] : !cir.int<s, 32>
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[DECREMENTED:.*]] = cir.unary(dec, %[[INPUT]])
+// CHECK:   cir.store %[[DECREMENTED]], %[[A]]
+// CHECK:   %[[A_TO_OUTPUT:.*]] = cir.load %[[A]]
+
+// LLVM: define i32 @dec1()
+// LLVM:   %[[A:.*]] = alloca i32, i64 1, align 4
+// LLVM:   store i32 1, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// LLVM:   %[[RESULT:.*]] = sub nsw i32 %[[A_LOAD]], 1
+
+// OGCG: define{{.*}} i32 @_Z4dec1v()
+// OGCG:   %[[A:.*]] = alloca i32, align 4
+// OGCG:   store i32 1, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// OGCG:   %[[RESULT:.*]] = add nsw i32 %[[A_LOAD]], -1
+
+// Ensure b receives the value of a from before the increment.
+int inc2() {
+  int a = 1;
+  int b = a++;
+  return b;
+}
+
+// CHECK: cir.func @inc2() -> !cir.int<s, 32>
+// CHECK:   %[[A:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, 
["a", init]
+// CHECK:   %[[B:.*]] = cir.alloca !cir.int<s, 32>, !cir.ptr<!cir.int<s, 32>>, 
["b", init]
+// CHECK:   %[[ATMP:.*]] = cir.const #cir.int<1> : !cir.int<s, 32>
+// CHECK:   cir.store %[[ATMP]], %[[A]] : !cir.int<s, 32>
+// CHECK:   %[[ATOB:.*]] = cir.load %[[A]]
+// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[ATOB]])
+// CHECK:   cir.store %[[INCREMENTED]], %[[A]]
+// CHECK:   cir.store %[[ATOB]], %[[B]]
+// CHECK:   %[[B_TO_OUTPUT:.*]] = cir.load %[[B]]
+
+// LLVM: define i32 @inc2()
+// LLVM:   %[[A:.*]] = alloca i32, i64 1, align 4
+// LLVM:   %[[B:.*]] = alloca i32, i64 1, align 4
+// LLVM:   store i32 1, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// LLVM:   %[[A_INC:.*]] = add nsw i32 %[[A_LOAD]], 1
+// LLVM:   store i32 %[[A_INC]], ptr %[[A]], align 4
+// LLVM:   store i32 %[[A_LOAD]], ptr %[[B]], align 4
+// LLVM:   %[[B_TO_OUTPUT:.*]] = load i32, ptr %[[B]], align 4
+
+// OGCG: define{{.*}} i32 @_Z4inc2v()
+// OGCG:   %[[A:.*]] = alloca i32, align 4
+// OGCG:   %[[B:.*]] = alloca i32, align 4
+// OGCG:   store i32 1, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load i32, ptr %[[A]], align 4
+// OGCG:   %[[A_INC:.*]] = add nsw i32 %[[A_LOAD]], 1
+// OGCG:   store i32 %[[A_INC]], ptr %[[A]], align 4
+// OGCG:   store i32 %[[A_LOAD]], ptr %[[B]], align 4
+// OGCG:   %[[B_TO_OUTPUT:.*]] = load i32, ptr %[[B]], align 4
+
+float fpPlus() {
+  float a = 1.0f;
+  return +a;
+}
+
+// CHECK: cir.func @fpPlus() -> !cir.float
+// CHECK:   %[[A:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a", 
init]
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[OUTPUT:.*]] = cir.unary(plus, %[[INPUT]])
+
+// LLVM: define float @fpPlus()
+// LLVM:   %[[A:.*]] = alloca float, i64 1, align 4
+// LLVM:   store float 1.000000e+00, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+
+// OGCG: define{{.*}} float @_Z6fpPlusv()
+// OGCG:   %[[A:.*]] = alloca float, align 4
+// OGCG:   store float 1.000000e+00, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+
+float fpMinus() {
+  float a = 1.0f;
+  return -a;
+}
+
+// CHECK: cir.func @fpMinus() -> !cir.float
+// CHECK:   %[[A:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a", 
init]
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[OUTPUT:.*]] = cir.unary(minus, %[[INPUT]])
+
+// LLVM: define float @fpMinus()
+// LLVM:   %[[A:.*]] = alloca float, i64 1, align 4
+// LLVM:   store float 1.000000e+00, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// LLVM:   %[[RESULT:.*]] = fneg float %[[A_LOAD]]
+
+// OGCG: define{{.*}} float @_Z7fpMinusv()
+// OGCG:   %[[A:.*]] = alloca float, align 4
+// OGCG:   store float 1.000000e+00, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// OGCG:   %[[RESULT:.*]] = fneg float %[[A_LOAD]]
+
+float fpPreInc() {
+  float a = 1.0f;
+  return ++a;
+}
+
+// CHECK: cir.func @fpPreInc() -> !cir.float
+// CHECK:   %[[A:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a", 
init]
+// CHECK:   %[[ATMP:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
+// CHECK:   cir.store %[[ATMP]], %[[A]] : !cir.float
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[INPUT]])
+
+// LLVM: define float @fpPreInc()
+// LLVM:   %[[A:.*]] = alloca float, i64 1, align 4
+// LLVM:   store float 1.000000e+00, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// LLVM:   %[[RESULT:.*]] = fadd float 1.000000e+00, %[[A_LOAD]]
+
+// OGCG: define{{.*}} float @_Z8fpPreIncv()
+// OGCG:   %[[A:.*]] = alloca float, align 4
+// OGCG:   store float 1.000000e+00, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// OGCG:   %[[RESULT:.*]] = fadd float %[[A_LOAD]], 1.000000e+00
+
+float fpPreDec() {
+  float a = 1.0f;
+  return --a;
+}
+
+// CHECK: cir.func @fpPreDec() -> !cir.float
+// CHECK:   %[[A:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a", 
init]
+// CHECK:   %[[ATMP:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
+// CHECK:   cir.store %[[ATMP]], %[[A]] : !cir.float
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[DECREMENTED:.*]] = cir.unary(dec, %[[INPUT]])
+
+// LLVM: define float @fpPreDec()
+// LLVM:   %[[A:.*]] = alloca float, i64 1, align 4
+// LLVM:   store float 1.000000e+00, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// LLVM:   %[[RESULT:.*]] = fadd float -1.000000e+00, %[[A_LOAD]]
+
+// OGCG: define{{.*}} float @_Z8fpPreDecv()
+// OGCG:   %[[A:.*]] = alloca float, align 4
+// OGCG:   store float 1.000000e+00, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// OGCG:   %[[RESULT:.*]] = fadd float %[[A_LOAD]], -1.000000e+00
+
+float fpPostInc() {
+  float a = 1.0f;
+  return a++;
+}
+
+// CHECK: cir.func @fpPostInc() -> !cir.float
+// CHECK:   %[[A:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a", 
init]
+// CHECK:   %[[ATMP:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
+// CHECK:   cir.store %[[ATMP]], %[[A]] : !cir.float
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[INPUT]])
+
+// LLVM: define float @fpPostInc()
+// LLVM:   %[[A:.*]] = alloca float, i64 1, align 4
+// LLVM:   store float 1.000000e+00, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// LLVM:   %[[RESULT:.*]] = fadd float 1.000000e+00, %[[A_LOAD]]
+
+// OGCG: define{{.*}} float @_Z9fpPostIncv()
+// OGCG:   %[[A:.*]] = alloca float, align 4
+// OGCG:   store float 1.000000e+00, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// OGCG:   %[[RESULT:.*]] = fadd float %[[A_LOAD]], 1.000000e+00
+
+float fpPostDec() {
+  float a = 1.0f;
+  return a--;
+}
+
+// CHECK: cir.func @fpPostDec() -> !cir.float
+// CHECK:   %[[A:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a", 
init]
+// CHECK:   %[[ATMP:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
+// CHECK:   cir.store %[[ATMP]], %[[A]] : !cir.float
+// CHECK:   %[[INPUT:.*]] = cir.load %[[A]]
+// CHECK:   %[[DECREMENTED:.*]] = cir.unary(dec, %[[INPUT]])
+
+// LLVM: define float @fpPostDec()
+// LLVM:   %[[A:.*]] = alloca float, i64 1, align 4
+// LLVM:   store float 1.000000e+00, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// LLVM:   %[[RESULT:.*]] = fadd float -1.000000e+00, %[[A_LOAD]]
+
+// OGCG: define{{.*}} float @_Z9fpPostDecv()
+// OGCG:   %[[A:.*]] = alloca float, align 4
+// OGCG:   store float 1.000000e+00, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// OGCG:   %[[RESULT:.*]] = fadd float %[[A_LOAD]], -1.000000e+00
+
+// Ensure b receives the value of a from before the increment.
+float fpPostInc2() {
+  float a = 1.0f;
+  float b = a++;
+  return b;
+}
+
+// CHECK: cir.func @fpPostInc2() -> !cir.float
+// CHECK:   %[[A:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["a", 
init]
+// CHECK:   %[[B:.*]] = cir.alloca !cir.float, !cir.ptr<!cir.float>, ["b", 
init]
+// CHECK:   %[[ATMP:.*]] = cir.const #cir.fp<1.000000e+00> : !cir.float
+// CHECK:   cir.store %[[ATMP]], %[[A]] : !cir.float
+// CHECK:   %[[ATOB:.*]] = cir.load %[[A]]
+// CHECK:   %[[INCREMENTED:.*]] = cir.unary(inc, %[[ATOB]])
+// CHECK:   cir.store %[[INCREMENTED]], %[[A]]
+// CHECK:   cir.store %[[ATOB]], %[[B]]
+// CHECK:   %[[B_TO_OUTPUT:.*]] = cir.load %[[B]]
+
+// LLVM: define float @fpPostInc2()
+// LLVM:   %[[A:.*]] = alloca float, i64 1, align 4
+// LLVM:   %[[B:.*]] = alloca float, i64 1, align 4
+// LLVM:   store float 1.000000e+00, ptr %[[A]], align 4
+// LLVM:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// LLVM:   %[[A_INC:.*]] = fadd float 1.000000e+00, %[[A_LOAD]]
+// LLVM:   store float %[[A_INC]], ptr %[[A]], align 4
+// LLVM:   store float %[[A_LOAD]], ptr %[[B]], align 4
+// LLVM:   %[[B_TO_OUTPUT:.*]] = load float, ptr %[[B]], align 4
+
+// OGCG: define{{.*}} float @_Z10fpPostInc2v()
+// OGCG:   %[[A:.*]] = alloca float, align 4
+// OGCG:   %[[B:.*]] = alloca float, align 4
+// OGCG:   store float 1.000000e+00, ptr %[[A]], align 4
+// OGCG:   %[[A_LOAD:.*]] = load float, ptr %[[A]], align 4
+// OGCG:   %[[A_INC:.*]] = fadd float %[[A_LOAD]], 1.000000e+00
+// OGCG:   store float %[[A_INC]], ptr %[[A]], align 4
+// OGCG:   store float %[[A_LOAD]], ptr %[[B]], align 4
+// OGCG:   %[[B_TO_OUTPUT:.*]] = load float, ptr %[[B]], align 4

_______________________________________________
cfe-commits mailing list
cfe-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits

Reply via email to