Author: Timm Bäder
Date: 2024-02-16T19:00:40+01:00
New Revision: 7f45acfbfec678ad918640744229f15b9293ac79
URL: https://github.com/llvm/llvm-project/commit/7f45acfbfec678ad918640744229f15b9293ac79
DIFF: https://github.com/llvm/llvm-project/commit/7f45acfbfec678ad918640744229f15b9293ac79.diff

LOG: [clang][Interp] Implement various overflow and carry builtins

Enough so we can enable SemaCXX/builtins-overflow.cpp.

Added: 


Modified: 
    clang/lib/AST/Interp/ByteCodeEmitter.cpp
    clang/lib/AST/Interp/InterpBuiltin.cpp
    clang/lib/AST/Interp/PrimType.h
    clang/test/SemaCXX/builtins-overflow.cpp

Removed: 


################################################################################
diff --git a/clang/lib/AST/Interp/ByteCodeEmitter.cpp b/clang/lib/AST/Interp/ByteCodeEmitter.cpp
index e697e24fb341d2..2b9ff96daa6c1d 100644
--- a/clang/lib/AST/Interp/ByteCodeEmitter.cpp
+++ b/clang/lib/AST/Interp/ByteCodeEmitter.cpp
@@ -22,6 +22,10 @@
 using namespace clang;
 using namespace clang::interp;
 
+static bool isUnevaluatedBuiltin(unsigned BuiltinID) {
+  return BuiltinID == Builtin::BI__builtin_classify_type;
+}
+
 Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
   bool IsLambdaStaticInvoker = false;
   if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl);
@@ -122,7 +126,7 @@ Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {
   if (!Func) {
     bool IsUnevaluatedBuiltin = false;
     if (unsigned BI = FuncDecl->getBuiltinID())
-      IsUnevaluatedBuiltin = Ctx.getASTContext().BuiltinInfo.isUnevaluated(BI);
+      IsUnevaluatedBuiltin = isUnevaluatedBuiltin(BI);
 
     Func =
         P.createFunction(FuncDecl, ParamOffset, std::move(ParamTypes),

diff --git a/clang/lib/AST/Interp/InterpBuiltin.cpp b/clang/lib/AST/Interp/InterpBuiltin.cpp
index 9b3f70fef35998..f1040d15a1d2ad 100644
--- a/clang/lib/AST/Interp/InterpBuiltin.cpp
+++ b/clang/lib/AST/Interp/InterpBuiltin.cpp
@@ -55,8 +55,8 @@ static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
   APSInt R;
   INT_TYPE_SWITCH(T, {
     T Val = Stk.peek<T>(Offset);
-    R = APSInt(
-        APInt(Val.bitWidth(), static_cast<uint64_t>(Val), T::isSigned()));
+    R = APSInt(APInt(Val.bitWidth(), static_cast<uint64_t>(Val), T::isSigned()),
+               !T::isSigned());
   });
 
   return R;
@@ -155,6 +155,11 @@ static void pushSizeT(InterpState &S, uint64_t Val) {
   }
 }
 
+static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) {
+  INT_TYPE_SWITCH_NO_BOOL(
+      ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
+}
+
 static bool retPrimValue(InterpState &S, CodePtr OpPC, APValue &Result,
                          std::optional<PrimType> &T) {
   if (!T)
@@ -667,6 +672,175 @@ static bool interp__builtin_launder(InterpState &S, CodePtr OpPC,
   return true;
 }
 
+// Two integral values followed by a pointer (lhs, rhs, resultOut)
+static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
+                                       const InterpFrame *Frame,
+                                       const Function *Func,
+                                       const CallExpr *Call) {
+  Pointer &ResultPtr = S.Stk.peek<Pointer>();
+  if (ResultPtr.isDummy())
+    return false;
+
+  unsigned BuiltinOp = Func->getBuiltinID();
+  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
+  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
+  APSInt RHS = peekToAPSInt(S.Stk, RHST,
+                            align(primSize(PT_Ptr)) + align(primSize(RHST)));
+  APSInt LHS = peekToAPSInt(S.Stk, LHST,
+                            align(primSize(PT_Ptr)) + align(primSize(RHST)) +
+                                align(primSize(LHST)));
+  QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
+  PrimType ResultT = *S.getContext().classify(ResultType);
+  bool Overflow;
+
+  APSInt Result;
+  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
+      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
+      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
+    bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
+                    ResultType->isSignedIntegerOrEnumerationType();
+    bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
+                     ResultType->isSignedIntegerOrEnumerationType();
+    uint64_t LHSSize = LHS.getBitWidth();
+    uint64_t RHSSize = RHS.getBitWidth();
+    uint64_t ResultSize = S.getCtx().getTypeSize(ResultType);
+    uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
+
+    // Add an additional bit if the signedness isn't uniformly agreed to. We
+    // could do this ONLY if there is a signed and an unsigned that both have
+    // MaxBits, but the code to check that is pretty nasty. The issue will be
+    // caught in the shrink-to-result later anyway.
+    if (IsSigned && !AllSigned)
+      ++MaxBits;
+
+    LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
+    RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
+    Result = APSInt(MaxBits, !IsSigned);
+  }
+
+  // Find largest int.
+  switch (BuiltinOp) {
+  default:
+    llvm_unreachable("Invalid value for BuiltinOp");
+  case Builtin::BI__builtin_add_overflow:
+  case Builtin::BI__builtin_sadd_overflow:
+  case Builtin::BI__builtin_saddl_overflow:
+  case Builtin::BI__builtin_saddll_overflow:
+  case Builtin::BI__builtin_uadd_overflow:
+  case Builtin::BI__builtin_uaddl_overflow:
+  case Builtin::BI__builtin_uaddll_overflow:
+    Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
+                            : LHS.uadd_ov(RHS, Overflow);
+    break;
+  case Builtin::BI__builtin_sub_overflow:
+  case Builtin::BI__builtin_ssub_overflow:
+  case Builtin::BI__builtin_ssubl_overflow:
+  case Builtin::BI__builtin_ssubll_overflow:
+  case Builtin::BI__builtin_usub_overflow:
+  case Builtin::BI__builtin_usubl_overflow:
+  case Builtin::BI__builtin_usubll_overflow:
+    Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
+                            : LHS.usub_ov(RHS, Overflow);
+    break;
+  case Builtin::BI__builtin_mul_overflow:
+  case Builtin::BI__builtin_smul_overflow:
+  case Builtin::BI__builtin_smull_overflow:
+  case Builtin::BI__builtin_smulll_overflow:
+  case Builtin::BI__builtin_umul_overflow:
+  case Builtin::BI__builtin_umull_overflow:
+  case Builtin::BI__builtin_umulll_overflow:
+    Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
+                            : LHS.umul_ov(RHS, Overflow);
+    break;
+  }
+
+  // In the case where multiple sizes are allowed, truncate and see if
+  // the values are the same.
+  if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
+      BuiltinOp == Builtin::BI__builtin_sub_overflow ||
+      BuiltinOp == Builtin::BI__builtin_mul_overflow) {
+    // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
+    // since it will give us the behavior of a TruncOrSelf in the case where
+    // its parameter <= its size. We previously set Result to be at least the
+    // type-size of the result, so getTypeSize(ResultType) <= Resu
+    APSInt Temp = Result.extOrTrunc(S.getCtx().getTypeSize(ResultType));
+    Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
+
+    if (!APSInt::isSameValue(Temp, Result))
+      Overflow = true;
+    Result = Temp;
+  }
+
+  // Write Result to ResultPtr and put Overflow on the stack.
+  assignInteger(ResultPtr, ResultT, Result);
+  ResultPtr.initialize();
+  assert(Func->getDecl()->getReturnType()->isBooleanType());
+  S.Stk.push<Boolean>(Overflow);
+  return true;
+}
+
+/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
+static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
+                                    const InterpFrame *Frame,
+                                    const Function *Func,
+                                    const CallExpr *Call) {
+  unsigned BuiltinOp = Func->getBuiltinID();
+  PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
+  PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
+  PrimType CarryT = *S.getContext().classify(Call->getArg(2)->getType());
+  APSInt RHS = peekToAPSInt(S.Stk, RHST,
+                            align(primSize(PT_Ptr)) + align(primSize(CarryT)) +
+                                align(primSize(RHST)));
+  APSInt LHS =
+      peekToAPSInt(S.Stk, LHST,
+                   align(primSize(PT_Ptr)) + align(primSize(RHST)) +
+                       align(primSize(CarryT)) + align(primSize(LHST)));
+  APSInt CarryIn = peekToAPSInt(
+      S.Stk, LHST, align(primSize(PT_Ptr)) + align(primSize(CarryT)));
+  APSInt CarryOut;
+
+  APSInt Result;
+  // Copy the number of bits and sign.
+  Result = LHS;
+  CarryOut = LHS;
+
+  bool FirstOverflowed = false;
+  bool SecondOverflowed = false;
+  switch (BuiltinOp) {
+  default:
+    llvm_unreachable("Invalid value for BuiltinOp");
+  case Builtin::BI__builtin_addcb:
+  case Builtin::BI__builtin_addcs:
+  case Builtin::BI__builtin_addc:
+  case Builtin::BI__builtin_addcl:
+  case Builtin::BI__builtin_addcll:
+    Result =
+        LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
+    break;
+  case Builtin::BI__builtin_subcb:
+  case Builtin::BI__builtin_subcs:
+  case Builtin::BI__builtin_subc:
+  case Builtin::BI__builtin_subcl:
+  case Builtin::BI__builtin_subcll:
+    Result =
+        LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
+    break;
+  }
+  // It is possible for both overflows to happen but CGBuiltin uses an OR so
+  // this is consistent.
+  CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
+
+  Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
+  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
+  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
+  assignInteger(CarryOutPtr, CarryOutT, CarryOut);
+  CarryOutPtr.initialize();
+
+  assert(Call->getType() == Call->getArg(0)->getType());
+  pushAPSInt(S, Result);
+  return true;
+}
+
 bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
                       const CallExpr *Call) {
   InterpFrame *Frame = S.Current;
@@ -901,6 +1075,45 @@ bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
       return false;
     break;
 
+  case Builtin::BI__builtin_add_overflow:
+  case Builtin::BI__builtin_sub_overflow:
+  case Builtin::BI__builtin_mul_overflow:
+  case Builtin::BI__builtin_sadd_overflow:
+  case Builtin::BI__builtin_uadd_overflow:
+  case Builtin::BI__builtin_uaddl_overflow:
+  case Builtin::BI__builtin_uaddll_overflow:
+  case Builtin::BI__builtin_usub_overflow:
+  case Builtin::BI__builtin_usubl_overflow:
+  case Builtin::BI__builtin_usubll_overflow:
+  case Builtin::BI__builtin_umul_overflow:
+  case Builtin::BI__builtin_umull_overflow:
+  case Builtin::BI__builtin_umulll_overflow:
+  case Builtin::BI__builtin_saddl_overflow:
+  case Builtin::BI__builtin_saddll_overflow:
+  case Builtin::BI__builtin_ssub_overflow:
+  case Builtin::BI__builtin_ssubl_overflow:
+  case Builtin::BI__builtin_ssubll_overflow:
+  case Builtin::BI__builtin_smul_overflow:
+  case Builtin::BI__builtin_smull_overflow:
+  case Builtin::BI__builtin_smulll_overflow:
+    if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call))
+      return false;
+    break;
+
+  case Builtin::BI__builtin_addcb:
+  case Builtin::BI__builtin_addcs:
+  case Builtin::BI__builtin_addc:
+  case Builtin::BI__builtin_addcl:
+  case Builtin::BI__builtin_addcll:
+  case Builtin::BI__builtin_subcb:
+  case Builtin::BI__builtin_subcs:
+  case Builtin::BI__builtin_subc:
+  case Builtin::BI__builtin_subcl:
+  case Builtin::BI__builtin_subcll:
+    if (!interp__builtin_carryop(S, OpPC, Frame, F, Call))
+      return false;
+    break;
+
   default:
     return false;
   }

diff --git a/clang/lib/AST/Interp/PrimType.h b/clang/lib/AST/Interp/PrimType.h
index d07c2efe8e3c9b..24a24a71a07b57 100644
--- a/clang/lib/AST/Interp/PrimType.h
+++ b/clang/lib/AST/Interp/PrimType.h
@@ -149,6 +149,24 @@ static inline bool aligned(const void *P) {
     }                                                        \
   } while (0)
 
+#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)                     \
+  do {                                                       \
+    switch (Expr) {                                          \
+      TYPE_SWITCH_CASE(PT_Sint8, B)                          \
+      TYPE_SWITCH_CASE(PT_Uint8, B)                          \
+      TYPE_SWITCH_CASE(PT_Sint16, B)                         \
+      TYPE_SWITCH_CASE(PT_Uint16, B)                         \
+      TYPE_SWITCH_CASE(PT_Sint32, B)                         \
+      TYPE_SWITCH_CASE(PT_Uint32, B)                         \
+      TYPE_SWITCH_CASE(PT_Sint64, B)                         \
+      TYPE_SWITCH_CASE(PT_Uint64, B)                         \
+      TYPE_SWITCH_CASE(PT_IntAP, B)                          \
+      TYPE_SWITCH_CASE(PT_IntAPS, B)                         \
+    default:                                                 \
+      llvm_unreachable("Not an integer value");              \
+    }                                                        \
+  } while (0)
+
 #define COMPOSITE_TYPE_SWITCH(Expr, B, D)                    \
   do {                                                       \
     switch (Expr) {                                          \

diff --git a/clang/test/SemaCXX/builtins-overflow.cpp b/clang/test/SemaCXX/builtins-overflow.cpp
index 1b1e46ae751329..f40af5dd51cd23 100644
--- a/clang/test/SemaCXX/builtins-overflow.cpp
+++ b/clang/test/SemaCXX/builtins-overflow.cpp
@@ -1,4 +1,5 @@
 // RUN: %clang_cc1 -fsyntax-only -std=c++17 -verify %s
+// RUN: %clang_cc1 -fsyntax-only -std=c++17 -verify %s -fexperimental-new-constant-interpreter
 // expected-no-diagnostics
 
 #include <limits.h>
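For context, here is a minimal, hypothetical constexpr example of the kind of
code these builtins allow the constant interpreter to evaluate. It only mirrors
the style of builtins-overflow.cpp; the helper names below (checked_add,
add_with_carry) are illustrative and are not taken from the commit or the test:

    constexpr int checked_add(int a, int b) {
      int sum = 0;
      // __builtin_add_overflow writes the (possibly wrapped) sum through the
      // pointer and returns true when the mathematical result does not fit.
      if (__builtin_add_overflow(a, b, &sum))
        return -1;
      return sum;
    }
    static_assert(checked_add(1, 2) == 3, "");

    constexpr unsigned add_with_carry(unsigned a, unsigned b, unsigned carry_in) {
      unsigned carry_out = 0;
      // __builtin_addc adds a, b and carry_in, storing the carry-out through
      // the last pointer argument (the case handled by interp__builtin_carryop).
      unsigned sum = __builtin_addc(a, b, carry_in, &carry_out);
      return sum + carry_out;
    }
    // UINT_MAX + 0 + 1 wraps to 0 and produces a carry of 1.
    static_assert(add_with_carry(~0u, 0u, 1u) == 1u, "");

With -fexperimental-new-constant-interpreter, both static_asserts are evaluated
through the interp__builtin_overflowop and interp__builtin_carryop paths added
above.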