@@ -5915,6 +5916,16 @@ bool Sema::GatherArgumentsForCall(SourceLocation
CallLoc, FunctionDecl *FDecl,
ProtoArgType->isBlockPointerType())
if (auto *BE = dyn_cast<BlockExpr>(Arg->IgnoreParenNoopCasts(Context)))
BE->getBlockDecl()->setDoesNotEscape();
+ //
@@ -128,6 +128,16 @@ class CodeGenTypes {
/// memory representation is usually i8 or i32, depending on the target.
llvm::Type *ConvertTypeForMem(QualType T, bool ForBitField = false);
+ /// Check that size and ABI alignment of given LLVM type matches size and
+ /// align
@@ -610,8 +610,26 @@ bool ConstStructBuilder::AppendBytes(CharUnits
FieldOffsetInChars,
}
bool ConstStructBuilder::AppendBitField(
-const FieldDecl *Field, uint64_t FieldOffset, llvm::ConstantInt *CI,
+const FieldDecl *Field, uint64_t FieldOffset, llvm::Constant *C,
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/91364
@@ -118,6 +124,39 @@ llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T,
bool ForBitField) {
return R;
}
+bool CodeGenTypes::typeRequiresSplitIntoByteArray(QualType ASTTy,
+ llvm::Type *LLVMTy) {
+ if (!LLVMTy)
+LLVM
https://github.com/rjmccall requested changes to this pull request.
Getting very close.
https://github.com/llvm/llvm-project/pull/91364
@@ -1774,6 +1784,22 @@ llvm::Constant
*ConstantEmitter::emitForMemory(CodeGenModule &CGM,
return Res;
}
+ if (destType->isBitIntType()) {
+if (!CGM.getTypes().LLVMTypeLayoutMatchesAST(destType, C->getType())) {
+ // Long _BitInt has array of bytes as in-memory
@@ -3140,6 +3140,269 @@
ASTContext::getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD) {
return llvm::getPointerAuthStableSipHash(Str);
}
+/// Encode a function type for use in the discriminator of a function pointer
+/// type. We can't use the itanium scheme
@@ -609,9 +609,25 @@ bool ConstStructBuilder::AppendBytes(CharUnits
FieldOffsetInChars,
return Builder.add(InitCst, StartOffset + FieldOffsetInChars,
AllowOverwrite);
}
-bool ConstStructBuilder::AppendBitField(
-const FieldDecl *Field, uint64_t FieldOffset, llvm::Const
@@ -1,12 +1,25 @@
-// RUN: %clang_cc1 -triple x86_64-gnu-linux -O3 -disable-llvm-passes
-emit-llvm -o - %s | FileCheck %s --check-prefixes=CHECK,CHECK64
-// RUN: %clang_cc1 -triple x86_64-windows-pc -O3 -disable-llvm-passes
-emit-llvm -o - %s | FileCheck %s --check-prefixes=CHEC
@@ -2093,17 +2107,10 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value
*Value, Address Addr,
llvm::Type *SrcTy = Value->getType();
if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
-if (VecTy && ClangVecTy->isExtVectorBoolType()) {
-
@@ -1774,6 +1774,18 @@ llvm::Constant
*ConstantEmitter::emitForMemory(CodeGenModule &CGM,
return Res;
}
+ if (const auto *BIT = destType->getAs<BitIntType>()) {
+if (BIT->getNumBits() > 128) {
+ // Long _BitInt has array of bytes as in-memory type.
+ ConstantAggregat
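A standalone sketch of the byte-splitting idea in the hunk above (illustrative only: the real patch works on llvm::APInt constants inside ConstantEmitter, and the byte order is the target's, assumed little-endian here):

```cpp
#include <cstdint>
#include <vector>

// Split a wide integer value into individual bytes so it can be emitted as an
// "array of bytes" in-memory form. unsigned __int128 (a compiler extension)
// stands in for a wider _BitInt value.
std::vector<uint8_t> splitIntoBytes(unsigned __int128 Value, unsigned NumBytes) {
  std::vector<uint8_t> Bytes(NumBytes, 0);
  for (unsigned I = 0; I != NumBytes && Value != 0; ++I) {
    Bytes[I] = static_cast<uint8_t>(Value & 0xff);
    Value >>= 8;
  }
  return Bytes;
}
```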
rjmccall wrote:
I also have trouble imagining why a target would ever want to make `va_copy` a
non-trivial operation, and I suspect that in practice programmers do not
reliably call `va_end` to clean up their iterations. In general, I would say
that platforms should be moving towards making v
rjmccall wrote:
*Every* `va_list` stores pointers; otherwise, it wouldn't be able to support an
arbitrary number of arguments. Copying just copies the pointers, and that's
fine because the memory they point to is immutable and always outlives the
`va_list`. You can imagine a `va_list` implem
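To make "stores pointers" concrete, here is a hypothetical va_list shaped roughly like the x86-64 SysV one; this is an illustration, not the ABI's actual definition:

```cpp
// The object itself holds only offsets and pointers into argument memory that
// the caller owns, so a plain copy of the object copies the iteration state
// and nothing else.
struct example_va_list {
  unsigned gp_offset;       // next unread general-purpose register slot
  unsigned fp_offset;       // next unread floating-point register slot
  void *overflow_arg_area;  // stack-passed arguments in the caller's frame
  void *reg_save_area;      // register arguments spilled by the prologue
};
```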
rjmccall wrote:
> > _Every_ `va_list` stores pointers; otherwise, it wouldn't be able to
> > support an arbitrary number of arguments. Copying just copies the pointers,
> > and that's fine because the memory they point to is immutable and always
> > outlives the `va_list`. You can imagine a `v
rjmccall wrote:
Oh, I completely spaced on this before, but of course there *are* constraints
on `va_list` in the standard: `va_list`s are passed by value to functions like
`vprintf`. That, of course, requires the value to be primitively copied. If
you call `vprintf(format, args)`, the standa
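For example (plain standard C usage, not specific to this patch), the call below hands the va_list to vprintf by value, which is exactly the primitive copy being referred to:

```cpp
#include <cstdarg>
#include <cstdio>

void log_message(const char *Format, ...) {
  va_list Args;
  va_start(Args, Format);
  std::vprintf(Format, Args); // va_list passed by value
  va_end(Args);
}
```

On ABIs where va_list is an array type, that by-value pass decays to a pointer, which is the "array trick" mentioned later in the thread.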
@@ -1886,6 +1896,29 @@ llvm::Constant
*ConstantEmitter::emitForMemory(CodeGenModule &CGM,
return Res;
}
+ if (destType->isBitIntType()) {
+if (CGM.getTypes().typeRequiresSplitIntoByteArray(destType, C->getType()))
{
+ // Long _BitInt has array of bytes as in-
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/91364
https://github.com/rjmccall commented:
This is generally looking great, and I think it's ready to go as soon as you
can finish the tests. (You said you weren't able to update all the tests — did
you have questions about the remaining tests?)
I did have a thought, though. Are we confident that
@@ -107,17 +107,52 @@ llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T,
bool ForBitField) {
return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
}
- // If this is a bool type, or a bit-precise integer type in a bitfield
- // representation, map this
@@ -89,7 +89,7 @@ void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
/// ConvertType in that it is used to convert to the memory representation for
/// a type. For example, the scalar representation for _Bool is i1, but the
/// memory representation is usually i8 or i3
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/91364
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/91364
rjmccall wrote:
Ah right, I'd forgotten that some ABIs use that array trick to get it to pass
by reference, and you're right that that makes it ill-formed to simply assign
around.
I like your idea of specifically making it UB to copy with `memcpy` etc and
just advising that people use va_copy
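A sketch of the advised pattern: when a va_list has to be consumed more than once, duplicate it with va_copy and release the copy with va_end, rather than memcpy'ing the object:

```cpp
#include <cstdarg>
#include <cstdio>

void log_to_both(const char *Format, va_list Args) {
  va_list Copy;
  va_copy(Copy, Args);                // portable way to duplicate the state
  std::vfprintf(stderr, Format, Copy);
  va_end(Copy);
  std::vprintf(Format, Args);         // the original list is still usable here
}
```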
rjmccall wrote:
Okay, so x86_64 describes it in byte terms and says they're little-endian,
which is consistent with the overall target. Interestingly, it does not
guarantee the content of the excess bits. The code-generation in this patch is
consistent with that: the extension we do is unnec
rjmccall wrote:
Given all that, I feel pretty comfortable relying on using LLVM's `i96` stores
and so on. I do worry some that we're eventually going to run into a target
where the `_BitInt` ABI does not match what LLVM wants to generate for `i96`
load/store, but we should be able to generali
https://github.com/rjmccall approved this pull request.
https://github.com/llvm/llvm-project/pull/98146
rjmccall wrote:
> > Okay, so x86_64 describes it in byte terms and says they're little-endian,
> > which is consistent with the overall target. Interestingly, it does not
> > guarantee the content of the excess bits. The code-generation in this patch
> > is consistent with that: the extension
https://github.com/rjmccall approved this pull request.
https://github.com/llvm/llvm-project/pull/91364
rjmccall wrote:
Hmm. I think this is actually pretty different from the `bool` pattern.
Suppose we're talking about `_BitInt(N)`. Let `BYTES := ceil(N/8)`, and let
`BITS := BYTES * 8`.
The problem being presented here is this:
1. For at least some values of `N`, we cannot use LLVM's `iN` f
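Restating those definitions with concrete numbers (nothing here beyond the arithmetic above):

```cpp
// BYTES := ceil(N / 8), BITS := BYTES * 8
constexpr unsigned bytesFor(unsigned N) { return (N + 7) / 8; }
constexpr unsigned bitsFor(unsigned N) { return bytesFor(N) * 8; }

static_assert(bytesFor(96) == 12 && bitsFor(96) == 96, "");
static_assert(bytesFor(129) == 17 && bitsFor(129) == 136, "");
```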
rjmccall wrote:
If you want to do things that way, you will need to (1) generalize CodeGenTypes
with a new API that will return this load/store type when applicable and (2)
look at all the places we call `ConvertTypeForMem`, `EmitToMemory`, and
`EmitFromMemory` to make sure they do the right t
rjmccall wrote:
My experience is that compiler writers are really good at hacking in special
cases to make their test cases work and really bad at recognizing that their
case isn't as special as they think. There are three types already called out
for special treatment in `ConvertTypeForMem`,
https://github.com/rjmccall commented:
I think you're right about the intended logic being to check for a definition,
especially given the wording of the warning. IIRC, we didn't have some of
these high-level checks at the time.
With that said, I think you need to check if a definition exists
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/85886
@@ -2415,46 +2415,112 @@ DiagnosticBuilder
ItaniumRecordLayoutBuilder::Diag(SourceLocation Loc,
return Context.getDiagnostics().Report(Loc, DiagID);
}
+/// https://itanium-cxx-abi.github.io/cxx-abi/abi.html#POD
+/// POD for the purpose of layout
+/// In general, a type is
https://github.com/rjmccall approved this pull request.
This LGTM. Please reach out to the Fuchsia and XL folks if you can, but we
don't need to hold up the PR over it.
https://github.com/llvm/llvm-project/pull/90462
https://github.com/rjmccall commented:
Is this really all that's required? It looks like you're just filling in
explicit zero padding when emitting constant initializers. That should steer
clear of any possibility that LLVM would treat the padding as `undef` for
optimization purposes (surely
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/97121
@@ -361,6 +368,13 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const
VarDecl &D,
}
return GV;
}
+ if (!getLangOpts().CPlusPlus) {
+// In C, when an initializer is given, the Linux kernel relies on clang to
+// zero-initialize all members not explicitly
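The C initialization rule the comment relies on, shown in isolation (type and field names are made up; the hunk itself is about how the initializer constant gets emitted):

```cpp
// With a partial initializer, every member not mentioned is zero-initialized.
struct config { int flags; int mode; int reserved[4]; };
struct config default_config = { .flags = 1 };
// .mode and .reserved[] must come out as zero in the emitted constant.
```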
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/92103
https://github.com/rjmccall commented:
The code changes look right to me.
https://github.com/llvm/llvm-project/pull/92103
@@ -160,6 +160,9 @@ Bug Fixes in This Version
Bug Fixes to Compiler Builtins
^^
+- ``__is_layout_compatible`` no longer requires the empty bases to be the same
in two
+ standard-layout classes. It now only compares non-static data members.
---
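A sketch of what the fix permits (hypothetical types; assumes a compiler that provides the __is_layout_compatible builtin, which is the trait this note is about):

```cpp
struct EmptyA {};
struct EmptyB {};
struct S1 : EmptyA { int i; char c; };
struct S2 : EmptyB { int i; char c; };
// Both are standard-layout with identical non-static data members, so the
// differing empty bases no longer make the trait report false.
static_assert(__is_layout_compatible(S1, S2), "");
```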
Author: John McCall
Date: 2021-10-08T05:44:06-04:00
New Revision: 5ab6ee75994d645725264e757d67bbb1c96fb2b6
URL:
https://github.com/llvm/llvm-project/commit/5ab6ee75994d645725264e757d67bbb1c96fb2b6
DIFF:
https://github.com/llvm/llvm-project/commit/5ab6ee75994d645725264e757d67bbb1c96fb2b6.diff
@@ -1121,3 +1121,99 @@ bool SemaHLSL::CheckBuiltinFunctionCall(unsigned
BuiltinID, CallExpr *TheCall) {
}
return false;
}
+
+bool SemaHLSL::CheckCompatibleParameterABI(FunctionDecl *New,
+ FunctionDecl *Old) {
+ if (New->getNumPar
@@ -4348,8 +4348,18 @@ LValue CodeGenFunction::EmitMatrixSubscriptExpr(const
MatrixSubscriptExpr *E) {
!E->isIncomplete() &&
"incomplete matrix subscript expressions should be rejected during
Sema");
LValue Base = EmitLValue(E->getBase());
- llvm::Value *RowIdx
https://github.com/rjmccall approved this pull request.
LGTM
https://github.com/llvm/llvm-project/pull/103044
@@ -4148,6 +4152,30 @@ static void emitWriteback(CodeGenFunction &CGF,
assert(!isProvablyNull(srcAddr.getBasePointer()) &&
"shouldn't have writeback for provably null argument");
+ if (CGF.getLangOpts().HLSL) {
+if (!isa(writeback.CastExpr)) {
+ RValue Tmp
@@ -182,18 +182,8 @@ void LangOptions::setLangDefaults(LangOptions &Opts,
Language Lang,
Opts.HIP = Lang == Language::HIP;
Opts.CUDA = Lang == Language::CUDA || Opts.HIP;
- if (Opts.HIP) {
-// HIP toolchain does not support 'Fast' FPOpFusion in backends since it
-
@@ -3178,6 +3178,13 @@ static void RenderFloatingPointOptions(const ToolChain
&TC, const Driver &D,
StringRef Val = A->getValue();
if (Val == "fast" || Val == "on" || Val == "off" ||
Val == "fast-honor-pragmas") {
+// fast-honor-pragmas is depreca
@@ -68,35 +68,12 @@
// RUN: -O3 -target-cpu gfx906 -o - -x ir %t.ll \
// RUN: | FileCheck -check-prefixes=COMMON,AMD-OPT-FASTSTD %s
-// Explicit -ffp-contract=fast-honor-pragmas
-// In IR, fmul/fadd instructions with contract flag are emitted.
-// In backend
-//nvptx/a
rjmccall wrote:
I don't usually do that for people, sorry.
https://github.com/llvm/llvm-project/pull/94885
rjmccall wrote:
That sounds fine to me as long as we're still emitting projections of them
properly (i.e. not just assuming "oh, it's an empty record, we can use whatever
pointer we want because it'll never be dereferenced").
https://github.com/llvm/llvm-project/pull/96422
@@ -185,10 +185,33 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type
*Ty) {
return getChar();
// Handle pointers and references.
- // TODO: Implement C++'s type "similarity" and consider dis-"similar"
- // pointers distinct.
- if (Ty->isPointerType() || Ty->
https://github.com/rjmccall commented:
Generally looking good.
https://github.com/llvm/llvm-project/pull/76612
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/76612
@@ -185,10 +185,33 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type
*Ty) {
return getChar();
// Handle pointers and references.
rjmccall wrote:
Probably worth putting standard citations here:
```suggestion
// Handle pointers and references.
@@ -1059,9 +1059,15 @@
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
if (Guard.isValid()) {
// If we have a guard variable, check whether we've already performed
// these initializations. This happens for TLS initialization functions.
- ll
https://github.com/rjmccall commented:
Alright. LGTM, but let's ping @AaronBallman and @efriedma-quic.
https://github.com/llvm/llvm-project/pull/76612
@@ -1059,9 +1059,15 @@
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
if (Guard.isValid()) {
// If we have a guard variable, check whether we've already performed
// these initializations. This happens for TLS initialization functions.
- ll
rjmccall wrote:
Yeah, the conditional operator doesn't do any floating-point math itself. If
the first operand is a floating-point expression, we should always be modeling
that with a float-to-boolean conversion, and the flags should go there.
https://github.com/llvm/llvm-project/pull/105912
https://github.com/rjmccall approved this pull request.
Thanks, that looks good.
https://github.com/llvm/llvm-project/pull/111597
rjmccall wrote:
Are you suggesting that loads need to have fast-math flags attached to them?
Because this sounds like a bad representation in IR.
https://github.com/llvm/llvm-project/pull/105912
@@ -75,6 +75,12 @@ const CGFunctionInfo &arrangeCXXMethodType(CodeGenModule
&CGM,
const FunctionProtoType *FTP,
const CXXMethodDecl *MD);
+const CGFunctionInfo &arrangeCXXMethodCall(CodeGenM
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/111597
rjmccall wrote:
I don't understand what fast-math flags are supposed to mean for loads, phis,
and selects. These are not arithmetic operations; they just propagate values.
If you're trying to implement some kind of rule where fast-math analysis should
not pass across certain kinds of abstrac
rjmccall wrote:
Oh, that's interesting. I'd been assuming this was a cross-function issue or
something like that, but that's a great example of where we need more than
that. I agree that it feels like having some kind of barrier instruction is
the right way to go — basically a unary operator
https://github.com/rjmccall approved this pull request.
Nice, this looks really clean. Thanks for taking this on!
https://github.com/llvm/llvm-project/pull/110762
https://github.com/rjmccall commented:
Thanks, LGTM other than the memory leak.
https://github.com/llvm/llvm-project/pull/110569
@@ -119,6 +120,7 @@ class CodeGenTBAA {
llvm::Module &Module;
const CodeGenOptions &CodeGenOpts;
const LangOptions &Features;
+ MangleContext *MangleCtx;
rjmccall wrote:
This should either be a unique_ptr or you need to delete it in the destructor.
htt
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/110569
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/111995
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/111995
@@ -326,25 +326,25 @@ struct LazyOffsetPtr {
///
/// If the low bit is clear, a pointer to the AST node. If the low
/// bit is set, the upper 63 bits are the offset.
- mutable uint64_t Ptr = 0;
+ mutable uintptr_t Ptr = 0;
public:
LazyOffsetPtr() = default;
- exp
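A minimal sketch of the low-bit tagging described in that comment (not the real LazyOffsetPtr, just the tagging scheme):

```cpp
#include <cstdint>

// One word holds either a pointer (low bit clear) or an offset shifted left
// by one (low bit set).
struct PtrOrOffset {
  uintptr_t Value = 0;

  void setPointer(void *P) { Value = reinterpret_cast<uintptr_t>(P); }
  void setOffset(uintptr_t Off) { Value = (Off << 1) | 1u; }

  bool isOffset() const { return Value & 1; }
  void *getPointer() const { return reinterpret_cast<void *>(Value); }
  uintptr_t getOffset() const { return Value >> 1; }
};
```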
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/111995
@@ -326,25 +326,25 @@ struct LazyOffsetPtr {
///
/// If the low bit is clear, a pointer to the AST node. If the low
/// bit is set, the upper 63 bits are the offset.
- mutable uint64_t Ptr = 0;
+ mutable uintptr_t Ptr = 0;
public:
LazyOffsetPtr() = default;
- exp
https://github.com/rjmccall approved this pull request.
LGTM
https://github.com/llvm/llvm-project/pull/110569
https://github.com/rjmccall approved this pull request.
https://github.com/llvm/llvm-project/pull/112218
rjmccall wrote:
Is this a target-independent decision? I could certainly imagine a target with
a generic AS wanting to specify that indirect return addresses (and maybe even
parameters?) should be in that rather than the alloca AS; among other things,
it would allow return values to be used t
rjmccall wrote:
I don't think there's any situation in which Clang needs to change the address
space of a declaration. It can happen if the programmer has declarations that
disagree about the address space in which the entity is defined, but it's fair
to just emit an error in that situation.
rjmccall wrote:
> > Can we find a way to re-use the code between this and the actual lookup
> > code? Feels like we could have some sort of predicate like
> > `doesLookupResultSuppressADL(NamedDecl*)`. Or are we forced to use slightly
> > different predicates for some compatibility reason?
>
@@ -1672,10 +1672,11 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI)
{
// Add type for sret argument.
if (IRFunctionArgs.hasSRetArg()) {
-QualType Ret = FI.getReturnType();
-unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(Ret);
+auto Ad
rjmccall wrote:
I agree that it doesn't meaningfully come from a source-level type and should
be specified by the target lowering. I just want to make sure we write the new
code in a way that plausibly supports the target ABI specifying something other
than "it's always in the alloca AS". Ca
https://github.com/rjmccall approved this pull request.
LGTM
https://github.com/llvm/llvm-project/pull/113691
https://github.com/rjmccall approved this pull request.
LGTM
https://github.com/llvm/llvm-project/pull/108970
https://github.com/rjmccall approved this pull request.
LGTM
https://github.com/llvm/llvm-project/pull/116100
@@ -2419,8 +2419,13 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return Visit(const_cast<Expr *>(E));
case CK_NoOp: {
-return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
- : Visit(const_cast<Expr *>(E));
+i
https://github.com/rjmccall edited
https://github.com/llvm/llvm-project/pull/109056
rjmccall wrote:
I don't think there's a deep reason blocks and lambdas don't use quite the same
scope mechanics in the compiler, so if you wanted to pursue that, it seems
reasonable. But this approach also seems viable. I agree that removing the
assertion would be the wrong thing to do, and
https://github.com/rjmccall requested changes to this pull request.
https://github.com/llvm/llvm-project/pull/110569
@@ -221,21 +221,27 @@ llvm::MDNode *CodeGenTBAA::getTypeInfoHelper(const Type
*Ty) {
PtrDepth++;
Ty = Ty->getPointeeType().getTypePtr();
} while (Ty->isPointerType());
-// TODO: Implement C++'s type "similarity" and consider dis-"similar"
-// pointers d