kstoimenov updated this revision to Diff 365650.
kstoimenov added a comment.
Fixed the test.
Repository:
rG LLVM Github Monorepo
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D107850/new/
https://reviews.llvm.org/D107850
Files:
clang/test/CodeGen/asan-use-callbacks.cpp
llvm/include/llvm/IR/Intrinsics.td
llvm/include/llvm/Transforms/Instrumentation/AddressSanitizer.h
llvm/lib/Target/X86/X86AsmPrinter.cpp
llvm/lib/Target/X86/X86AsmPrinter.h
llvm/lib/Target/X86/X86InstrCompiler.td
llvm/lib/Target/X86/X86MCInstLower.cpp
llvm/lib/Target/X86/X86RegisterInfo.td
llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
Index: llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
===================================================================
--- llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -348,6 +348,10 @@
static cl::opt<bool> ClOpt("asan-opt", cl::desc("Optimize instrumentation"),
cl::Hidden, cl::init(true));
+static cl::opt<bool> ClOptimizeCallbacks("asan-optimize-callbacks",
+ cl::desc("Optimize callbacks"),
+ cl::Hidden, cl::init(false));
+
static cl::opt<bool> ClOptSameTemp(
"asan-opt-same-temp", cl::desc("Instrument the same temp just once"),
cl::Hidden, cl::init(true));
@@ -623,6 +627,7 @@
C = &(M.getContext());
LongSize = M.getDataLayout().getPointerSizeInBits();
IntptrTy = Type::getIntNTy(*C, LongSize);
+ Int8PtrTy = Type::getInt8PtrTy(*C);
TargetTriple = Triple(M.getTargetTriple());
Mapping = getShadowMapping(TargetTriple, LongSize, this->CompileKernel);
@@ -673,6 +678,7 @@
Value *SizeArgument, uint32_t Exp);
void instrumentMemIntrinsic(MemIntrinsic *MI);
Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
+ void encodeMemToShadowInfo(int64_t *AccessInfo);
bool suppressInstrumentationSiteForDebug(int &Instrumented);
bool instrumentFunction(Function &F, const TargetLibraryInfo *TLI);
bool maybeInsertAsanInitAtFunctionEntry(Function &F);
@@ -713,6 +719,7 @@
bool UseAfterScope;
AsanDetectStackUseAfterReturnMode UseAfterReturn;
Type *IntptrTy;
+ Type *Int8PtrTy;
ShadowMapping Mapping;
FunctionCallee AsanHandleNoReturnFunc;
FunctionCallee AsanPtrCmpFunction, AsanPtrSubFunction;
@@ -1361,6 +1368,15 @@
return IRB.CreateAdd(Shadow, ShadowBase);
}
+void AddressSanitizer::encodeMemToShadowInfo(int64_t *AccessInfo) {
+  *AccessInfo +=
+      (Mapping.Scale << AsanAccessInfo::MappingScaleShift) +
+      (Mapping.OrShadowOffset << AsanAccessInfo::OrShadowOffsetShift);
+  if (LocalDynamicShadow || Mapping.InGlobal) { // not encodable in AccessInfo
+    report_fatal_error("asan-optimize-callbacks does not support dynamic shadow or InGlobal mapping");
+  }
+}
+
// Instrument memset/memmove/memcpy
void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
IRBuilder<> IRB(MI);
@@ -1742,12 +1758,25 @@
size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
if (UseCalls) {
- if (Exp == 0)
- IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
- AddrLong);
- else
- IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
- {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
+ if (ClOptimizeCallbacks) {
+ Value *Ptr8 = IRB.CreatePointerCast(Addr, Int8PtrTy);
+ Module *M = IRB.GetInsertBlock()->getParent()->getParent();
+ int64_t AccessInfo =
+ (IsWrite << AsanAccessInfo::IsWriteShift) +
+ (AccessSizeIndex << AsanAccessInfo::AccessSizeIndexShift);
+ encodeMemToShadowInfo(&AccessInfo);
+ IRB.CreateCall(
+ Intrinsic::getDeclaration(M, Intrinsic::asan_check_memaccess),
+ {Ptr8, ConstantInt::get(IRB.getInt64Ty(), Mapping.Offset),
+ ConstantInt::get(IRB.getInt32Ty(), AccessInfo)});
+ } else {
+ if (Exp == 0)
+ IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][0][AccessSizeIndex],
+ AddrLong);
+ else
+ IRB.CreateCall(AsanMemoryAccessCallback[IsWrite][1][AccessSizeIndex],
+ {AddrLong, ConstantInt::get(IRB.getInt32Ty(), Exp)});
+ }
return;
}
Index: llvm/lib/Target/X86/X86RegisterInfo.td
===================================================================
--- llvm/lib/Target/X86/X86RegisterInfo.td
+++ llvm/lib/Target/X86/X86RegisterInfo.td
@@ -436,6 +436,12 @@
(add RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
RBX, R14, R15, R12, R13, RBP, RSP, RIP)>;
+// GR64NoRAX - 64-bit GPRs without RAX and RIP. Can be used when emitting code
+// for intrinsics, which use implicit input registers.
+def GR64NoRAX : RegisterClass<"X86", [i64], 64,
+ (add RCX, RDX, RSI, RDI, R8, R9, R10, R11,
+ RBX, R14, R15, R12, R13, RBP, RSP)>;
+
// Segment registers for use by MOV instructions (and others) that have a
// segment register as one operand. Always contain a 16-bit segment
// descriptor.
Index: llvm/lib/Target/X86/X86MCInstLower.cpp
===================================================================
--- llvm/lib/Target/X86/X86MCInstLower.cpp
+++ llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -43,8 +43,10 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
using namespace llvm;
@@ -1323,6 +1325,243 @@
.addExpr(Op));
}
+void X86AsmPrinter::LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
+  // FIXME: Make this work on non-ELF.
+  if (!TM.getTargetTriple().isOSBinFormatELF()) {
+    report_fatal_error("llvm.asan.check.memaccess only supported on ELF");
+    return;
+  }
+
+  unsigned Reg = MI.getOperand(0).getReg().id();
+  uint64_t ShadowBase = MI.getOperand(1).getImm();
+  uint32_t AccessInfo = MI.getOperand(2).getImm();
+  bool IsWrite = (AccessInfo >> AsanAccessInfo::IsWriteShift) & 1;
+  size_t AccessSizeIndex =
+      (AccessInfo >> AsanAccessInfo::AccessSizeIndexShift) & 0xf;
+
+  MCSymbol *&Sym =
+      AsanMemaccessSymbols[AsanMemaccessTuple(Reg, ShadowBase, AccessInfo)];
+  if (!Sym) {
+    std::string name = IsWrite ? "store" : "load"; // writes -> "store" checks
+    std::string SymName = "__asan_check_" + name +
+                          utostr(1 << AccessSizeIndex) + "_rn" + utostr(Reg);
+    Sym = OutContext.getOrCreateSymbol(SymName);
+  }
+
+  EmitAndCountInstruction(
+      MCInstBuilder(X86::CALL64pcrel32)
+          .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
+}
+
+void X86AsmPrinter::emitAsanMemaccessPartial(Module &M, unsigned Reg,
+                                             uint64_t ShadowBase, bool IsWrite,
+                                             size_t AccessSizeIndex,
+                                             size_t MappingScale,
+                                             bool OrShadowOffset,
+                                             MCSubtargetInfo &STI) {
+  assert(AccessSizeIndex == 0 || AccessSizeIndex == 1 || AccessSizeIndex == 2);
+  assert(Reg != X86::RAX); // RAX is the scratch register (Defs in the pseudo)
+
+  OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
+                                   .addReg(X86::RAX)
+                                   .addReg(X86::NoRegister + Reg),
+                               STI);
+  OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri)
+                                   .addReg(X86::RAX)
+                                   .addReg(X86::RAX)
+                                   .addImm(MappingScale),
+                               STI);
+  if (OrShadowOffset) {
+    OutStreamer->emitInstruction(MCInstBuilder(X86::OR64i32).addImm(ShadowBase),
+                                 STI);
+    OutStreamer->emitInstruction(MCInstBuilder(X86::MOV8rm)
+                                     .addReg(X86::AL)
+                                     .addReg(X86::RAX)
+                                     .addImm(1)
+                                     .addReg(X86::NoRegister)
+                                     .addImm(0)
+                                     .addReg(X86::NoRegister),
+                                 STI);
+    OutStreamer->emitInstruction(
+        MCInstBuilder(X86::TEST8rr).addReg(X86::AL).addReg(X86::AL), STI);
+  } else {
+    OutStreamer->emitInstruction(MCInstBuilder(X86::MOVSX32rm8)
+                                     .addReg(X86::EAX)
+                                     .addReg(X86::RAX)
+                                     .addImm(1)
+                                     .addReg(X86::NoRegister)
+                                     .addImm(ShadowBase)
+                                     .addReg(X86::NoRegister),
+                                 STI);
+    OutStreamer->emitInstruction(
+        MCInstBuilder(X86::TEST32rr).addReg(X86::EAX).addReg(X86::EAX), STI);
+  }
+  MCSymbol *AdditionalCheck = OutContext.createTempSymbol();
+  OutStreamer->emitInstruction(
+      MCInstBuilder(X86::JCC_1)
+          .addExpr(MCSymbolRefExpr::create(AdditionalCheck, OutContext))
+          .addImm(X86::COND_NE),
+      STI);
+  MCSymbol *ReturnSym = OutContext.createTempSymbol();
+  OutStreamer->emitLabel(ReturnSym);
+  OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI);
+
+  // Shadow byte is non-zero so we need to perform additional checks.
+  OutStreamer->emitLabel(AdditionalCheck);
+  OutStreamer->emitInstruction(MCInstBuilder(X86::PUSH64r).addReg(X86::RCX),
+                               STI);
+  OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
+                                   .addReg(X86::RCX)
+                                   .addReg(X86::NoRegister + Reg),
+                               STI);
+  const size_t Granularity = 1ULL << MappingScale;
+  OutStreamer->emitInstruction(MCInstBuilder(X86::AND32ri8)
+                                   .addReg(X86::ECX)
+                                   .addReg(X86::ECX)
+                                   .addImm(Granularity - 1),
+                               STI);
+  if (AccessSizeIndex == 1) { // 2-byte access: last byte is addr & mask, +1
+    OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8)
+                                     .addReg(X86::ECX)
+                                     .addReg(X86::ECX)
+                                     .addImm(1),
+                                 STI);
+  } else if (AccessSizeIndex == 2) { // 4-byte access: +3
+    OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8)
+                                     .addReg(X86::ECX)
+                                     .addReg(X86::ECX)
+                                     .addImm(3),
+                                 STI);
+  }
+
+  OutStreamer->emitInstruction(
+      MCInstBuilder(X86::CMP32rr).addReg(X86::ECX).addReg(X86::EAX),
+      STI);
+  OutStreamer->emitInstruction(MCInstBuilder(X86::POP64r).addReg(X86::RCX),
+                               STI);
+  OutStreamer->emitInstruction(
+      MCInstBuilder(X86::JCC_1)
+          .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext))
+          .addImm(X86::COND_L),
+      STI);
+
+  emitAsanReportError(M, Reg, IsWrite, AccessSizeIndex, STI);
+}
+
+void X86AsmPrinter::emitAsanMemaccessFull(Module &M, unsigned Reg,
+                                          uint64_t ShadowBase, bool IsWrite,
+                                          size_t AccessSizeIndex,
+                                          size_t MappingScale,
+                                          bool OrShadowOffset,
+                                          MCSubtargetInfo &STI) {
+  assert(AccessSizeIndex == 3 || AccessSizeIndex == 4); // 8- or 16-byte access
+  assert(Reg != X86::RAX); // RAX is the scratch register (Defs in the pseudo)
+  OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
+                                   .addReg(X86::RAX)
+                                   .addReg(X86::NoRegister + Reg),
+                               STI);
+  OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri)
+                                   .addReg(X86::RAX)
+                                   .addReg(X86::RAX)
+                                   .addImm(MappingScale),
+                               STI);
+  if (OrShadowOffset) {
+    OutStreamer->emitInstruction(MCInstBuilder(X86::OR64i32).addImm(ShadowBase),
+                                 STI);
+    auto OpCode = AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8; // 8B: one shadow byte, 16B: two
+    OutStreamer->emitInstruction(MCInstBuilder(OpCode)
+                                     .addReg(X86::RAX)
+                                     .addImm(1)
+                                     .addReg(X86::NoRegister)
+                                     .addImm(0)
+                                     .addReg(X86::NoRegister)
+                                     .addImm(0),
+                                 STI);
+  } else {
+    auto OpCode = AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8; // 8B: one shadow byte, 16B: two
+    OutStreamer->emitInstruction(MCInstBuilder(OpCode)
+                                     .addReg(X86::RAX)
+                                     .addImm(1)
+                                     .addReg(X86::NoRegister)
+                                     .addImm(ShadowBase)
+                                     .addReg(X86::NoRegister)
+                                     .addImm(0),
+                                 STI);
+  }
+  MCSymbol *ReportCode = OutContext.createTempSymbol(); // shadow != 0 -> report
+  OutStreamer->emitInstruction(
+      MCInstBuilder(X86::JCC_1)
+          .addExpr(MCSymbolRefExpr::create(ReportCode, OutContext))
+          .addImm(X86::COND_NE),
+      STI);
+  MCSymbol *ReturnSym = OutContext.createTempSymbol();
+  OutStreamer->emitLabel(ReturnSym);
+  OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI);
+
+  OutStreamer->emitLabel(ReportCode);
+  emitAsanReportError(M, Reg, IsWrite, AccessSizeIndex, STI);
+}
+
+void X86AsmPrinter::emitAsanReportError(Module &M, unsigned Reg, bool IsWrite,
+                                        size_t AccessSizeIndex,
+                                        MCSubtargetInfo &STI) {
+  std::string name = IsWrite ? "store" : "load"; // writes report __asan_report_storeN
+  MCSymbol *ReportError = OutContext.getOrCreateSymbol(
+      "__asan_report_" + name + utostr(1 << AccessSizeIndex));
+  OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
+                                   .addReg(X86::RDI)
+                                   .addReg(X86::NoRegister + Reg),
+                               STI);
+  OutStreamer->emitInstruction(
+      MCInstBuilder(X86::JMP_1)
+          .addExpr(MCSymbolRefExpr::create(ReportError, OutContext)),
+      STI);
+}
+
+void X86AsmPrinter::emitAsanMemaccessSymbols(Module &M) {
+  if (AsanMemaccessSymbols.empty())
+    return;
+
+  const Triple &TT = TM.getTargetTriple();
+  assert(TT.isOSBinFormatELF());
+  std::unique_ptr<MCSubtargetInfo> STI(
+      TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
+  assert(STI && "Unable to create subtarget info");
+
+  for (auto &P : AsanMemaccessSymbols) {
+    MCSymbol *Sym = P.second; // NOTE(review): name encodes only size/reg, key also has ShadowBase/AccessInfo — confirm one mapping per module
+    OutStreamer->SwitchSection(OutContext.getELFSection(
+        ".text.hot", ELF::SHT_PROGBITS,
+        ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
+        /*IsComdat=*/true));
+
+    OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
+    OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak); // weak+comdat: one copy survives linking
+    OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
+    OutStreamer->emitLabel(Sym);
+
+    unsigned Reg = std::get<0>(P.first); // decode fields packed by the instrumentation pass
+    uint64_t ShadowBase = std::get<1>(P.first);
+    uint32_t AccessInfo = std::get<2>(P.first);
+
+    bool IsWrite = (AccessInfo >> AsanAccessInfo::IsWriteShift) & 1;
+    size_t AccessSizeIndex =
+        (AccessInfo >> AsanAccessInfo::AccessSizeIndexShift) & 0xf;
+    size_t MappingScale =
+        (AccessInfo >> AsanAccessInfo::MappingScaleShift) & 0xf;
+    bool OrShadowOffset =
+        (AccessInfo >> AsanAccessInfo::OrShadowOffsetShift) & 1;
+
+    if (AccessSizeIndex < 3) { // 1/2/4-byte accesses need the partial-granule check
+      emitAsanMemaccessPartial(M, Reg, ShadowBase, IsWrite, AccessSizeIndex,
+                               MappingScale, OrShadowOffset, *STI);
+    } else { // 8/16-byte accesses cover whole shadow bytes
+      emitAsanMemaccessFull(M, Reg, ShadowBase, IsWrite, AccessSizeIndex,
+                            MappingScale, OrShadowOffset, *STI);
+    }
+  }
+}
+
void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
X86MCInstLower &MCIL) {
// PATCHABLE_OP minsize, opcode, operands
@@ -2563,6 +2802,9 @@
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
return;
+ case X86::ASAN_CHECK_MEMACCESS:
+ return LowerASAN_CHECK_MEMACCESS(*MI);
+
case X86::MORESTACK_RET_RESTORE_R10:
// Return, then restore R10.
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
Index: llvm/lib/Target/X86/X86InstrCompiler.td
===================================================================
--- llvm/lib/Target/X86/X86InstrCompiler.td
+++ llvm/lib/Target/X86/X86InstrCompiler.td
@@ -260,6 +260,18 @@
"#SEH_Epilogue", []>;
}
+//===----------------------------------------------------------------------===//
+// Pseudo instructions used by address sanitizer.
+//===----------------------------------------------------------------------===//
+let
+ Defs = [RAX, EFLAGS] in {
+def ASAN_CHECK_MEMACCESS : PseudoI<
+ (outs), (ins GR64NoRAX:$addr, i64imm:$shadowbase, i32imm:$accessinfo),
+ [(int_asan_check_memaccess GR64NoRAX:$addr, (i64 timm:$shadowbase),
+ (i32 timm:$accessinfo))]>,
+ Sched<[]>;
+}
+
//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//
Index: llvm/lib/Target/X86/X86AsmPrinter.h
===================================================================
--- llvm/lib/Target/X86/X86AsmPrinter.h
+++ llvm/lib/Target/X86/X86AsmPrinter.h
@@ -98,6 +98,25 @@
void LowerFENTRY_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
+ // Address sanitizer specific lowering for X86.
+ void LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI);
+ void emitAsanMemaccessSymbols(Module &M);
+ void emitAsanMemaccessPartial(Module &M, unsigned Reg, uint64_t ShadowBase,
+ bool IsWrite, size_t AccessSizeIndex,
+ size_t MappingScale, bool OrShadowOffset,
+ MCSubtargetInfo &STI);
+ void emitAsanMemaccessFull(Module &M, unsigned Reg, uint64_t ShadowBase,
+ bool IsWrite, size_t AccessSizeIndex,
+ size_t MappingScale, bool OrShadowOffset,
+ MCSubtargetInfo &STI);
+ void emitAsanReportError(Module &M, unsigned Reg, bool IsWrite,
+ size_t AccessSizeIndex, MCSubtargetInfo &STI);
+
+ typedef std::tuple<unsigned /*Reg*/, uint64_t /*ShadowBase*/,
+ uint32_t /*AccessInfo*/>
+ AsanMemaccessTuple;
+ std::map<AsanMemaccessTuple, MCSymbol *> AsanMemaccessSymbols;
+
// Choose between emitting .seh_ directives and .cv_fpo_ directives.
void EmitSEHInstruction(const MachineInstr *MI);
Index: llvm/lib/Target/X86/X86AsmPrinter.cpp
===================================================================
--- llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -753,6 +753,8 @@
void X86AsmPrinter::emitEndOfAsmFile(Module &M) {
const Triple &TT = TM.getTargetTriple();
+ emitAsanMemaccessSymbols(M);
+
if (TT.isOSBinFormatMachO()) {
// Mach-O uses non-lazy symbol stubs to encode per-TU information into
// global table for symbol lookup.
Index: llvm/include/llvm/Transforms/Instrumentation/AddressSanitizer.h
===================================================================
--- llvm/include/llvm/Transforms/Instrumentation/AddressSanitizer.h
+++ llvm/include/llvm/Transforms/Instrumentation/AddressSanitizer.h
@@ -148,6 +148,18 @@
bool UseOdrIndicator = true,
AsanDtorKind DestructorKind = AsanDtorKind::Global);
+namespace AsanAccessInfo {
+
+// Bit field positions for accessinfo parameter to llvm.asan.check.memaccess.
+enum {
+ AccessSizeIndexShift = 0, // 4 bits
+ IsWriteShift = 4,
+ MappingScaleShift = 5, // 4 bits
+ OrShadowOffsetShift = 9
+};
+
+} // namespace AsanAccessInfo
+
} // namespace llvm
#endif
Index: llvm/include/llvm/IR/Intrinsics.td
===================================================================
--- llvm/include/llvm/IR/Intrinsics.td
+++ llvm/include/llvm/IR/Intrinsics.td
@@ -1568,6 +1568,10 @@
def int_load_relative: DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
[IntrReadMem, IntrArgMemOnly]>;
+def int_asan_check_memaccess :
+ Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty, llvm_i32_ty],
+ [IntrInaccessibleMemOnly, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
+
def int_hwasan_check_memaccess :
Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
[IntrInaccessibleMemOnly, ImmArg<ArgIndex<2>>]>;
Index: clang/test/CodeGen/asan-use-callbacks.cpp
===================================================================
--- clang/test/CodeGen/asan-use-callbacks.cpp
+++ clang/test/CodeGen/asan-use-callbacks.cpp
@@ -1,12 +1,18 @@
-// RUN: %clang -target x86_64-linux-gnu -S -emit-llvm -fsanitize=address \
-// RUN: -o - %s \
+// RUN: %clang -target x86_64-linux-gnu -S -emit-llvm -o - \
+// RUN: -fsanitize=address %s \
// RUN: | FileCheck %s --check-prefixes=CHECK-NO-OUTLINE
// RUN: %clang -target x86_64-linux-gnu -S -emit-llvm -o - \
// RUN: -fsanitize=address %s -fsanitize-address-outline-instrumentation \
// RUN: | FileCheck %s --check-prefixes=CHECK-OUTLINE
+// RUN: %clang -target x86_64-linux-gnu -S -emit-llvm -o - \
+// RUN: -fsanitize=address %s -fsanitize-address-outline-instrumentation \
+// RUN: -mllvm -asan-optimize-callbacks \
+// RUN: | FileCheck %s --check-prefixes=CHECK-OPTIMIZED
+
// CHECK-NO-OUTLINE-NOT: call{{.*}}@__asan_load4
// CHECK-OUTLINE: call{{.*}}@__asan_load4
+// CHECK-OPTIMIZED: call{{.*}}@llvm.asan.check.memaccess(i8*{{.*}}, i64{{.*}}, i32{{.*}})
int deref(int *p) {
return *p;
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits