[asan] Implemented an intrinsic for a custom calling convention, similar to the one used by HWASan, for X86.

The implementation uses the int_asan_check_memaccess intrinsic to instrument the code. The intrinsic is replaced by a call to a function that performs the access check. The generated function names encode the input register as a number computed with the Reg - X86::NoRegister formula.
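For illustration, here is a minimal C++ sketch of that naming scheme. The register number is hypothetical (the real values come from the TableGen-generated X86 register enum); only the difference Reg - NoRegister matters:

#include <string>

// Hypothetical stand-ins for llvm::X86 register enum values (illustrative only).
enum X86Reg : unsigned { NoRegister = 0, RDI = 54 };

// Mirrors the SymName construction in LowerASAN_CHECK_MEMACCESS in the diff below.
std::string asanCheckSymbol(bool IsWrite, unsigned AccessSizeIndex, unsigned Reg) {
  return std::string("__asan_check_") + (IsWrite ? "store" : "load") +
         std::to_string(1u << AccessSizeIndex) + "_rn" +
         std::to_string(Reg - NoRegister);
}
// e.g. asanCheckSymbol(false, 2, RDI) == "__asan_check_load4_rn54"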

Reviewed By: vitalybuka

Differential Revision: https://reviews.llvm.org/D107850
Author: Kirill Stoimenov
Date: 2021-08-24 20:23:47 +00:00
parent ed0f4415f0
commit 832aae738b
10 changed files with 815 additions and 3 deletions


@@ -1339,8 +1339,8 @@ def int_donothing : DefaultAttrsIntrinsic<[], [], [IntrNoMem, IntrWillReturn]>;
def int_sideeffect : DefaultAttrsIntrinsic<[], [], [IntrInaccessibleMemOnly, IntrWillReturn]>;
// The pseudoprobe intrinsic works as a place holder to the block it probes.
// Like the sideeffect intrinsic defined above, this intrinsic is treated by the
// optimizer as having opaque side effects so that it won't be get rid of or moved
// out of the block it probes.
def int_pseudoprobe : Intrinsic<[], [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
[IntrInaccessibleMemOnly, IntrWillReturn]>;
@@ -1637,6 +1637,9 @@ def int_icall_branch_funnel : DefaultAttrsIntrinsic<[], [llvm_vararg_ty], []>;
def int_load_relative: DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
[IntrReadMem, IntrArgMemOnly]>;
def int_asan_check_memaccess :
Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [ImmArg<ArgIndex<1>>]>;
def int_hwasan_check_memaccess :
Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
[ImmArg<ArgIndex<2>>]>;


@@ -155,6 +155,16 @@ ModulePass *createModuleAddressSanitizerLegacyPassPass(
bool UseOdrIndicator = true,
AsanDtorKind DestructorKind = AsanDtorKind::Global);
struct ASanAccessInfo {
const int32_t Packed;
const uint8_t AccessSizeIndex;
const bool IsWrite;
const bool CompileKernel;
explicit ASanAccessInfo(int32_t Packed);
ASanAccessInfo(bool IsWrite, bool CompileKernel, uint8_t AccessSizeIndex);
};
} // namespace llvm
#endif


@@ -753,6 +753,8 @@ static void emitNonLazyStubs(MachineModuleInfo *MMI, MCStreamer &OutStreamer) {
void X86AsmPrinter::emitEndOfAsmFile(Module &M) {
const Triple &TT = TM.getTargetTriple();
emitAsanMemaccessSymbols(M);
if (TT.isOSBinFormatMachO()) {
// Mach-O uses non-lazy symbol stubs to encode per-TU information into
// global table for symbol lookup.


@@ -23,6 +23,7 @@ class MCCodeEmitter;
class MCStreamer;
class X86Subtarget;
class TargetMachine;
struct ASanAccessInfo;
class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
const X86Subtarget *Subtarget = nullptr;
@@ -98,6 +99,23 @@ class LLVM_LIBRARY_VISIBILITY X86AsmPrinter : public AsmPrinter {
void LowerFENTRY_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
// Address sanitizer specific lowering for X86.
void LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI);
void emitAsanMemaccessSymbols(Module &M);
void emitAsanMemaccessPartial(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI);
void emitAsanMemaccessFull(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI);
void emitAsanReportError(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI);
typedef std::tuple<unsigned /*Reg*/, uint32_t /*AccessInfo*/>
AsanMemaccessTuple;
std::map<AsanMemaccessTuple, MCSymbol *> AsanMemaccessSymbols;
// Choose between emitting .seh_ directives and .cv_fpo_ directives.
void EmitSEHInstruction(const MachineInstr *MI);


@@ -260,6 +260,17 @@ let isPseudo = 1, SchedRW = [WriteSystem] in {
"#SEH_Epilogue", []>;
}
//===----------------------------------------------------------------------===//
// Pseudo instructions used by address sanitizer.
//===----------------------------------------------------------------------===//
let Defs = [R8, EFLAGS] in {
def ASAN_CHECK_MEMACCESS : PseudoI<
(outs), (ins GR64NoR8:$addr, i32imm:$accessinfo),
[(int_asan_check_memaccess GR64NoR8:$addr, (i32 timm:$accessinfo))]>,
Sched<[]>;
}
//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//
@@ -960,7 +971,7 @@ multiclass ATOMIC_RMW_BINOP<bits<8> opc8, bits<8> opc, string mnemonic,
!strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
[(set
GR32:$dst,
(!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
OpSize32;
def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$val, i64mem:$ptr),


@@ -43,8 +43,11 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
using namespace llvm;
@@ -1323,6 +1326,243 @@ void X86AsmPrinter::LowerFENTRY_CALL(const MachineInstr &MI,
.addExpr(Op));
}
void X86AsmPrinter::LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
// FIXME: Make this work on non-ELF.
if (!TM.getTargetTriple().isOSBinFormatELF()) {
report_fatal_error("llvm.asan.check.memaccess only supported on ELF");
return;
}
unsigned Reg = MI.getOperand(0).getReg().id();
ASanAccessInfo AccessInfo(MI.getOperand(1).getImm());
MCSymbol *&Sym =
AsanMemaccessSymbols[AsanMemaccessTuple(Reg, AccessInfo.Packed)];
if (!Sym) {
std::string Name = AccessInfo.IsWrite ? "store" : "load";
std::string SymName = "__asan_check_" + Name +
utostr(1 << AccessInfo.AccessSizeIndex) + "_rn" +
utostr(Reg);
Sym = OutContext.getOrCreateSymbol(SymName);
}
EmitAndCountInstruction(
MCInstBuilder(X86::CALL64pcrel32)
.addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
}
void X86AsmPrinter::emitAsanMemaccessPartial(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI) {
assert(AccessInfo.AccessSizeIndex == 0 || AccessInfo.AccessSizeIndex == 1 ||
AccessInfo.AccessSizeIndex == 2);
assert(Reg != X86::R8);
uint64_t ShadowBase;
int MappingScale;
bool OrShadowOffset;
getAddressSanitizerParams(
Triple(M.getTargetTriple()), M.getDataLayout().getPointerSizeInBits(),
AccessInfo.CompileKernel, &ShadowBase, &MappingScale, &OrShadowOffset);
OutStreamer->emitInstruction(
MCInstBuilder(X86::MOV64rr).addReg(X86::R8).addReg(X86::NoRegister + Reg),
STI);
OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri)
.addReg(X86::R8)
.addReg(X86::NoRegister)
.addImm(MappingScale),
STI);
if (OrShadowOffset) {
OutStreamer->emitInstruction(MCInstBuilder(X86::OR64ri32)
.addReg(X86::NoRegister)
.addReg(X86::R8)
.addImm(ShadowBase),
STI);
OutStreamer->emitInstruction(MCInstBuilder(X86::MOV8rm)
.addReg(X86::R8B)
.addReg(X86::R8)
.addImm(1)
.addReg(X86::NoRegister)
.addImm(0)
.addReg(X86::NoRegister),
STI);
OutStreamer->emitInstruction(
MCInstBuilder(X86::TEST8rr).addReg(X86::R8B).addReg(X86::R8B), STI);
} else {
OutStreamer->emitInstruction(MCInstBuilder(X86::MOVSX32rm8)
.addReg(X86::R8D)
.addReg(X86::R8)
.addImm(1)
.addReg(X86::NoRegister)
.addImm(ShadowBase)
.addReg(X86::NoRegister),
STI);
OutStreamer->emitInstruction(
MCInstBuilder(X86::TEST32rr).addReg(X86::R8D).addReg(X86::R8D), STI);
}
MCSymbol *AdditionalCheck = OutContext.createTempSymbol();
OutStreamer->emitInstruction(
MCInstBuilder(X86::JCC_1)
.addExpr(MCSymbolRefExpr::create(AdditionalCheck, OutContext))
.addImm(X86::COND_NE),
STI);
MCSymbol *ReturnSym = OutContext.createTempSymbol();
OutStreamer->emitLabel(ReturnSym);
OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI);
// Shadow byte is non-zero so we need to perform additional checks.
OutStreamer->emitLabel(AdditionalCheck);
OutStreamer->emitInstruction(MCInstBuilder(X86::PUSH64r).addReg(X86::RCX),
STI);
OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
.addReg(X86::RCX)
.addReg(X86::NoRegister + Reg),
STI);
const size_t Granularity = 1ULL << MappingScale;
OutStreamer->emitInstruction(MCInstBuilder(X86::AND32ri8)
.addReg(X86::NoRegister)
.addReg(X86::ECX)
.addImm(Granularity - 1),
STI);
if (AccessInfo.AccessSizeIndex == 1) {
OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8)
.addReg(X86::NoRegister)
.addReg(X86::ECX)
.addImm(1),
STI);
} else if (AccessInfo.AccessSizeIndex == 2) {
OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8)
.addReg(X86::NoRegister)
.addReg(X86::ECX)
.addImm(3),
STI);
}
OutStreamer->emitInstruction(
MCInstBuilder(X86::CMP32rr).addReg(X86::ECX).addReg(X86::R8D).addImm(1),
STI);
OutStreamer->emitInstruction(MCInstBuilder(X86::POP64r).addReg(X86::RCX),
STI);
OutStreamer->emitInstruction(
MCInstBuilder(X86::JCC_1)
.addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext))
.addImm(X86::COND_L),
STI);
emitAsanReportError(M, Reg, AccessInfo, STI);
}
void X86AsmPrinter::emitAsanMemaccessFull(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI) {
assert(AccessInfo.AccessSizeIndex == 3 || AccessInfo.AccessSizeIndex == 4);
assert(Reg != X86::R8);
uint64_t ShadowBase;
int MappingScale;
bool OrShadowOffset;
getAddressSanitizerParams(
Triple(M.getTargetTriple()), M.getDataLayout().getPointerSizeInBits(),
AccessInfo.CompileKernel, &ShadowBase, &MappingScale, &OrShadowOffset);
OutStreamer->emitInstruction(
MCInstBuilder(X86::MOV64rr).addReg(X86::R8).addReg(X86::NoRegister + Reg),
STI);
OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri)
.addReg(X86::R8)
.addReg(X86::R8)
.addImm(MappingScale),
STI);
if (OrShadowOffset) {
OutStreamer->emitInstruction(MCInstBuilder(X86::OR64ri32)
.addReg(X86::R8)
.addReg(X86::R8)
.addImm(ShadowBase),
STI);
auto OpCode = AccessInfo.AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8;
OutStreamer->emitInstruction(MCInstBuilder(OpCode)
.addReg(X86::R8)
.addImm(1)
.addReg(X86::NoRegister)
.addImm(0)
.addReg(X86::NoRegister)
.addImm(0),
STI);
} else {
auto OpCode = AccessInfo.AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8;
OutStreamer->emitInstruction(MCInstBuilder(OpCode)
.addReg(X86::R8)
.addImm(1)
.addReg(X86::NoRegister)
.addImm(ShadowBase)
.addReg(X86::NoRegister)
.addImm(0),
STI);
}
MCSymbol *ReportCode = OutContext.createTempSymbol();
OutStreamer->emitInstruction(
MCInstBuilder(X86::JCC_1)
.addExpr(MCSymbolRefExpr::create(ReportCode, OutContext))
.addImm(X86::COND_NE),
STI);
MCSymbol *ReturnSym = OutContext.createTempSymbol();
OutStreamer->emitLabel(ReturnSym);
OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI);
OutStreamer->emitLabel(ReportCode);
emitAsanReportError(M, Reg, AccessInfo, STI);
}
void X86AsmPrinter::emitAsanReportError(Module &M, unsigned Reg,
const ASanAccessInfo &AccessInfo,
MCSubtargetInfo &STI) {
std::string Name = AccessInfo.IsWrite ? "store" : "load";
MCSymbol *ReportError = OutContext.getOrCreateSymbol(
"__asan_report_" + Name + utostr(1 << AccessInfo.AccessSizeIndex));
OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
.addReg(X86::RDI)
.addReg(X86::NoRegister + Reg),
STI);
OutStreamer->emitInstruction(
MCInstBuilder(X86::JMP_1)
.addExpr(MCSymbolRefExpr::create(ReportError, OutContext)),
STI);
}
void X86AsmPrinter::emitAsanMemaccessSymbols(Module &M) {
if (AsanMemaccessSymbols.empty())
return;
const Triple &TT = TM.getTargetTriple();
assert(TT.isOSBinFormatELF());
std::unique_ptr<MCSubtargetInfo> STI(
TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
assert(STI && "Unable to create subtarget info");
for (auto &P : AsanMemaccessSymbols) {
MCSymbol *Sym = P.second;
OutStreamer->SwitchSection(OutContext.getELFSection(
".text.hot", ELF::SHT_PROGBITS,
ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
/*IsComdat=*/true));
OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
OutStreamer->emitLabel(Sym);
unsigned Reg = std::get<0>(P.first);
ASanAccessInfo AccessInfo(std::get<1>(P.first));
if (AccessInfo.AccessSizeIndex < 3) {
emitAsanMemaccessPartial(M, Reg, AccessInfo, *STI);
} else {
emitAsanMemaccessFull(M, Reg, AccessInfo, *STI);
}
}
}
void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
X86MCInstLower &MCIL) {
// PATCHABLE_OP minsize, opcode, operands
@@ -2563,6 +2803,9 @@ void X86AsmPrinter::emitInstruction(const MachineInstr *MI) {
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
return;
case X86::ASAN_CHECK_MEMACCESS:
return LowerASAN_CHECK_MEMACCESS(*MI);
case X86::MORESTACK_RET_RESTORE_R10:
// Return, then restore R10.
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));

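The helper bodies emitted above implement the standard ASan shadow check. Here is a hedged C++ sketch of what the emitted assembly computes, specialized for illustration to a 4-byte load with the Linux x86-64 defaults used in the first test file below (ShadowBase = 0x7fff8000, MappingScale = 3); __asan_report_load4 is the runtime entry point the generated code tail-calls:

#include <cstdint>

extern "C" void __asan_report_load4(std::uintptr_t Addr); // ASan runtime entry point

// Sketch of the 1/2/4-byte ("partial") path from emitAsanMemaccessPartial.
inline void asanCheckLoad4(std::uintptr_t Addr) {
  std::int8_t Shadow = *reinterpret_cast<std::int8_t *>(0x7fff8000 + (Addr >> 3));
  if (Shadow != 0) {
    // Non-zero shadow byte: the access is valid only if it ends inside the
    // addressable prefix of its 8-byte granule (signed compare, like jl above).
    int Last = static_cast<int>(Addr & 7) + 3; // offset of the access's last byte
    if (Last >= Shadow)
      __asan_report_load4(Addr);
  }
  // The 8/16-byte path (emitAsanMemaccessFull) has no slow path: any non-zero
  // shadow byte(s) report immediately.
}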

@@ -436,6 +436,12 @@ def GR64 : RegisterClass<"X86", [i64], 64,
(add RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
RBX, R14, R15, R12, R13, RBP, RSP, RIP)>;
// GR64NoR8 - 64-bit GPRs without R8 and RIP. Could be used when emitting code for
// intrinsics that use implicit input registers.
def GR64NoR8 : RegisterClass<"X86", [i64], 64,
(add RAX, RCX, RDX, RSI, RDI, R9, R10, R11,
RBX, R14, R15, R12, R13, RBP, RSP)>;
// Segment registers for use by MOV instructions (and others) that have a
// segment register as one operand. Always contain a 16-bit segment
// descriptor.


@@ -178,6 +178,14 @@ static const size_t kNumberOfAccessSizes = 5;
static const unsigned kAllocaRzSize = 32;
// ASanAccessInfo implementation constants.
constexpr size_t kCompileKernelShift = 0;
constexpr size_t kCompileKernelMask = 0x1;
constexpr size_t kAccessSizeIndexShift = 1;
constexpr size_t kAccessSizeIndexMask = 0xf;
constexpr size_t kIsWriteShift = 5;
constexpr size_t kIsWriteMask = 0x1;
// Command-line flags.
static cl::opt<bool> ClEnableKasan(
@@ -568,6 +576,21 @@ void getAddressSanitizerParams(const Triple &TargetTriple, int LongSize,
*MappingScale = Mapping.Scale;
*OrShadowOffset = Mapping.OrShadowOffset;
}
ASanAccessInfo::ASanAccessInfo(int32_t Packed)
: Packed(Packed),
AccessSizeIndex((Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask),
IsWrite((Packed >> kIsWriteShift) & kIsWriteMask),
CompileKernel((Packed >> kCompileKernelShift) & kCompileKernelMask) {}
ASanAccessInfo::ASanAccessInfo(bool IsWrite, bool CompileKernel,
uint8_t AccessSizeIndex)
: Packed((IsWrite << kIsWriteShift) +
(CompileKernel << kCompileKernelShift) +
(AccessSizeIndex << kAccessSizeIndexShift)),
AccessSizeIndex(AccessSizeIndex), IsWrite(IsWrite),
CompileKernel(CompileKernel) {}
} // namespace llvm
static uint64_t getRedzoneSizeForScale(int MappingScale) {

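These shift constants explain the immediates passed to llvm.asan.check.memaccess in the tests below: Packed = (IsWrite << 5) + (CompileKernel << 0) + (AccessSizeIndex << 1). A self-contained sketch, with the constants re-declared locally for illustration:

#include <cstdint>

// Local copies of the shift/mask constants above, for illustration only.
constexpr int kCompileKernelShift = 0;
constexpr int kAccessSizeIndexShift = 1;
constexpr int kAccessSizeIndexMask = 0xf;
constexpr int kIsWriteShift = 5;

constexpr std::int32_t pack(bool IsWrite, bool CompileKernel, std::uint8_t AccessSizeIndex) {
  return (IsWrite << kIsWriteShift) + (CompileKernel << kCompileKernelShift) +
         (AccessSizeIndex << kAccessSizeIndexShift);
}

constexpr std::uint8_t sizeIndex(std::int32_t Packed) {
  return (Packed >> kAccessSizeIndexShift) & kAccessSizeIndexMask;
}

// Matches the test inputs: load1 -> 0, store1 -> 32, load4 -> 4, store4 -> 36.
static_assert(pack(false, false, 0) == 0, "load1");
static_assert(pack(true, false, 0) == 32, "store1");
static_assert(pack(false, false, 2) == 4, "load4");
static_assert(pack(true, false, 2) == 36, "store4");
static_assert(sizeIndex(36) == 2, "round-trip");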

@@ -0,0 +1,243 @@
; RUN: llc < %s | FileCheck %s
target triple = "x86_64-unknown-linux-gnu"
define void @load1(i8* nocapture readonly %x) {
; CHECK: callq __asan_check_load1_rn[[RN1:.*]]
; CHECK: callq __asan_check_store1_rn[[RN1]]
; CHECK-NEXT: retq
call void @llvm.asan.check.memaccess(i8* %x, i32 0)
call void @llvm.asan.check.memaccess(i8* %x, i32 32)
ret void
}
define void @load2(i16* nocapture readonly %x) {
; CHECK: callq __asan_check_load2_rn[[RN2:.*]]
; CHECK: callq __asan_check_store2_rn[[RN2]]
; CHECK-NEXT: retq
%1 = ptrtoint i16* %x to i64
%2 = bitcast i16* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 2)
call void @llvm.asan.check.memaccess(i8* %2, i32 34)
ret void
}
define void @load4(i32* nocapture readonly %x) {
; CHECK: callq __asan_check_load4_rn[[RN4:.*]]
; CHECK: callq __asan_check_store4_rn[[RN4]]
; CHECK-NEXT: retq
%1 = ptrtoint i32* %x to i64
%2 = bitcast i32* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 4)
call void @llvm.asan.check.memaccess(i8* %2, i32 36)
ret void
}
define void @load8(i64* nocapture readonly %x) {
; CHECK: callq __asan_check_load8_rn[[RN8:.*]]
; CHECK: callq __asan_check_store8_rn[[RN8]]
; CHECK-NEXT: retq
%1 = ptrtoint i64* %x to i64
%2 = bitcast i64* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 6)
call void @llvm.asan.check.memaccess(i8* %2, i32 38)
ret void
}
define void @load16(i128* nocapture readonly %x) {
; CHECK: callq __asan_check_load16_rn[[RN16:.*]]
; CHECK: callq __asan_check_store16_rn[[RN16]]
; CHECK-NEXT: retq
%1 = ptrtoint i128* %x to i64
%2 = bitcast i128* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 8)
call void @llvm.asan.check.memaccess(i8* %2, i32 40)
ret void
}
; CHECK: .type __asan_check_load1_rn[[RN1]],@function
; CHECK-NEXT: .weak __asan_check_load1_rn[[RN1]]
; CHECK-NEXT: .hidden __asan_check_load1_rn[[RN1]]
; CHECK-NEXT: __asan_check_load1_rn[[RN1]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
; CHECK-NEXT: testl %r8d, %r8d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load1
; CHECK: .type __asan_check_load2_rn[[RN2]],@function
; CHECK-NEXT: .weak __asan_check_load2_rn[[RN2]]
; CHECK-NEXT: .hidden __asan_check_load2_rn[[RN2]]
; CHECK-NEXT: __asan_check_load2_rn[[RN2]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
; CHECK-NEXT: testl %r8d, %r8d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: addl $1, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load2
; CHECK: .type __asan_check_load4_rn[[RN4]],@function
; CHECK-NEXT: .weak __asan_check_load4_rn[[RN4]]
; CHECK-NEXT: .hidden __asan_check_load4_rn[[RN4]]
; CHECK-NEXT: __asan_check_load4_rn[[RN4]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
; CHECK-NEXT: testl %r8d, %r8d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: addl $3, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load4
; CHECK: .type __asan_check_load8_rn[[RN8]],@function
; CHECK-NEXT: .weak __asan_check_load8_rn[[RN8]]
; CHECK-NEXT: .hidden __asan_check_load8_rn[[RN8]]
; CHECK-NEXT: __asan_check_load8_rn[[RN8]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: cmpb $0, 2147450880(%r8)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load8
; CHECK: .type __asan_check_load16_rn[[RN16]],@function
; CHECK-NEXT: .weak __asan_check_load16_rn[[RN16]]
; CHECK-NEXT: .hidden __asan_check_load16_rn[[RN16]]
; CHECK-NEXT: __asan_check_load16_rn[[RN16]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: cmpw $0, 2147450880(%r8)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load16
; CHECK: .type __asan_check_store1_rn[[RN1]],@function
; CHECK-NEXT: .weak __asan_check_store1_rn[[RN1]]
; CHECK-NEXT: .hidden __asan_check_store1_rn[[RN1]]
; CHECK-NEXT: __asan_check_store1_rn[[RN1]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
; CHECK-NEXT: testl %r8d, %r8d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store1
; CHECK: .type __asan_check_store2_rn[[RN2]],@function
; CHECK-NEXT: .weak __asan_check_store2_rn[[RN2]]
; CHECK-NEXT: .hidden __asan_check_store2_rn[[RN2]]
; CHECK-NEXT: __asan_check_store2_rn[[RN2]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
; CHECK-NEXT: testl %r8d, %r8d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: addl $1, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store2
; CHECK: .type __asan_check_store4_rn[[RN4]],@function
; CHECK-NEXT: .weak __asan_check_store4_rn[[RN4]]
; CHECK-NEXT: .hidden __asan_check_store4_rn[[RN4]]
; CHECK-NEXT: __asan_check_store4_rn[[RN4]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
; CHECK-NEXT: testl %r8d, %r8d
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: addl $3, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store4
; CHECK: .type __asan_check_store8_rn[[RN8]],@function
; CHECK-NEXT: .weak __asan_check_store8_rn[[RN8]]
; CHECK-NEXT: .hidden __asan_check_store8_rn[[RN8]]
; CHECK-NEXT: __asan_check_store8_rn[[RN8]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: cmpb $0, 2147450880(%r8)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store8
; CHECK: .type __asan_check_store16_rn[[RN16]],@function
; CHECK-NEXT: .weak __asan_check_store16_rn[[RN16]]
; CHECK-NEXT: .hidden __asan_check_store16_rn[[RN16]]
; CHECK-NEXT: __asan_check_store16_rn[[RN16]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: cmpw $0, 2147450880(%r8)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store16
declare void @llvm.asan.check.memaccess(i8*, i32 immarg)


@@ -0,0 +1,253 @@
; RUN: llc < %s | FileCheck %s
target triple = "x86_64-pc-win"
define void @load1(i8* nocapture readonly %x) {
; CHECK: callq __asan_check_load1_rn[[RN1:.*]]
; CHECK: callq __asan_check_store1_rn[[RN1]]
; CHECK-NEXT: retq
call void @llvm.asan.check.memaccess(i8* %x, i32 0)
call void @llvm.asan.check.memaccess(i8* %x, i32 32)
ret void
}
define void @load2(i16* nocapture readonly %x) {
; CHECK: callq __asan_check_load2_rn[[RN2:.*]]
; CHECK: callq __asan_check_store2_rn[[RN2]]
; CHECK-NEXT: retq
%1 = ptrtoint i16* %x to i64
%2 = bitcast i16* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 2)
call void @llvm.asan.check.memaccess(i8* %2, i32 34)
ret void
}
define void @load4(i32* nocapture readonly %x) {
; CHECK: callq __asan_check_load4_rn[[RN4:.*]]
; CHECK: callq __asan_check_store4_rn[[RN4]]
; CHECK-NEXT: retq
%1 = ptrtoint i32* %x to i64
%2 = bitcast i32* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 4)
call void @llvm.asan.check.memaccess(i8* %2, i32 36)
ret void
}
define void @load8(i64* nocapture readonly %x) {
; CHECK: callq __asan_check_load8_rn[[RN8:.*]]
; CHECK: callq __asan_check_store8_rn[[RN8]]
; CHECK-NEXT: retq
%1 = ptrtoint i64* %x to i64
%2 = bitcast i64* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 6)
call void @llvm.asan.check.memaccess(i8* %2, i32 38)
ret void
}
define void @load16(i128* nocapture readonly %x) {
; CHECK: callq __asan_check_load16_rn[[RN16:.*]]
; CHECK: callq __asan_check_store16_rn[[RN16]]
; CHECK-NEXT: retq
%1 = ptrtoint i128* %x to i64
%2 = bitcast i128* %x to i8*
call void @llvm.asan.check.memaccess(i8* %2, i32 8)
call void @llvm.asan.check.memaccess(i8* %2, i32 40)
ret void
}
; CHECK: .type __asan_check_load1_rn[[RN1]],@function
; CHECK-NEXT: .weak __asan_check_load1_rn[[RN1]]
; CHECK-NEXT: .hidden __asan_check_load1_rn[[RN1]]
; CHECK-NEXT: __asan_check_load1_rn[[RN1]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: orq $17592186044416, %r8{{.*}}
; CHECK-NEXT: movb (%r8), %r8b
; CHECK-NEXT: testb %r8b, %r8b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load1
; CHECK: .type __asan_check_load2_rn[[RN2]],@function
; CHECK-NEXT: .weak __asan_check_load2_rn[[RN2]]
; CHECK-NEXT: .hidden __asan_check_load2_rn[[RN2]]
; CHECK-NEXT: __asan_check_load2_rn[[RN2]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: orq $17592186044416, %r8{{.*}}
; CHECK-NEXT: movb (%r8), %r8b
; CHECK-NEXT: testb %r8b, %r8b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: addl $1, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load2
; CHECK: .type __asan_check_load4_rn[[RN4]],@function
; CHECK-NEXT: .weak __asan_check_load4_rn[[RN4]]
; CHECK-NEXT: .hidden __asan_check_load4_rn[[RN4]]
; CHECK-NEXT: __asan_check_load4_rn[[RN4]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: orq $17592186044416, %r8{{.*}}
; CHECK-NEXT: movb (%r8), %r8b
; CHECK-NEXT: testb %r8b, %r8b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: addl $3, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load4
; CHECK: .type __asan_check_load8_rn[[RN8]],@function
; CHECK-NEXT: .weak __asan_check_load8_rn[[RN8]]
; CHECK-NEXT: .hidden __asan_check_load8_rn[[RN8]]
; CHECK-NEXT: __asan_check_load8_rn[[RN8]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: orq $17592186044416, %r8{{.*}}
; CHECK-NEXT: cmpb $0, (%r8)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load8
; CHECK: .type __asan_check_load16_rn[[RN16]],@function
; CHECK-NEXT: .weak __asan_check_load16_rn[[RN16]]
; CHECK-NEXT: .hidden __asan_check_load16_rn[[RN16]]
; CHECK-NEXT: __asan_check_load16_rn[[RN16]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: orq $17592186044416, %r8{{.*}}
; CHECK-NEXT: cmpw $0, (%r8)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_load16
; CHECK: .type __asan_check_store1_rn[[RN1]],@function
; CHECK-NEXT: .weak __asan_check_store1_rn[[RN1]]
; CHECK-NEXT: .hidden __asan_check_store1_rn[[RN1]]
; CHECK-NEXT: __asan_check_store1_rn[[RN1]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: orq $17592186044416, %r8
; CHECK-NEXT: movb (%r8), %r8b
; CHECK-NEXT: testb %r8b, %r8b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store1
; CHECK: .type __asan_check_store2_rn[[RN2]],@function
; CHECK-NEXT: .weak __asan_check_store2_rn[[RN2]]
; CHECK-NEXT: .hidden __asan_check_store2_rn[[RN2]]
; CHECK-NEXT: __asan_check_store2_rn[[RN2]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: orq $17592186044416, %r8
; CHECK-NEXT: movb (%r8), %r8b
; CHECK-NEXT: testb %r8b, %r8b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: addl $1, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store2
; CHECK: .type __asan_check_store4_rn[[RN4]],@function
; CHECK-NEXT: .weak __asan_check_store4_rn[[RN4]]
; CHECK-NEXT: .hidden __asan_check_store4_rn[[RN4]]
; CHECK-NEXT: __asan_check_store4_rn[[RN4]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: orq $17592186044416, %r8
; CHECK-NEXT: movb (%r8), %r8b
; CHECK-NEXT: testb %r8b, %r8b
; CHECK-NEXT: jne [[EXTRA:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[EXTRA]]:
; CHECK-NEXT: pushq %rcx
; CHECK-NEXT: movq [[REG]], %rcx
; CHECK-NEXT: andl $7, %ecx
; CHECK-NEXT: addl $3, %ecx
; CHECK-NEXT: cmpl %r8d, %ecx
; CHECK-NEXT: popq %rcx
; CHECK-NEXT: jl [[RET]]
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store4
; CHECK: .type __asan_check_store8_rn[[RN8]],@function
; CHECK-NEXT: .weak __asan_check_store8_rn[[RN8]]
; CHECK-NEXT: .hidden __asan_check_store8_rn[[RN8]]
; CHECK-NEXT: __asan_check_store8_rn[[RN8]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: orq $17592186044416, %r8{{.*}}
; CHECK-NEXT: cmpb $0, (%r8)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store8
; CHECK: .type __asan_check_store16_rn[[RN16]],@function
; CHECK-NEXT: .weak __asan_check_store16_rn[[RN16]]
; CHECK-NEXT: .hidden __asan_check_store16_rn[[RN16]]
; CHECK-NEXT: __asan_check_store16_rn[[RN16]]:
; CHECK-NEXT: movq [[REG:.*]], %r8
; CHECK-NEXT: shrq $3, %r8
; CHECK-NEXT: orq $17592186044416, %r8{{.*}}
; CHECK-NEXT: cmpw $0, (%r8)
; CHECK-NEXT: jne [[FAIL:.*]]
; CHECK-NEXT: [[RET:.*]]:
; CHECK-NEXT: retq
; CHECK-NEXT: [[FAIL]]:
; CHECK-NEXT: movq [[REG:.*]], %rdi
; CHECK-NEXT: jmp __asan_report_store16
declare void @llvm.asan.check.memaccess(i8*, i32 immarg)