; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=x86_64-pc-linux-gnu -stack-protector < %s | FileCheck %s

; Bug 42238: Test some situations missed by old, custom capture tracking.
; The pass previously used its own, outdated list of possible captures,
; which at minimum did not catch cmpxchg and addrspacecast captures; any
; volatile access is now treated as capturing. (llvm-svn: 363169)
; Storing the address of a local (%a into %j) is a pointer capture, so the
; CHECK lines expect full stack-protector instrumentation: the guard slot,
; the volatile guard load/compare, and the SP_return/CallStackCheckFailBlk
; blocks.
define void @store_captures() #0 {
; CHECK-LABEL: @store_captures(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[STACKGUARDSLOT:%.*]] = alloca i8*
; CHECK-NEXT:    [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[J:%.*]] = alloca i32*, align 8
; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
; CHECK-NEXT:    store i32* [[A]], i32** [[J]], align 8
; CHECK-NEXT:    [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
; CHECK-NEXT:    br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK:       SP_return:
; CHECK-NEXT:    ret void
; CHECK:       CallStackCheckFailBlk:
; CHECK-NEXT:    call void @__stack_chk_fail()
; CHECK-NEXT:    unreachable
;
entry:
  %retval = alloca i32, align 4
  %a = alloca i32, align 4
  %j = alloca i32*, align 8
  store i32 0, i32* %retval
  %load = load i32, i32* %a, align 4
  %add = add nsw i32 %load, 1
  store i32 %add, i32* %a, align 4
  store i32* %a, i32** %j, align 8
  ret void
}
; Returning a local's address: per the CHECK lines below, no stack-protector
; instrumentation is expected for this function (no guard slot or guard
; load/compare appears in the expected output).
define i32* @return_captures() #0 {
; CHECK-LABEL: @return_captures(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[J:%.*]] = alloca i32*, align 8
; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
; CHECK-NEXT:    ret i32* [[A]]
;
entry:
  %retval = alloca i32, align 4
  %a = alloca i32, align 4
  %j = alloca i32*, align 8
  store i32 0, i32* %retval
  %load = load i32, i32* %a, align 4
  %add = add nsw i32 %load, 1
  store i32 %add, i32* %a, align 4
  ret i32* %a
}
; A pointer escaping through an addrspacecast before being stored is still a
; capture (one of the cases the old custom tracking missed — see the header
; comment); the CHECK lines expect full stack-protector instrumentation.
define void @store_addrspacecast_captures() #0 {
; CHECK-LABEL: @store_addrspacecast_captures(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[STACKGUARDSLOT:%.*]] = alloca i8*
; CHECK-NEXT:    [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[J:%.*]] = alloca i32 addrspace(1)*, align 8
; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
; CHECK-NEXT:    [[A_ADDRSPACECAST:%.*]] = addrspacecast i32* [[A]] to i32 addrspace(1)*
; CHECK-NEXT:    store i32 addrspace(1)* [[A_ADDRSPACECAST]], i32 addrspace(1)** [[J]], align 8
; CHECK-NEXT:    [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
; CHECK-NEXT:    br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK:       SP_return:
; CHECK-NEXT:    ret void
; CHECK:       CallStackCheckFailBlk:
; CHECK-NEXT:    call void @__stack_chk_fail()
; CHECK-NEXT:    unreachable
;
entry:
  %retval = alloca i32, align 4
  %a = alloca i32, align 4
  %j = alloca i32 addrspace(1)*, align 8
  store i32 0, i32* %retval
  %load = load i32, i32* %a, align 4
  %add = add nsw i32 %load, 1
  store i32 %add, i32* %a, align 4
  %a.addrspacecast = addrspacecast i32* %a to i32 addrspace(1)*
  store i32 addrspace(1)* %a.addrspacecast, i32 addrspace(1)** %j, align 8
  ret void
}
; Passing %a as the new value of a cmpxchg publishes the pointer to memory —
; another capture the old custom tracking missed (see the header comment);
; the CHECK lines expect full stack-protector instrumentation. The cmpxchg
; result is intentionally unnamed (implicit %0, matched as [[TMP0]]).
define void @cmpxchg_captures() #0 {
; CHECK-LABEL: @cmpxchg_captures(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[STACKGUARDSLOT:%.*]] = alloca i8*
; CHECK-NEXT:    [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
; CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[J:%.*]] = alloca i32*, align 8
; CHECK-NEXT:    store i32 0, i32* [[RETVAL]]
; CHECK-NEXT:    [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
; CHECK-NEXT:    store i32 [[ADD]], i32* [[A]], align 4
; CHECK-NEXT:    [[TMP0:%.*]] = cmpxchg i32** [[J]], i32* [[A]], i32* null seq_cst monotonic
; CHECK-NEXT:    [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT:    [[TMP1:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP1]]
; CHECK-NEXT:    br i1 [[TMP2]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK:       SP_return:
; CHECK-NEXT:    ret void
; CHECK:       CallStackCheckFailBlk:
; CHECK-NEXT:    call void @__stack_chk_fail()
; CHECK-NEXT:    unreachable
;
entry:
  %retval = alloca i32, align 4
  %a = alloca i32, align 4
  %j = alloca i32*, align 8
  store i32 0, i32* %retval
  %load = load i32, i32* %a, align 4
  %add = add nsw i32 %load, 1
  store i32 %add, i32* %a, align 4
  cmpxchg i32** %j, i32* %a, i32* null seq_cst monotonic
  ret void
}
; Every function above opts in to strong stack protection.
attributes #0 = { sspstrong }