The internal structure used to carry intermediate computations holds
signed values. If an object size happens to overflow the signed range, we
can get invalid results, so make sure this situation never happens.
This is not very restrictive, as static allocations of such large objects
should rarely happen in practice.
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=lower-constant-intrinsics -S < %s | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"

declare i64 @llvm.objectsize.i64(ptr, i1, i1, i1) nounwind readonly
declare i64 @llvm.objectsize.i64.p1(ptr addrspace(1), i1, i1, i1) nounwind readonly
declare void @llvm.trap() nounwind

; objectsize should fold to a constant, which causes the branch to fold to an
; uncond branch.
define i32 @test1(ptr %ptr) nounwind ssp noredzone align 2 {
; CHECK-LABEL: @test1(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[T:%.*]]
; CHECK: T:
; CHECK-NEXT: ret i32 4
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 false, i1 false, i1 false)
  %1 = icmp ugt i64 %0, 3
  br i1 %1, label %T, label %trap

trap: ; preds = %0, %entry
  tail call void @llvm.trap() noreturn nounwind
  unreachable

T:
  ret i32 4
}

define i64 @test_objectsize_null_flag(ptr %ptr) {
; CHECK-LABEL: @test_objectsize_null_flag(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i64 -1
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64(ptr null, i1 false, i1 true, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_null_flag_min(ptr %ptr) {
; CHECK-LABEL: @test_objectsize_null_flag_min(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i64 0
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64(ptr null, i1 true, i1 true, i1 false)
  ret i64 %0
}

; Test foldable null pointers because we evaluate them with non-exact modes in
; CodeGenPrepare.
define i64 @test_objectsize_null_flag_noas0() {
; CHECK-LABEL: @test_objectsize_null_flag_noas0(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i64 -1
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1(ptr addrspace(1) null, i1 false,
                                             i1 true, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_null_flag_min_noas0() {
; CHECK-LABEL: @test_objectsize_null_flag_min_noas0(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i64 0
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1(ptr addrspace(1) null, i1 true,
                                             i1 true, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_null_known_flag_noas0() {
; CHECK-LABEL: @test_objectsize_null_known_flag_noas0(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i64 -1
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1(ptr addrspace(1) null, i1 false,
                                             i1 false, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_null_known_flag_min_noas0() {
; CHECK-LABEL: @test_objectsize_null_known_flag_min_noas0(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i64 0
;
entry:
  %0 = tail call i64 @llvm.objectsize.i64.p1(ptr addrspace(1) null, i1 true,
                                             i1 false, i1 false)
  ret i64 %0
}

define i64 @test_objectsize_byval_arg(ptr byval([42 x i8]) %ptr) {
; CHECK-LABEL: @test_objectsize_byval_arg(
; CHECK-NEXT: ret i64 42
;
  %size = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 true, i1 false, i1 false)
  ret i64 %size
}

define i64 @test_objectsize_byref_arg(ptr byref([42 x i8]) %ptr) {
; CHECK-LABEL: @test_objectsize_byref_arg(
; CHECK-NEXT: ret i64 42
;
  %size = tail call i64 @llvm.objectsize.i64(ptr %ptr, i1 true, i1 false, i1 false)
  ret i64 %size
}

; https://llvm.org/PR50023
; The alloca operand type may not match pointer type size.

define i64 @vla_pointer_size_mismatch(i42 %x) {
; CHECK-LABEL: @vla_pointer_size_mismatch(
; CHECK-NEXT: [[TMP1:%.*]] = zext i42 [[X:%.*]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = mul i64 1, [[TMP1]]
; CHECK-NEXT: [[A:%.*]] = alloca i8, i42 [[X]], align 1
; CHECK-NEXT: [[G1:%.*]] = getelementptr i8, ptr [[A]], i8 17
; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP2]], 17
; CHECK-NEXT: [[TMP4:%.*]] = icmp ult i64 [[TMP2]], 17
; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 0, i64 [[TMP3]]
; CHECK-NEXT: [[TMP6:%.*]] = icmp ne i64 [[TMP5]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[TMP6]])
; CHECK-NEXT: ret i64 [[TMP5]]
;
  %A = alloca i8, i42 %x, align 1
  %G1 = getelementptr i8, ptr %A, i8 17
  %objsize = call i64 @llvm.objectsize.i64(ptr %G1, i1 false, i1 true, i1 true)
  ret i64 %objsize
}

declare ptr @malloc(i64) allocsize(0)

define i64 @test_objectsize_malloc() {
; CHECK-LABEL: @test_objectsize_malloc(
; CHECK-NEXT: [[PTR:%.*]] = call ptr @malloc(i64 16)
; CHECK-NEXT: ret i64 16
;
  %ptr = call ptr @malloc(i64 16)
  %objsize = call i64 @llvm.objectsize.i64(ptr %ptr, i1 false, i1 true, i1 true)
  ret i64 %objsize
}

@gv_weak = weak global i64 zeroinitializer, align 16

define i32 @promote_with_objectsize_min_false() {
; CHECK-LABEL: @promote_with_objectsize_min_false(
; CHECK-NEXT: ret i32 -1
;
  %size = call i32 @llvm.objectsize.i32.p0(ptr @gv_weak, i1 false, i1 false, i1 false)
  ret i32 %size
}

define i32 @promote_with_objectsize_min_true() {
; CHECK-LABEL: @promote_with_objectsize_min_true(
; CHECK-NEXT: ret i32 8
;
  %size = call i32 @llvm.objectsize.i32.p0(ptr @gv_weak, i1 true, i1 false, i1 false)
  ret i32 %size
}

@gv_extern = extern_weak global i64, align 16

define i32 @promote_with_objectsize_nullunknown_false() {
; CHECK-LABEL: @promote_with_objectsize_nullunknown_false(
; CHECK-NEXT: ret i32 0
;
  %size = call i32 @llvm.objectsize.i32.p0(ptr @gv_extern, i1 true, i1 false, i1 false)
  ret i32 %size
}

define i32 @promote_with_objectsize_nullunknown_true() {
; CHECK-LABEL: @promote_with_objectsize_nullunknown_true(
; CHECK-NEXT: ret i32 0
;
  %size = call i32 @llvm.objectsize.i32.p0(ptr @gv_extern, i1 true, i1 true, i1 false)
  ret i32 %size
}

define i64 @out_of_bound_gep() {
; CHECK-LABEL: @out_of_bound_gep(
; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i32 4, align 1
; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i8 8
; CHECK-NEXT: ret i64 0
;
  %obj = alloca i8, i32 4
  %slide = getelementptr i8, ptr %obj, i8 8
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

define i64 @wrapping_gep(i1 %c) {
; CHECK-LABEL: @wrapping_gep(
; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 4, align 1
; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 -9223372036854775807
; CHECK-NEXT: [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 -9223372036854775808
; CHECK-NEXT: ret i64 3
;
  %obj = alloca i8, i64 4
  %slide = getelementptr i8, ptr %obj, i64 9223372036854775809
  %slide.bis = getelementptr i8, ptr %slide, i64 9223372036854775808
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide.bis, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

define i64 @wrapping_gep_neg(i1 %c) {
; CHECK-LABEL: @wrapping_gep_neg(
; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 4, align 1
; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
; CHECK-NEXT: [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 9223372036854775807
; CHECK-NEXT: ret i64 0
;
  %obj = alloca i8, i64 4
  %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
  %slide.bis = getelementptr i8, ptr %slide, i64 9223372036854775807
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide.bis, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

define i64 @wrapping_gep_large_alloc(i1 %c) {
; CHECK-LABEL: @wrapping_gep_large_alloc(
; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 9223372036854775807, align 1
; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
; CHECK-NEXT: [[SLIDE_BIS:%.*]] = getelementptr i8, ptr [[SLIDE]], i64 3
; CHECK-NEXT: [[SLIDE_TER:%.*]] = getelementptr i8, ptr [[SLIDE_BIS]], i64 -4
; CHECK-NEXT: ret i64 1
;
  %obj = alloca i8, i64 9223372036854775807
  %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
  %slide.bis = getelementptr i8, ptr %slide, i64 3
  %slide.ter = getelementptr i8, ptr %slide.bis, i64 -4
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide.ter, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

; We don't analyze allocations larger than platform's ptrdiff_t
define i64 @large_alloca() {
; CHECK-LABEL: @large_alloca(
; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i64 -9223372036854775808, align 1
; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
; CHECK-NEXT: ret i64 -1
;
  %obj = alloca i8, i64 9223372036854775808
  %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

; We don't analyze allocations larger than platform's ptrdiff_t
define i64 @large_malloc() {
; CHECK-LABEL: @large_malloc(
; CHECK-NEXT: [[OBJ:%.*]] = call ptr @malloc(i64 -9223372036854775808)
; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i64 9223372036854775807
; CHECK-NEXT: ret i64 -1
;
  %obj = call ptr @malloc(i64 9223372036854775808)
  %slide = getelementptr i8, ptr %obj, i64 9223372036854775807
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

define i64 @out_of_bound_negative_gep(i1 %c) {
; CHECK-LABEL: @out_of_bound_negative_gep(
; CHECK-NEXT: [[OBJ:%.*]] = alloca i8, i32 4, align 1
; CHECK-NEXT: [[SLIDE:%.*]] = getelementptr i8, ptr [[OBJ]], i8 -8
; CHECK-NEXT: ret i64 0
;
  %obj = alloca i8, i32 4
  %slide = getelementptr i8, ptr %obj, i8 -8
  %objsize = call i64 @llvm.objectsize.i64(ptr %slide, i1 false, i1 false, i1 false)
  ret i64 %objsize
}

declare i32 @llvm.objectsize.i32.p0(ptr, i1, i1, i1)