Use KnownBits to infer the nneg flag on zext instructions. Currently we only set nneg when converting sext -> zext, but not when the instruction was a zext to begin with. If we want to rely on the flag in later optimizations, the inference should be consistent between the two cases.
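For illustration only (this function is hypothetical and not part of the test file below), the kind of case the inference covers: the mask clears the sign bit, so KnownBits can prove the zext operand is non-negative and the plain zext could carry the nneg flag.

define i32 @infer_nneg_example(i8 %a) {
  %m = and i8 %a, 127    ; sign bit of %m is known zero
  %e = zext i8 %m to i32 ; candidate to become 'zext nneg' under the new inference
  ret i32 %e
}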
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

target datalayout = "n64"

declare void @use1(i1)
declare void @use32(i32)
declare void @use64(i64)
declare void @use_vec(<2 x i9>)

define i64 @test_sext_zext(i16 %A) {
; CHECK-LABEL: @test_sext_zext(
; CHECK-NEXT: [[C2:%.*]] = zext i16 [[A:%.*]] to i64
; CHECK-NEXT: ret i64 [[C2]]
;
  %c1 = zext i16 %A to i32
  %c2 = sext i32 %c1 to i64
  ret i64 %c2
}

define <2 x i64> @test2(<2 x i1> %A) {
; CHECK-LABEL: @test2(
; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i1> [[A:%.*]], <i1 true, i1 true>
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i1> [[XOR]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[ZEXT]]
;
  %xor = xor <2 x i1> %A, <i1 true, i1 true>
  %zext = zext <2 x i1> %xor to <2 x i64>
  ret <2 x i64> %zext
}

define <2 x i64> @test3(<2 x i64> %A) {
; CHECK-LABEL: @test3(
; CHECK-NEXT: [[ZEXT:%.*]] = and <2 x i64> [[A:%.*]], <i64 23, i64 42>
; CHECK-NEXT: ret <2 x i64> [[ZEXT]]
;
  %trunc = trunc <2 x i64> %A to <2 x i32>
  %and = and <2 x i32> %trunc, <i32 23, i32 42>
  %zext = zext <2 x i32> %and to <2 x i64>
  ret <2 x i64> %zext
}

define <2 x i64> @test4(<2 x i64> %A) {
; CHECK-LABEL: @test4(
; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i64> [[A:%.*]], <i64 23, i64 42>
; CHECK-NEXT: [[ZEXT:%.*]] = xor <2 x i64> [[TMP1]], <i64 23, i64 42>
; CHECK-NEXT: ret <2 x i64> [[ZEXT]]
;
  %trunc = trunc <2 x i64> %A to <2 x i32>
  %and = and <2 x i32> %trunc, <i32 23, i32 42>
  %xor = xor <2 x i32> %and, <i32 23, i32 42>
  %zext = zext <2 x i32> %xor to <2 x i64>
  ret <2 x i64> %zext
}

define i64 @fold_xor_zext_sandwich(i1 %a) {
; CHECK-LABEL: @fold_xor_zext_sandwich(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
; CHECK-NEXT: [[ZEXT2:%.*]] = zext i1 [[TMP1]] to i64
; CHECK-NEXT: ret i64 [[ZEXT2]]
;
  %zext1 = zext i1 %a to i32
  %xor = xor i32 %zext1, 1
  %zext2 = zext i32 %xor to i64
  ret i64 %zext2
}

define <2 x i64> @fold_xor_zext_sandwich_vec(<2 x i1> %a) {
; CHECK-LABEL: @fold_xor_zext_sandwich_vec(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> [[A:%.*]], <i1 true, i1 true>
; CHECK-NEXT: [[ZEXT2:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT: ret <2 x i64> [[ZEXT2]]
;
  %zext1 = zext <2 x i1> %a to <2 x i32>
  %xor = xor <2 x i32> %zext1, <i32 1, i32 1>
  %zext2 = zext <2 x i32> %xor to <2 x i64>
  ret <2 x i64> %zext2
}
; Assert that zexts in and(zext(icmp), zext(icmp)) can be folded.

define i8 @fold_and_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_and_zext_icmp(
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT: ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = and i8 %2, %4
  ret i8 %5
}

; Assert that zexts in or(zext(icmp), zext(icmp)) can be folded.

define i8 @fold_or_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_or_zext_icmp(
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = or i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT: ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = or i8 %2, %4
  ret i8 %5
}

; Assert that zexts in xor(zext(icmp), zext(icmp)) can be folded.

define i8 @fold_xor_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_xor_zext_icmp(
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT: ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = xor i8 %2, %4
  ret i8 %5
}
; Assert that zexts in logic(zext(icmp), zext(icmp)) are also folded across
; nested logical operators.

define i8 @fold_nested_logic_zext_icmp(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: @fold_nested_logic_zext_icmp(
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[A]], [[D:%.*]]
; CHECK-NEXT: [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i8
; CHECK-NEXT: ret i8 [[TMP6]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = and i8 %2, %4
  %6 = icmp eq i64 %a, %d
  %7 = zext i1 %6 to i8
  %8 = or i8 %5, %7
  ret i8 %8
}
; This test is for Integer BitWidth > 64 && BitWidth <= 1024.

define i1024 @sext_zext_apint1(i77 %A) {
; CHECK-LABEL: @sext_zext_apint1(
; CHECK-NEXT: [[C2:%.*]] = zext i77 [[A:%.*]] to i1024
; CHECK-NEXT: ret i1024 [[C2]]
;
  %c1 = zext i77 %A to i533
  %c2 = sext i533 %c1 to i1024
  ret i1024 %c2
}

; This test is for Integer BitWidth <= 64 && BitWidth % 2 != 0.

define i47 @sext_zext_apint2(i11 %A) {
; CHECK-LABEL: @sext_zext_apint2(
; CHECK-NEXT: [[C2:%.*]] = zext i11 [[A:%.*]] to i47
; CHECK-NEXT: ret i47 [[C2]]
;
  %c1 = zext i11 %A to i39
  %c2 = sext i39 %c1 to i47
  ret i47 %c2
}

define i32 @masked_bit_set(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set(
; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP1]], 1
; CHECK-NEXT: ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

define <2 x i32> @masked_bit_clear(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @masked_bit_clear(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i32> [[X:%.*]], <i32 -1, i32 -1>
; CHECK-NEXT: [[TMP2:%.*]] = lshr <2 x i32> [[TMP1]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[TMP2]], <i32 1, i32 1>
; CHECK-NEXT: ret <2 x i32> [[R]]
;
  %sh1 = shl <2 x i32> <i32 1, i32 1>, %y
  %and = and <2 x i32> %sh1, %x
  %cmp = icmp eq <2 x i32> %and, zeroinitializer
  %r = zext <2 x i1> %cmp to <2 x i32>
  ret <2 x i32> %r
}

define <2 x i32> @masked_bit_set_commute(<2 x i32> %px, <2 x i32> %y) {
; CHECK-LABEL: @masked_bit_set_commute(
; CHECK-NEXT: [[X:%.*]] = srem <2 x i32> <i32 42, i32 3>, [[PX:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = and <2 x i32> [[TMP1]], <i32 1, i32 1>
; CHECK-NEXT: ret <2 x i32> [[R]]
;
  %x = srem <2 x i32> <i32 42, i32 3>, %px ; thwart complexity-based canonicalization
  %sh1 = shl <2 x i32> <i32 1, i32 1>, %y
  %and = and <2 x i32> %x, %sh1
  %cmp = icmp ne <2 x i32> %and, zeroinitializer
  %r = zext <2 x i1> %cmp to <2 x i32>
  ret <2 x i32> %r
}
define i32 @masked_bit_clear_commute(i32 %px, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_commute(
; CHECK-NEXT: [[X:%.*]] = srem i32 42, [[PX:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X]], -1
; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP2]], 1
; CHECK-NEXT: ret i32 [[R]]
;
  %x = srem i32 42, %px ; thwart complexity-based canonicalization
  %sh1 = shl i32 1, %y
  %and = and i32 %x, %sh1
  %cmp = icmp eq i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

define i32 @masked_bit_set_use1(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_use1(
; CHECK-NEXT: [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
; CHECK-NEXT: call void @use32(i32 [[SH1]])
; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP1]], 1
; CHECK-NEXT: ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  call void @use32(i32 %sh1)
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

; Negative test

define i32 @masked_bit_set_use2(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_use2(
; CHECK-NEXT: [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[AND]])
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT: [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  call void @use32(i32 %and)
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

; Negative test

define i32 @masked_bit_set_use3(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_use3(
; CHECK-NEXT: [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT: call void @use1(i1 [[CMP]])
; CHECK-NEXT: [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  call void @use1(i1 %cmp)
  %r = zext i1 %cmp to i32
  ret i32 %r
}
define i32 @masked_bit_clear_use1(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_use1(
; CHECK-NEXT: [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
; CHECK-NEXT: call void @use32(i32 [[SH1]])
; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1
; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP2]], 1
; CHECK-NEXT: ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  call void @use32(i32 %sh1)
  %and = and i32 %sh1, %x
  %cmp = icmp eq i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

; Negative test

define i32 @masked_bit_clear_use2(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_use2(
; CHECK-NEXT: [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT: call void @use32(i32 [[AND]])
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  call void @use32(i32 %and)
  %cmp = icmp eq i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

; Negative test

define i32 @masked_bit_clear_use3(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_use3(
; CHECK-NEXT: [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT: call void @use1(i1 [[CMP]])
; CHECK-NEXT: [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp eq i32 %and, 0
  call void @use1(i1 %cmp)
  %r = zext i1 %cmp to i32
  ret i32 %r
}

; Negative test

define i32 @masked_bits_set(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bits_set(
; CHECK-NEXT: [[SH1:%.*]] = shl i32 3, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT: [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
  %sh1 = shl i32 3, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}
; Negative test

define i32 @div_bit_set(i32 %x, i32 %y) {
; CHECK-LABEL: @div_bit_set(
; CHECK-NEXT: [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = sdiv i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT: [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = sdiv i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

; Negative test

define i32 @masked_bit_set_nonzero_cmp(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_nonzero_cmp(
; CHECK-NEXT: [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[AND]], 1
; CHECK-NEXT: [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 1
  %r = zext i1 %cmp to i32
  ret i32 %r
}

; Negative test

define i32 @masked_bit_wrong_pred(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_wrong_pred(
; CHECK-NEXT: [[SH1:%.*]] = shl nuw i32 1, [[Y:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[AND]], 0
; CHECK-NEXT: [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp sgt i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

define i32 @zext_or_masked_bit_test(i32 %a, i32 %b, i32 %x) {
; CHECK-LABEL: @zext_or_masked_bit_test(
; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 1, [[B:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[A:%.*]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[B]]
; CHECK-NEXT: [[OR:%.*]] = or i1 [[TOBOOL]], [[CMP]]
; CHECK-NEXT: [[Z:%.*]] = zext i1 [[OR]] to i32
; CHECK-NEXT: ret i32 [[Z]]
;
  %shl = shl i32 1, %b
  %and = and i32 %shl, %a
  %tobool = icmp ne i32 %and, 0
  %cmp = icmp eq i32 %x, %b
  %or = or i1 %tobool, %cmp
  %z = zext i1 %or to i32
  ret i32 %z
}
define i32 @zext_or_masked_bit_test_uses(i32 %a, i32 %b, i32 %x) {
; CHECK-LABEL: @zext_or_masked_bit_test_uses(
; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 1, [[B:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[A:%.*]]
; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[B]]
; CHECK-NEXT: [[OR:%.*]] = or i1 [[TOBOOL]], [[CMP]]
; CHECK-NEXT: call void @use1(i1 [[OR]])
; CHECK-NEXT: [[Z:%.*]] = zext i1 [[OR]] to i32
; CHECK-NEXT: ret i32 [[Z]]
;
  %shl = shl i32 1, %b
  %and = and i32 %shl, %a
  %tobool = icmp ne i32 %and, 0
  %cmp = icmp eq i32 %x, %b
  %or = or i1 %tobool, %cmp
  call void @use1(i1 %or)
  %z = zext i1 %or to i32
  ret i32 %z
}

define i32 @notneg_zext_wider(i8 %x) {
; CHECK-LABEL: @notneg_zext_wider(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[X:%.*]], -1
; CHECK-NEXT: [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
  %cmp = icmp sgt i8 %x, -1
  %r = zext i1 %cmp to i32
  ret i32 %r
}

define <2 x i8> @notneg_zext_narrower(<2 x i32> %x) {
; CHECK-LABEL: @notneg_zext_narrower(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i32> [[X:%.*]], <i32 -1, i32 -1>
; CHECK-NEXT: [[R:%.*]] = zext <2 x i1> [[CMP]] to <2 x i8>
; CHECK-NEXT: ret <2 x i8> [[R]]
;
  %cmp = icmp sgt <2 x i32> %x, <i32 -1, i32 -1>
  %r = zext <2 x i1> %cmp to <2 x i8>
  ret <2 x i8> %r
}

define i32 @notneg_zext_wider_use(i8 %x) {
; CHECK-LABEL: @notneg_zext_wider_use(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use1(i1 [[CMP]])
; CHECK-NEXT: [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT: ret i32 [[R]]
;
  %cmp = icmp sgt i8 %x, -1
  call void @use1(i1 %cmp)
  %r = zext i1 %cmp to i32
  ret i32 %r
}

define i8 @notneg_zext_narrower_use(i32 %x) {
; CHECK-LABEL: @notneg_zext_narrower_use(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], -1
; CHECK-NEXT: call void @use1(i1 [[CMP]])
; CHECK-NEXT: [[R:%.*]] = zext i1 [[CMP]] to i8
; CHECK-NEXT: ret i8 [[R]]
;
  %cmp = icmp sgt i32 %x, -1
  call void @use1(i1 %cmp)
  %r = zext i1 %cmp to i8
  ret i8 %r
}
define i8 @disguised_signbit_clear_test(i64 %x) {
; CHECK-LABEL: @disguised_signbit_clear_test(
; CHECK-NEXT: [[A1:%.*]] = and i64 [[X:%.*]], 128
; CHECK-NEXT: [[T4:%.*]] = icmp eq i64 [[A1]], 0
; CHECK-NEXT: [[T6:%.*]] = zext i1 [[T4]] to i8
; CHECK-NEXT: ret i8 [[T6]]
;
  %a1 = and i64 %x, 128
  %t4 = icmp eq i64 %a1, 0
  %t6 = zext i1 %t4 to i8
  ret i8 %t6
}

define i16 @pr57899(i1 %c, i32 %x) {
; CHECK-LABEL: @pr57899(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[C:%.*]], label [[IF:%.*]], label [[JOIN:%.*]]
; CHECK: if:
; CHECK-NEXT: br label [[JOIN]]
; CHECK: join:
; CHECK-NEXT: ret i16 1
;
entry:
  br i1 %c, label %if, label %join

if:
  %g.1 = select i1 false, i32 %x, i32 1
  br label %join

join:
  %g.2 = phi i32 [ %g.1, %if ], [ 1, %entry ]
  %tobool1 = icmp ne i32 %g.2, 4
  %tobool3 = icmp ne i32 %g.2, 64
  %x1 = and i1 %tobool1, %tobool3
  %conv4 = zext i1 %x1 to i16
  ret i16 %conv4
}

; negative test - but this could be transformed to eliminate a use of 't'

define i64 @and_trunc_extra_use1(i64 %x, i32 %y) {
; CHECK-LABEL: @and_trunc_extra_use1(
; CHECK-NEXT: [[T:%.*]] = trunc i64 [[X:%.*]] to i32
; CHECK-NEXT: call void @use32(i32 [[T]])
; CHECK-NEXT: [[A:%.*]] = and i32 [[T]], [[Y:%.*]]
; CHECK-NEXT: [[Z:%.*]] = zext i32 [[A]] to i64
; CHECK-NEXT: ret i64 [[Z]]
;
  %t = trunc i64 %x to i32
  call void @use32(i32 %t)
  %a = and i32 %t, %y
  %z = zext i32 %a to i64
  ret i64 %z
}

; negative test - but this could be transformed to eliminate a use of 't'

define i64 @and_trunc_extra_use1_commute(i64 %x, i32 %p) {
; CHECK-LABEL: @and_trunc_extra_use1_commute(
; CHECK-NEXT: [[Y:%.*]] = mul i32 [[P:%.*]], [[P]]
; CHECK-NEXT: [[T:%.*]] = trunc i64 [[X:%.*]] to i32
; CHECK-NEXT: call void @use32(i32 [[T]])
; CHECK-NEXT: [[A:%.*]] = and i32 [[Y]], [[T]]
; CHECK-NEXT: [[Z:%.*]] = zext i32 [[A]] to i64
; CHECK-NEXT: ret i64 [[Z]]
;
  %y = mul i32 %p, %p ; thwart complexity-based canonicalization
  %t = trunc i64 %x to i32
  call void @use32(i32 %t)
  %a = and i32 %y, %t
  %z = zext i32 %a to i64
  ret i64 %z
}
; negative test - avoid creating an extra instruction

define i64 @and_trunc_extra_use2(i64 %x, i32 %y) {
; CHECK-LABEL: @and_trunc_extra_use2(
; CHECK-NEXT: [[T:%.*]] = trunc i64 [[X:%.*]] to i32
; CHECK-NEXT: [[A:%.*]] = and i32 [[T]], [[Y:%.*]]
; CHECK-NEXT: call void @use32(i32 [[A]])
; CHECK-NEXT: [[Z:%.*]] = zext i32 [[A]] to i64
; CHECK-NEXT: ret i64 [[Z]]
;
  %t = trunc i64 %x to i32
  %a = and i32 %t, %y
  call void @use32(i32 %a)
  %z = zext i32 %a to i64
  ret i64 %z
}

; With constant mask, we duplicate it as a wider constant.

define i64 @and_trunc_extra_use2_constant(i64 %x) {
; CHECK-LABEL: @and_trunc_extra_use2_constant(
; CHECK-NEXT: [[T:%.*]] = trunc i64 [[X:%.*]] to i32
; CHECK-NEXT: [[A:%.*]] = and i32 [[T]], 42
; CHECK-NEXT: call void @use32(i32 [[A]])
; CHECK-NEXT: [[Z:%.*]] = and i64 [[X]], 42
; CHECK-NEXT: ret i64 [[Z]]
;
  %t = trunc i64 %x to i32
  %a = and i32 %t, 42
  call void @use32(i32 %a)
  %z = zext i32 %a to i64
  ret i64 %z
}
; This also works with arbitrary vector types; verify that the constant is zero-extended.

define <2 x i17> @and_trunc_extra_use3_constant_vec(<2 x i17> %x) {
; CHECK-LABEL: @and_trunc_extra_use3_constant_vec(
; CHECK-NEXT: [[T:%.*]] = trunc <2 x i17> [[X:%.*]] to <2 x i9>
; CHECK-NEXT: call void @use_vec(<2 x i9> [[T]])
; CHECK-NEXT: [[A:%.*]] = and <2 x i9> [[T]], <i9 42, i9 -3>
; CHECK-NEXT: call void @use_vec(<2 x i9> [[A]])
; CHECK-NEXT: [[Z:%.*]] = and <2 x i17> [[X]], <i17 42, i17 509>
; CHECK-NEXT: ret <2 x i17> [[Z]]
;
  %t = trunc <2 x i17> %x to <2 x i9>
  call void @use_vec(<2 x i9> %t)
  %a = and <2 x i9> %t, <i9 42, i9 -3>
  call void @use_vec(<2 x i9> %a)
  %z = zext <2 x i9> %a to <2 x i17>
  ret <2 x i17> %z
}

; negative test - would require another cast

define i64 @and_trunc_extra_use1_wider_src(i65 %x, i32 %y) {
; CHECK-LABEL: @and_trunc_extra_use1_wider_src(
; CHECK-NEXT: [[T:%.*]] = trunc i65 [[X:%.*]] to i32
; CHECK-NEXT: call void @use32(i32 [[T]])
; CHECK-NEXT: [[A:%.*]] = and i32 [[T]], [[Y:%.*]]
; CHECK-NEXT: [[Z:%.*]] = zext i32 [[A]] to i64
; CHECK-NEXT: ret i64 [[Z]]
;
  %t = trunc i65 %x to i32
  call void @use32(i32 %t)
  %a = and i32 %t, %y
  %z = zext i32 %a to i64
  ret i64 %z
}
define i16 @zext_icmp_eq0_pow2(i32 %x) {
; CHECK-LABEL: @zext_icmp_eq0_pow2(
; CHECK-NEXT: [[M:%.*]] = and i32 [[X:%.*]], 4
; CHECK-NEXT: [[I:%.*]] = icmp eq i32 [[M]], 0
; CHECK-NEXT: [[Z:%.*]] = zext i1 [[I]] to i16
; CHECK-NEXT: ret i16 [[Z]]
;
  %m = and i32 %x, 4
  %i = icmp eq i32 %m, 0
  %z = zext i1 %i to i16
  ret i16 %z
}

define i16 @zext_icmp_eq0_pow2_use1(i32 %x) {
; CHECK-LABEL: @zext_icmp_eq0_pow2_use1(
; CHECK-NEXT: [[M:%.*]] = and i32 [[X:%.*]], 4
; CHECK-NEXT: call void @use32(i32 [[M]])
; CHECK-NEXT: [[I:%.*]] = icmp eq i32 [[M]], 0
; CHECK-NEXT: [[Z:%.*]] = zext i1 [[I]] to i16
; CHECK-NEXT: ret i16 [[Z]]
;
  %m = and i32 %x, 4
  call void @use32(i32 %m)
  %i = icmp eq i32 %m, 0
  %z = zext i1 %i to i16
  ret i16 %z
}

define i16 @zext_icmp_eq0_pow2_use2(i32 %x) {
; CHECK-LABEL: @zext_icmp_eq0_pow2_use2(
; CHECK-NEXT: [[M:%.*]] = and i32 [[X:%.*]], 4
; CHECK-NEXT: [[I:%.*]] = icmp eq i32 [[M]], 0
; CHECK-NEXT: call void @use1(i1 [[I]])
; CHECK-NEXT: [[Z:%.*]] = zext i1 [[I]] to i16
; CHECK-NEXT: ret i16 [[Z]]
;
  %m = and i32 %x, 4
  %i = icmp eq i32 %m, 0
  call void @use1(i1 %i)
  %z = zext i1 %i to i16
  ret i16 %z
}

; This used to cause an infinite loop.

define i8 @zext_icmp_eq_pow2(i8 %y, i8 %x) {
; CHECK-LABEL: @zext_icmp_eq_pow2(
; CHECK-NEXT: [[SHLX:%.*]] = shl i8 [[X:%.*]], 7
; CHECK-NEXT: [[SHLY:%.*]] = shl i8 -128, [[Y:%.*]]
; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[SHLX]], [[SHLY]]
; CHECK-NEXT: [[R:%.*]] = zext i1 [[C]] to i8
; CHECK-NEXT: ret i8 [[R]]
;
  %shlx = shl i8 %x, 7
  %shly = shl i8 -128, %y
  %c = icmp eq i8 %shlx, %shly
  %r = zext i1 %c to i8
  ret i8 %r
}
define i64 @zext_icmp_eq_bool_0(ptr %ptr) {
; CHECK-LABEL: @zext_icmp_eq_bool_0(
; CHECK-NEXT: [[VAL:%.*]] = load i64, ptr [[PTR:%.*]], align 8, !range [[RNG0:![0-9]+]]
; CHECK-NEXT: [[LEN:%.*]] = xor i64 [[VAL]], 1
; CHECK-NEXT: ret i64 [[LEN]]
;
  %val = load i64, ptr %ptr, align 8, !range !{i64 0, i64 2}
  %cmp = icmp eq i64 %val, 0
  %len = zext i1 %cmp to i64
  ret i64 %len
}

define i64 @zext_icmp_eq_bool_1(ptr %ptr) {
; CHECK-LABEL: @zext_icmp_eq_bool_1(
; CHECK-NEXT: [[VAL:%.*]] = load i64, ptr [[PTR:%.*]], align 8, !range [[RNG0]]
; CHECK-NEXT: ret i64 [[VAL]]
;
  %val = load i64, ptr %ptr, align 8, !range !{i64 0, i64 2}
  %cmp = icmp eq i64 %val, 1
  %len = zext i1 %cmp to i64
  ret i64 %len
}

define i64 @zext_icmp_ne_bool_0(ptr %ptr) {
; CHECK-LABEL: @zext_icmp_ne_bool_0(
; CHECK-NEXT: [[VAL:%.*]] = load i64, ptr [[PTR:%.*]], align 8, !range [[RNG0]]
; CHECK-NEXT: ret i64 [[VAL]]
;
  %val = load i64, ptr %ptr, align 8, !range !{i64 0, i64 2}
  %cmp = icmp ne i64 %val, 0
  %len = zext i1 %cmp to i64
  ret i64 %len
}

define i64 @zext_icmp_ne_bool_1(ptr %ptr) {
; CHECK-LABEL: @zext_icmp_ne_bool_1(
; CHECK-NEXT: [[VAL:%.*]] = load i64, ptr [[PTR:%.*]], align 8, !range [[RNG0]]
; CHECK-NEXT: [[LEN:%.*]] = xor i64 [[VAL]], 1
; CHECK-NEXT: ret i64 [[LEN]]
;
  %val = load i64, ptr %ptr, align 8, !range !{i64 0, i64 2}
  %cmp = icmp ne i64 %val, 1
  %len = zext i1 %cmp to i64
  ret i64 %len
}

; https://alive2.llvm.org/ce/z/k7qosS
define i32 @zext_icmp_eq0_no_shift(ptr %ptr ) {
; CHECK-LABEL: @zext_icmp_eq0_no_shift(
; CHECK-NEXT: [[X:%.*]] = load i8, ptr [[PTR:%.*]], align 1, !range [[RNG1:![0-9]+]]
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], 1
; CHECK-NEXT: [[RES:%.*]] = zext nneg i8 [[TMP1]] to i32
; CHECK-NEXT: ret i32 [[RES]]
;
  %X = load i8, ptr %ptr,align 1, !range !{i8 0, i8 2} ; range [0, 2)
  %cmp = icmp eq i8 %X, 0
  %res = zext i1 %cmp to i32
  ret i32 %res
}
@g = external global i8

define i64 @evaluate_zexted_const_expr(i1 %c) {
; CHECK-LABEL: @evaluate_zexted_const_expr(
; CHECK-NEXT: [[AND:%.*]] = select i1 [[C:%.*]], i7 trunc (i64 add (i64 ptrtoint (ptr @g to i64), i64 1) to i7), i7 trunc (i64 add (i64 ptrtoint (ptr @g to i64), i64 2) to i7)
; CHECK-NEXT: [[EXT:%.*]] = zext i7 [[AND]] to i64
; CHECK-NEXT: ret i64 [[EXT]]
;
  %and = select i1 %c, i7 trunc (i64 add (i64 ptrtoint (ptr @g to i64), i64 1) to i7), i7 trunc (i64 add (i64 ptrtoint (ptr @g to i64), i64 2) to i7)
  %ext = zext i7 %and to i64
  ret i64 %ext
}

define i16 @zext_nneg_flag_drop(i8 %x, i16 %y) {
; CHECK-LABEL: @zext_nneg_flag_drop(
; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[X:%.*]] to i16
; CHECK-NEXT: [[OR1:%.*]] = or i16 [[EXT]], [[Y:%.*]]
; CHECK-NEXT: [[OR2:%.*]] = or i16 [[OR1]], 128
; CHECK-NEXT: ret i16 [[OR2]]
;
  %and = and i8 %x, 127
  %ext = zext nneg i8 %and to i16
  %or1 = or i16 %ext, %y
  %or2 = or i16 %or1, 128
  ret i16 %or2
}

define i32 @zext_nneg_redundant_and(i8 %a) {
; CHECK-LABEL: @zext_nneg_redundant_and(
; CHECK-NEXT: [[A_I32:%.*]] = zext nneg i8 [[A:%.*]] to i32
; CHECK-NEXT: ret i32 [[A_I32]]
;
  %a.i32 = zext nneg i8 %a to i32
  %res = and i32 %a.i32, 127
  ret i32 %res
}
; Negative test: the 'and' can't be removed.
define i32 @zext_nneg_redundant_and_neg(i8 %a) {
; CHECK-LABEL: @zext_nneg_redundant_and_neg(
; CHECK-NEXT: [[B:%.*]] = and i8 [[A:%.*]], 127
; CHECK-NEXT: [[B_I32:%.*]] = zext nneg i8 [[B]] to i32
; CHECK-NEXT: ret i32 [[B_I32]]
;
  %b = and i8 %a, 127
  %b.i32 = zext nneg i8 %b to i32
  ret i32 %b.i32
}

define i64 @zext_nneg_signbit_extract(i32 %a) nounwind {
; CHECK-LABEL: @zext_nneg_signbit_extract(
; CHECK-NEXT: ret i64 0
;
  %b = zext nneg i32 %a to i64
  %c = lshr i64 %b, 31
  ret i64 %c
}

define i64 @zext_nneg_demanded_constant(i8 %a) nounwind {
; CHECK-LABEL: @zext_nneg_demanded_constant(
; CHECK-NEXT: [[B:%.*]] = zext nneg i8 [[A:%.*]] to i64
; CHECK-NEXT: call void @use64(i64 [[B]]) #[[ATTR0:[0-9]+]]
; CHECK-NEXT: [[C:%.*]] = and i64 [[B]], 126
; CHECK-NEXT: ret i64 [[C]]
;
  %b = zext nneg i8 %a to i64
  call void @use64(i64 %b)
  %c = and i64 %b, 254
  ret i64 %c
}