It's not otherwise safe for InstCombine to add the disjoint flag when converting Add to Or. I've added the noundef attribute to preserve existing test behavior.
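For reference, here is a minimal sketch of the kind of pattern involved, adapted from the @add_and_xor test below; the function name @example and the comments are illustrative only, not part of the test file:

    ; (~x & y) + x: because ~x & y and x can never have a set bit in common
    ; (for a well-defined x), the add can be rewritten as an or.
    define i8 @example(i8 noundef %x, i8 %y) {
      %xor = xor i8 %x, -1
      %and = and i8 %xor, %y
      %add = add i8 %and, %x
      ret i8 %add
    }
    ; After -passes=instcombine, the CHECK lines below expect: %add = or i8 %y, %x

The noundef on %x mirrors the attribute this change adds to the existing tests.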
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

declare void @use(i8)
declare void @use_i1(i1)

define i32 @select_0_or_1_from_bool(i1 %x) {
; CHECK-LABEL: @select_0_or_1_from_bool(
; CHECK-NEXT:    [[NOT_X:%.*]] = xor i1 [[X:%.*]], true
; CHECK-NEXT:    [[ADD:%.*]] = zext i1 [[NOT_X]] to i32
; CHECK-NEXT:    ret i32 [[ADD]]
;
  %ext = sext i1 %x to i32
  %add = add i32 %ext, 1
  ret i32 %add
}

define <2 x i32> @select_0_or_1_from_bool_vec(<2 x i1> %x) {
; CHECK-LABEL: @select_0_or_1_from_bool_vec(
; CHECK-NEXT:    [[NOT_X:%.*]] = xor <2 x i1> [[X:%.*]], <i1 true, i1 true>
; CHECK-NEXT:    [[ADD:%.*]] = zext <2 x i1> [[NOT_X]] to <2 x i32>
; CHECK-NEXT:    ret <2 x i32> [[ADD]]
;
  %ext = sext <2 x i1> %x to <2 x i32>
  %add = add <2 x i32> %ext, <i32 1, i32 1>
  ret <2 x i32> %add
}

define i32 @select_C_minus_1_or_C_from_bool(i1 %x) {
; CHECK-LABEL: @select_C_minus_1_or_C_from_bool(
; CHECK-NEXT:    [[ADD:%.*]] = select i1 [[X:%.*]], i32 41, i32 42
; CHECK-NEXT:    ret i32 [[ADD]]
;
  %ext = sext i1 %x to i32
  %add = add i32 %ext, 42
  ret i32 %add
}

define <2 x i32> @select_C_minus_1_or_C_from_bool_vec(<2 x i1> %x) {
; CHECK-LABEL: @select_C_minus_1_or_C_from_bool_vec(
; CHECK-NEXT:    [[ADD:%.*]] = select <2 x i1> [[X:%.*]], <2 x i32> <i32 41, i32 42>, <2 x i32> <i32 42, i32 43>
; CHECK-NEXT:    ret <2 x i32> [[ADD]]
;
  %ext = sext <2 x i1> %x to <2 x i32>
  %add = add <2 x i32> %ext, <i32 42, i32 43>
  ret <2 x i32> %add
}

; This is an 'andn' of the low bit.

define i32 @flip_and_mask(i32 %x) {
; CHECK-LABEL: @flip_and_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 1
; CHECK-NEXT:    [[INC:%.*]] = xor i32 [[TMP1]], 1
; CHECK-NEXT:    ret i32 [[INC]]
;
  %shl = shl i32 %x, 31
  %shr = ashr i32 %shl, 31
  %inc = add i32 %shr, 1
  ret i32 %inc
}

define <2 x i8> @flip_and_mask_splat(<2 x i8> %x) {
; CHECK-LABEL: @flip_and_mask_splat(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i8> [[X:%.*]], <i8 1, i8 1>
; CHECK-NEXT:    [[INC:%.*]] = xor <2 x i8> [[TMP1]], <i8 1, i8 1>
; CHECK-NEXT:    ret <2 x i8> [[INC]]
;
  %shl = shl <2 x i8> %x, <i8 7, i8 7>
  %shr = ashr <2 x i8> %shl, <i8 7, i8 7>
  %inc = add <2 x i8> %shr, <i8 1, i8 1>
  ret <2 x i8> %inc
}

define i32 @test1(i32 %A) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    ret i32 [[A:%.*]]
;
  %B = add i32 %A, 0
  ret i32 %B
}

define i32 @test2(i32 %A) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    ret i32 [[A:%.*]]
;
  %B = add i32 %A, 5
  %C = add i32 %B, -5
  ret i32 %C
}

define i32 @test3(i32 %A) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    ret i32 [[A:%.*]]
;
  %B = add i32 %A, 5
  %C = sub i32 %B, 5
  ret i32 %C
}

; D = B + -A = B - A
define i32 @test4(i32 %A, i32 %B) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[D:%.*]] = sub i32 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    ret i32 [[D]]
;
  %C = sub i32 0, %A
  %D = add i32 %B, %C
  ret i32 %D
}

; D = -A + B = B - A
define i32 @test5(i32 %A, i32 %B) {
; CHECK-LABEL: @test5(
; CHECK-NEXT:    [[D:%.*]] = sub i32 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    ret i32 [[D]]
;
  %C = sub i32 0, %A
  %D = add i32 %C, %B
  ret i32 %D
}

define i32 @test5_both_nsw(i32 %A, i32 %B) {
; CHECK-LABEL: @test5_both_nsw(
; CHECK-NEXT:    [[D:%.*]] = sub nsw i32 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    ret i32 [[D]]
;
  %C = sub nsw i32 0, %A
  %D = add nsw i32 %C, %B
  ret i32 %D
}

define i32 @test5_neg_nsw(i32 %A, i32 %B) {
; CHECK-LABEL: @test5_neg_nsw(
; CHECK-NEXT:    [[D:%.*]] = sub i32 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    ret i32 [[D]]
;
  %C = sub nsw i32 0, %A
  %D = add i32 %C, %B
  ret i32 %D
}

define i32 @test5_add_nsw(i32 %A, i32 %B) {
; CHECK-LABEL: @test5_add_nsw(
; CHECK-NEXT:    [[D:%.*]] = sub i32 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    ret i32 [[D]]
;
  %C = sub i32 0, %A
  %D = add nsw i32 %C, %B
  ret i32 %D
}

define <2 x i8> @neg_op0_vec_undef_elt(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @neg_op0_vec_undef_elt(
; CHECK-NEXT:    [[R:%.*]] = sub <2 x i8> [[B:%.*]], [[A:%.*]]
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %nega = sub <2 x i8> <i8 0, i8 undef>, %a
  %r = add <2 x i8> %nega, %b
  ret <2 x i8> %r
}

define <2 x i8> @neg_neg_vec_undef_elt(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: @neg_neg_vec_undef_elt(
; CHECK-NEXT:    [[TMP1:%.*]] = add <2 x i8> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[R:%.*]] = sub <2 x i8> zeroinitializer, [[TMP1]]
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %nega = sub <2 x i8> <i8 undef, i8 0>, %a
  %negb = sub <2 x i8> <i8 undef, i8 0>, %b
  %r = add <2 x i8> %nega, %negb
  ret <2 x i8> %r
}

; C = 7*A+A == 8*A == A << 3
define i32 @test6(i32 %A) {
; CHECK-LABEL: @test6(
; CHECK-NEXT:    [[C:%.*]] = shl i32 [[A:%.*]], 3
; CHECK-NEXT:    ret i32 [[C]]
;
  %B = mul i32 7, %A
  %C = add i32 %B, %A
  ret i32 %C
}

; C = A+7*A == 8*A == A << 3
define i32 @test7(i32 %A) {
; CHECK-LABEL: @test7(
; CHECK-NEXT:    [[C:%.*]] = shl i32 [[A:%.*]], 3
; CHECK-NEXT:    ret i32 [[C]]
;
  %B = mul i32 7, %A
  %C = add i32 %A, %B
  ret i32 %C
}

; (A & C1)+(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
define i32 @test8(i32 %A, i32 %B) {
; CHECK-LABEL: @test8(
; CHECK-NEXT:    [[A1:%.*]] = and i32 [[A:%.*]], 7
; CHECK-NEXT:    [[B1:%.*]] = and i32 [[B:%.*]], 128
; CHECK-NEXT:    [[C:%.*]] = or disjoint i32 [[A1]], [[B1]]
; CHECK-NEXT:    ret i32 [[C]]
;
  %A1 = and i32 %A, 7
  %B1 = and i32 %B, 128
  %C = add i32 %A1, %B1
  ret i32 %C
}

define i32 @test9(i32 %A) {
; CHECK-LABEL: @test9(
; CHECK-NEXT:    [[C:%.*]] = shl i32 [[A:%.*]], 5
; CHECK-NEXT:    ret i32 [[C]]
;
  %B = shl i32 %A, 4
  %C = add i32 %B, %B
  ret i32 %C
}

; a != -b
|
|
define i1 @test10(i8 %a, i8 %b) {
|
|
; CHECK-LABEL: @test10(
|
|
; CHECK-NEXT: [[ADD:%.*]] = sub i8 0, [[B:%.*]]
|
|
; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[ADD]], [[A:%.*]]
|
|
; CHECK-NEXT: ret i1 [[C]]
|
|
;
|
|
%add = add i8 %a, %b
|
|
%c = icmp ne i8 %add, 0
|
|
ret i1 %c
|
|
}
|
|
|
|
define <2 x i1> @test10vec(<2 x i8> %a, <2 x i8> %b) {
|
|
; CHECK-LABEL: @test10vec(
|
|
; CHECK-NEXT: [[C:%.*]] = sub <2 x i8> zeroinitializer, [[B:%.*]]
|
|
; CHECK-NEXT: [[D:%.*]] = icmp ne <2 x i8> [[C]], [[A:%.*]]
|
|
; CHECK-NEXT: ret <2 x i1> [[D]]
|
|
;
|
|
%c = add <2 x i8> %a, %b
|
|
%d = icmp ne <2 x i8> %c, zeroinitializer
|
|
ret <2 x i1> %d
|
|
}
|
|
|
|
define i1 @test11(i8 %A) {
|
|
; CHECK-LABEL: @test11(
|
|
; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[A:%.*]], 1
|
|
; CHECK-NEXT: ret i1 [[C]]
|
|
;
|
|
%B = add i8 %A, -1
|
|
%c = icmp ne i8 %B, 0
|
|
ret i1 %c
|
|
}
|
|
|
|
define <2 x i1> @test11vec(<2 x i8> %a) {
|
|
; CHECK-LABEL: @test11vec(
|
|
; CHECK-NEXT: [[C:%.*]] = icmp ne <2 x i8> [[A:%.*]], <i8 1, i8 1>
|
|
; CHECK-NEXT: ret <2 x i1> [[C]]
|
|
;
|
|
%b = add <2 x i8> %a, <i8 -1, i8 -1>
|
|
%c = icmp ne <2 x i8> %b, zeroinitializer
|
|
ret <2 x i1> %c
|
|
}
|
|
|
|
define i8 @reassoc_shl1(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @reassoc_shl1(
|
|
; CHECK-NEXT: [[REASS_ADD:%.*]] = shl i8 [[X:%.*]], 1
|
|
; CHECK-NEXT: [[R:%.*]] = add i8 [[REASS_ADD]], [[Y:%.*]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%a = add i8 %y, %x
|
|
%r = add i8 %a, %x
|
|
ret i8 %r
|
|
}
|
|
|
|
define <2 x i8> @reassoc_shl1_commute1(<2 x i8> %x, <2 x i8> %y) {
|
|
; CHECK-LABEL: @reassoc_shl1_commute1(
|
|
; CHECK-NEXT: [[REASS_ADD:%.*]] = shl <2 x i8> [[X:%.*]], <i8 1, i8 1>
|
|
; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[REASS_ADD]], [[Y:%.*]]
|
|
; CHECK-NEXT: ret <2 x i8> [[R]]
|
|
;
|
|
%a = add <2 x i8> %x, %y
|
|
%r = add <2 x i8> %a, %x
|
|
ret <2 x i8> %r
|
|
}
|
|
|
|
define i8 @reassoc_shl1_commute2(i8 %px, i8 %py) {
|
|
; CHECK-LABEL: @reassoc_shl1_commute2(
|
|
; CHECK-NEXT: [[X:%.*]] = sdiv i8 42, [[PX:%.*]]
|
|
; CHECK-NEXT: [[Y:%.*]] = sdiv i8 43, [[PY:%.*]]
|
|
; CHECK-NEXT: [[REASS_ADD:%.*]] = shl i8 [[X]], 1
|
|
; CHECK-NEXT: [[R:%.*]] = add i8 [[Y]], [[REASS_ADD]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%x = sdiv i8 42, %px ; thwart complexity-based canonicalization
|
|
%y = sdiv i8 43, %py ; thwart complexity-based canonicalization
|
|
%a = add i8 %y, %x
|
|
%r = add i8 %x, %a
|
|
ret i8 %r
|
|
}
|
|
|
|
define i8 @reassoc_shl1_commute3(i8 %px, i8 %py) {
|
|
; CHECK-LABEL: @reassoc_shl1_commute3(
|
|
; CHECK-NEXT: [[X:%.*]] = sdiv i8 42, [[PX:%.*]]
|
|
; CHECK-NEXT: [[Y:%.*]] = sdiv i8 43, [[PY:%.*]]
|
|
; CHECK-NEXT: [[REASS_ADD:%.*]] = shl i8 [[X]], 1
|
|
; CHECK-NEXT: [[R:%.*]] = add i8 [[Y]], [[REASS_ADD]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%x = sdiv i8 42, %px ; thwart complexity-based canonicalization
|
|
%y = sdiv i8 43, %py ; thwart complexity-based canonicalization
|
|
%a = add i8 %x, %y
|
|
%r = add i8 %x, %a
|
|
ret i8 %r
|
|
}
|
|
|
|
define i8 @reassoc_shl1_extra_use(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @reassoc_shl1_extra_use(
|
|
; CHECK-NEXT: [[A:%.*]] = add i8 [[Y:%.*]], [[X:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[A]])
|
|
; CHECK-NEXT: [[R:%.*]] = add i8 [[A]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%a = add i8 %y, %x
|
|
call void @use(i8 %a)
|
|
%r = add i8 %a, %x
|
|
ret i8 %r
|
|
}
|
|
|
|
;; TODO: shl A, 1?
|
|
define i32 @test13(i32 %A, i32 %B, i32 %C) {
|
|
; CHECK-LABEL: @test13(
|
|
; CHECK-NEXT: [[D_OK:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[E_OK:%.*]] = add i32 [[D_OK]], [[C:%.*]]
|
|
; CHECK-NEXT: [[F:%.*]] = add i32 [[E_OK]], [[A]]
|
|
; CHECK-NEXT: ret i32 [[F]]
|
|
;
|
|
%D_OK = add i32 %A, %B
|
|
%E_OK = add i32 %D_OK, %C
|
|
%F = add i32 %E_OK, %A
|
|
ret i32 %F
|
|
}
|
|
|
|
define i32 @test14(i32 %offset, i32 %difference) {
|
|
; CHECK-LABEL: @test14(
|
|
; CHECK-NEXT: [[TMP_2:%.*]] = and i32 [[DIFFERENCE:%.*]], 3
|
|
; CHECK-NEXT: [[TMP_3_OK:%.*]] = add i32 [[TMP_2]], [[OFFSET:%.*]]
|
|
; CHECK-NEXT: [[TMP_5_MASK:%.*]] = and i32 [[DIFFERENCE]], -4
|
|
; CHECK-NEXT: [[TMP_8:%.*]] = add i32 [[TMP_3_OK]], [[TMP_5_MASK]]
|
|
; CHECK-NEXT: ret i32 [[TMP_8]]
|
|
;
|
|
%tmp.2 = and i32 %difference, 3
|
|
%tmp.3_OK = add i32 %tmp.2, %offset
|
|
%tmp.5.mask = and i32 %difference, -4
|
|
; == add %offset, %difference
|
|
%tmp.8 = add i32 %tmp.3_OK, %tmp.5.mask
|
|
ret i32 %tmp.8
|
|
}
|
|
|
|
; Only one bit set
|
|
define i8 @test15(i8 %A) {
|
|
; CHECK-LABEL: @test15(
|
|
; CHECK-NEXT: [[C:%.*]] = and i8 [[A:%.*]], 16
|
|
; CHECK-NEXT: ret i8 [[C]]
|
|
;
|
|
%B = add i8 %A, -64
|
|
%C = and i8 %B, 16
|
|
ret i8 %C
|
|
}
|
|
|
|
define i32 @test17(i32 %A) {
|
|
; CHECK-LABEL: @test17(
|
|
; CHECK-NEXT: [[C:%.*]] = sub i32 0, [[A:%.*]]
|
|
; CHECK-NEXT: ret i32 [[C]]
|
|
;
|
|
%B = xor i32 %A, -1
|
|
%C = add i32 %B, 1
|
|
ret i32 %C
|
|
}
|
|
|
|
define i8 @test18(i8 %A) {
|
|
; CHECK-LABEL: @test18(
|
|
; CHECK-NEXT: [[C:%.*]] = sub i8 16, [[A:%.*]]
|
|
; CHECK-NEXT: ret i8 [[C]]
|
|
;
|
|
%B = xor i8 %A, -1
|
|
%C = add i8 %B, 17
|
|
ret i8 %C
|
|
}
|
|
|
|
; ~X + -127 and (-128) - X with nsw are equally poisonous
|
|
define i8 @test18_nsw(i8 %A) {
|
|
; CHECK-LABEL: @test18_nsw(
|
|
; CHECK-NEXT: [[C:%.*]] = sub nsw i8 -128, [[A:%.*]]
|
|
; CHECK-NEXT: ret i8 [[C]]
|
|
;
|
|
%B = xor i8 %A, -1
|
|
%C = add nsw i8 %B, -127
|
|
ret i8 %C
|
|
}
|
|
|
|
; Unlike nsw above, nuw can't be propagated.
|
|
define i8 @test18_nuw(i8 %A) {
|
|
; CHECK-LABEL: @test18_nuw(
|
|
; CHECK-NEXT: [[C:%.*]] = sub i8 -128, [[A:%.*]]
|
|
; CHECK-NEXT: ret i8 [[C]]
|
|
;
|
|
%B = xor i8 %A, -1
|
|
%C = add nuw i8 %B, -127
|
|
ret i8 %C
|
|
}
|
|
|
|
; 127 - X with nsw will be more poisonous than ~X + -128 with nsw. (see X = -1)
|
|
define i8 @test18_nsw_overflow(i8 %A) {
|
|
; CHECK-LABEL: @test18_nsw_overflow(
|
|
; CHECK-NEXT: [[C:%.*]] = sub i8 127, [[A:%.*]]
|
|
; CHECK-NEXT: ret i8 [[C]]
|
|
;
|
|
%B = xor i8 %A, -1
|
|
%C = add nsw i8 %B, -128
|
|
ret i8 %C
|
|
}
|
|
|
|
define <2 x i64> @test18vec(<2 x i64> %A) {
|
|
; CHECK-LABEL: @test18vec(
|
|
; CHECK-NEXT: [[ADD:%.*]] = sub <2 x i64> <i64 1, i64 2>, [[A:%.*]]
|
|
; CHECK-NEXT: ret <2 x i64> [[ADD]]
|
|
;
|
|
%xor = xor <2 x i64> %A, <i64 -1, i64 -1>
|
|
%add = add <2 x i64> %xor, <i64 2, i64 3>
|
|
ret <2 x i64> %add
|
|
}
|
|
|
|
define <2 x i8> @test18vec_nsw(<2 x i8> %A) {
|
|
; CHECK-LABEL: @test18vec_nsw(
|
|
; CHECK-NEXT: [[C:%.*]] = sub nsw <2 x i8> <i8 -124, i8 -125>, [[A:%.*]]
|
|
; CHECK-NEXT: ret <2 x i8> [[C]]
|
|
;
|
|
%B = xor <2 x i8> %A, <i8 -1, i8 -1>
|
|
%C = add nsw <2 x i8> %B, <i8 -123, i8 -124>
|
|
ret <2 x i8> %C
|
|
}
|
|
|
|
define <2 x i8> @test18vec_nsw_false(<2 x i8> %A) {
|
|
; CHECK-LABEL: @test18vec_nsw_false(
|
|
; CHECK-NEXT: [[C:%.*]] = sub nsw <2 x i8> <i8 -125, i8 -126>, [[A:%.*]]
|
|
; CHECK-NEXT: ret <2 x i8> [[C]]
|
|
;
|
|
%B = xor <2 x i8> %A, <i8 -1, i8 -1>
|
|
%C = add nsw <2 x i8> %B, <i8 -124, i8 -125>
|
|
ret <2 x i8> %C
|
|
}
|
|
|
|
|
|
define <2 x i8> @test18vec_nuw(<2 x i8> %A) {
|
|
; CHECK-LABEL: @test18vec_nuw(
|
|
; CHECK-NEXT: [[C:%.*]] = sub <2 x i8> <i8 -128, i8 -127>, [[A:%.*]]
|
|
; CHECK-NEXT: ret <2 x i8> [[C]]
|
|
;
|
|
%B = xor <2 x i8> %A, <i8 -1, i8 -1>
|
|
%C = add nuw <2 x i8> %B, <i8 -127, i8 -126>
|
|
ret <2 x i8> %C
|
|
}
|
|
|
|
define <2 x i8> @test18vec_nsw_overflow(<2 x i8> %A) {
|
|
; CHECK-LABEL: @test18vec_nsw_overflow(
|
|
; CHECK-NEXT: [[C:%.*]] = sub <2 x i8> <i8 -128, i8 127>, [[A:%.*]]
|
|
; CHECK-NEXT: ret <2 x i8> [[C]]
|
|
;
|
|
%B = xor <2 x i8> %A, <i8 -1, i8 -1>
|
|
%C = add nsw <2 x i8> %B, <i8 -127, i8 -128>
|
|
ret <2 x i8> %C
|
|
}
|
|
|
|
define i32 @test19(i1 %C) {
|
|
; CHECK-LABEL: @test19(
|
|
; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], i32 1123, i32 133
|
|
; CHECK-NEXT: ret i32 [[V]]
|
|
;
|
|
%A = select i1 %C, i32 1000, i32 10
|
|
%V = add i32 %A, 123
|
|
ret i32 %V
|
|
}
|
|
|
|
define <2 x i32> @test19vec(i1 %C) {
|
|
; CHECK-LABEL: @test19vec(
|
|
; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 1123, i32 1123>, <2 x i32> <i32 133, i32 133>
|
|
; CHECK-NEXT: ret <2 x i32> [[V]]
|
|
;
|
|
%A = select i1 %C, <2 x i32> <i32 1000, i32 1000>, <2 x i32> <i32 10, i32 10>
|
|
%V = add <2 x i32> %A, <i32 123, i32 123>
|
|
ret <2 x i32> %V
|
|
}
|
|
|
|
; This is an InstSimplify fold, but test it here to make sure that
|
|
; InstCombine does not prevent the fold.
|
|
; With NSW, add of sign bit -> or of sign bit.
|
|
|
|
define i32 @test20(i32 %x) {
|
|
; CHECK-LABEL: @test20(
|
|
; CHECK-NEXT: ret i32 [[X:%.*]]
|
|
;
|
|
%y = xor i32 %x, -2147483648
|
|
%z = add nsw i32 %y, -2147483648
|
|
ret i32 %z
|
|
}
|
|
|
|
define i32 @xor_sign_bit(i32 %x) {
|
|
; CHECK-LABEL: @xor_sign_bit(
|
|
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[X:%.*]], -2147483606
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%xor = xor i32 %x, 2147483648
|
|
%add = add i32 %xor, 42
|
|
ret i32 %add
|
|
}
|
|
|
|
define <2 x i32> @xor_sign_bit_vec_splat(<2 x i32> %x) {
|
|
; CHECK-LABEL: @xor_sign_bit_vec_splat(
|
|
; CHECK-NEXT: [[ADD:%.*]] = add <2 x i32> [[X:%.*]], <i32 -2147483606, i32 -2147483606>
|
|
; CHECK-NEXT: ret <2 x i32> [[ADD]]
|
|
;
|
|
%xor = xor <2 x i32> %x, <i32 2147483648, i32 2147483648>
|
|
%add = add <2 x i32> %xor, <i32 42, i32 42>
|
|
ret <2 x i32> %add
|
|
}
|
|
|
|
; No-wrap info allows converting the add to 'or'.
|
|
|
|
define i8 @add_nsw_signbit(i8 %x) {
|
|
; CHECK-LABEL: @add_nsw_signbit(
|
|
; CHECK-NEXT: [[Y:%.*]] = or i8 [[X:%.*]], -128
|
|
; CHECK-NEXT: ret i8 [[Y]]
|
|
;
|
|
%y = add nsw i8 %x, -128
|
|
ret i8 %y
|
|
}
|
|
|
|
; No-wrap info allows converting the add to 'or'.
|
|
|
|
define i8 @add_nuw_signbit(i8 %x) {
|
|
; CHECK-LABEL: @add_nuw_signbit(
|
|
; CHECK-NEXT: [[Y:%.*]] = or i8 [[X:%.*]], -128
|
|
; CHECK-NEXT: ret i8 [[Y]]
|
|
;
|
|
%y = add nuw i8 %x, 128
|
|
ret i8 %y
|
|
}
|
|
|
|
define i32 @add_nsw_sext_add(i8 %x) {
|
|
; CHECK-LABEL: @add_nsw_sext_add(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[X:%.*]] to i32
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[TMP1]], 398
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%add = add nsw i8 %x, 42
|
|
%ext = sext i8 %add to i32
|
|
%r = add i32 %ext, 356
|
|
ret i32 %r
|
|
}
|
|
|
|
; Negative test - extra use of the sext means increase of instructions.
|
|
|
|
define i32 @add_nsw_sext_add_extra_use_1(i8 %x, ptr %p) {
|
|
; CHECK-LABEL: @add_nsw_sext_add_extra_use_1(
|
|
; CHECK-NEXT: [[ADD:%.*]] = add nsw i8 [[X:%.*]], 42
|
|
; CHECK-NEXT: [[EXT:%.*]] = sext i8 [[ADD]] to i32
|
|
; CHECK-NEXT: store i32 [[EXT]], ptr [[P:%.*]], align 4
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[EXT]], 356
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%add = add nsw i8 %x, 42
|
|
%ext = sext i8 %add to i32
|
|
store i32 %ext, ptr %p
|
|
%r = add i32 %ext, 356
|
|
ret i32 %r
|
|
}
|
|
|
|
define <2 x i32> @add_nsw_sext_add_vec_extra_use_2(<2 x i8> %x, ptr %p) {
|
|
; CHECK-LABEL: @add_nsw_sext_add_vec_extra_use_2(
|
|
; CHECK-NEXT: [[ADD:%.*]] = add nsw <2 x i8> [[X:%.*]], <i8 42, i8 -5>
|
|
; CHECK-NEXT: store <2 x i8> [[ADD]], ptr [[P:%.*]], align 2
|
|
; CHECK-NEXT: [[TMP1:%.*]] = sext <2 x i8> [[X]] to <2 x i32>
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw <2 x i32> [[TMP1]], <i32 398, i32 7>
|
|
; CHECK-NEXT: ret <2 x i32> [[R]]
|
|
;
|
|
%add = add nsw <2 x i8> %x, <i8 42, i8 -5>
|
|
store <2 x i8> %add, ptr %p
|
|
%ext = sext <2 x i8> %add to <2 x i32>
|
|
%r = add <2 x i32> %ext, <i32 356, i32 12>
|
|
ret <2 x i32> %r
|
|
}
|
|
|
|
define <2 x i32> @add_nuw_zext_add_vec(<2 x i16> %x) {
|
|
; CHECK-LABEL: @add_nuw_zext_add_vec(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i16> [[X:%.*]] to <2 x i32>
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw <2 x i32> [[TMP1]], <i32 65850, i32 -7>
|
|
; CHECK-NEXT: ret <2 x i32> [[R]]
|
|
;
|
|
%add = add nuw <2 x i16> %x, <i16 -42, i16 5>
|
|
%ext = zext <2 x i16> %add to <2 x i32>
|
|
%r = add <2 x i32> %ext, <i32 356, i32 -12>
|
|
ret <2 x i32> %r
|
|
}
|
|
|
|
; Negative test - extra use of the zext means increase of instructions.
|
|
|
|
define i64 @add_nuw_zext_add_extra_use_1(i8 %x, ptr %p) {
|
|
; CHECK-LABEL: @add_nuw_zext_add_extra_use_1(
|
|
; CHECK-NEXT: [[ADD:%.*]] = add nuw i8 [[X:%.*]], 42
|
|
; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[ADD]] to i64
|
|
; CHECK-NEXT: store i64 [[EXT]], ptr [[P:%.*]], align 4
|
|
; CHECK-NEXT: [[R:%.*]] = add nuw nsw i64 [[EXT]], 356
|
|
; CHECK-NEXT: ret i64 [[R]]
|
|
;
|
|
%add = add nuw i8 %x, 42
|
|
%ext = zext i8 %add to i64
|
|
store i64 %ext, ptr %p
|
|
%r = add i64 %ext, 356
|
|
ret i64 %r
|
|
}
|
|
|
|
define i64 @add_nuw_zext_add_extra_use_2(i8 %x, ptr %p) {
|
|
; CHECK-LABEL: @add_nuw_zext_add_extra_use_2(
|
|
; CHECK-NEXT: [[ADD:%.*]] = add nuw i8 [[X:%.*]], 42
|
|
; CHECK-NEXT: store i8 [[ADD]], ptr [[P:%.*]], align 1
|
|
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[X]] to i64
|
|
; CHECK-NEXT: [[R:%.*]] = add nuw nsw i64 [[TMP1]], -314
|
|
; CHECK-NEXT: ret i64 [[R]]
|
|
;
|
|
%add = add nuw i8 %x, 42
|
|
store i8 %add, ptr %p
|
|
%ext = zext i8 %add to i64
|
|
%r = add i64 %ext, -356
|
|
ret i64 %r
|
|
}
|
|
|
|
define i1 @test21(i32 %x) {
|
|
; CHECK-LABEL: @test21(
|
|
; CHECK-NEXT: [[Y:%.*]] = icmp eq i32 [[X:%.*]], 119
|
|
; CHECK-NEXT: ret i1 [[Y]]
|
|
;
|
|
%t = add i32 %x, 4
|
|
%y = icmp eq i32 %t, 123
|
|
ret i1 %y
|
|
}
|
|
|
|
define <2 x i1> @test21vec(<2 x i32> %x) {
|
|
; CHECK-LABEL: @test21vec(
|
|
; CHECK-NEXT: [[Y:%.*]] = icmp eq <2 x i32> [[X:%.*]], <i32 119, i32 119>
|
|
; CHECK-NEXT: ret <2 x i1> [[Y]]
|
|
;
|
|
%t = add <2 x i32> %x, <i32 4, i32 4>
|
|
%y = icmp eq <2 x i32> %t, <i32 123, i32 123>
|
|
ret <2 x i1> %y
|
|
}
|
|
|
|
define i32 @test22(i32 %V) {
|
|
; CHECK-LABEL: @test22(
|
|
; CHECK-NEXT: switch i32 [[V:%.*]], label [[DEFAULT:%.*]] [
|
|
; CHECK-NEXT: i32 10, label [[LAB1:%.*]]
|
|
; CHECK-NEXT: i32 20, label [[LAB2:%.*]]
|
|
; CHECK-NEXT: ]
|
|
; CHECK: Default:
|
|
; CHECK-NEXT: ret i32 123
|
|
; CHECK: Lab1:
|
|
; CHECK-NEXT: ret i32 12312
|
|
; CHECK: Lab2:
|
|
; CHECK-NEXT: ret i32 1231231
|
|
;
|
|
%V2 = add i32 %V, 10
|
|
switch i32 %V2, label %Default [
|
|
i32 20, label %Lab1
|
|
i32 30, label %Lab2
|
|
]
|
|
|
|
Default: ; preds = %0
|
|
ret i32 123
|
|
|
|
Lab1: ; preds = %0
|
|
ret i32 12312
|
|
|
|
Lab2: ; preds = %0
|
|
ret i32 1231231
|
|
}
|
|
|
|
define i32 @test23(i1 %C, i32 %a) {
|
|
; CHECK-LABEL: @test23(
|
|
; CHECK-NEXT: entry:
|
|
; CHECK-NEXT: br i1 [[C:%.*]], label [[ENDIF:%.*]], label [[ELSE:%.*]]
|
|
; CHECK: else:
|
|
; CHECK-NEXT: br label [[ENDIF]]
|
|
; CHECK: endif:
|
|
; CHECK-NEXT: [[B_0:%.*]] = phi i32 [ 1, [[ENTRY:%.*]] ], [ 2, [[ELSE]] ]
|
|
; CHECK-NEXT: ret i32 [[B_0]]
|
|
;
|
|
entry:
|
|
br i1 %C, label %endif, label %else
|
|
|
|
else: ; preds = %entry
|
|
br label %endif
|
|
|
|
endif: ; preds = %else, %entry
|
|
%b.0 = phi i32 [ 0, %entry ], [ 1, %else ]
|
|
%tmp.4 = add i32 %b.0, 1
|
|
ret i32 %tmp.4
|
|
}
|
|
|
|
define i32 @test24(i32 %A) {
|
|
; CHECK-LABEL: @test24(
|
|
; CHECK-NEXT: [[B:%.*]] = shl i32 [[A:%.*]], 1
|
|
; CHECK-NEXT: ret i32 [[B]]
|
|
;
|
|
%B = add i32 %A, 1
|
|
%C = shl i32 %B, 1
|
|
%D = sub i32 %C, 2
|
|
ret i32 %D
|
|
}
|
|
|
|
define i64 @test25(i64 %Y) {
|
|
; CHECK-LABEL: @test25(
|
|
; CHECK-NEXT: [[TMP_8:%.*]] = shl i64 [[Y:%.*]], 3
|
|
; CHECK-NEXT: ret i64 [[TMP_8]]
|
|
;
|
|
%tmp.4 = shl i64 %Y, 2
|
|
%tmp.12 = shl i64 %Y, 2
|
|
%tmp.8 = add i64 %tmp.4, %tmp.12
|
|
ret i64 %tmp.8
|
|
}
|
|
|
|
define i32 @test26(i32 %A, i32 %B) {
|
|
; CHECK-LABEL: @test26(
|
|
; CHECK-NEXT: ret i32 [[A:%.*]]
|
|
;
|
|
%C = add i32 %A, %B
|
|
%D = sub i32 %C, %B
|
|
ret i32 %D
|
|
}
|
|
|
|
; Fold add through select.
|
|
define i32 @test27(i1 %C, i32 %X, i32 %Y) {
|
|
; CHECK-LABEL: @test27(
|
|
; CHECK-NEXT: [[C_UPGRD_1_V:%.*]] = select i1 [[C:%.*]], i32 [[X:%.*]], i32 123
|
|
; CHECK-NEXT: ret i32 [[C_UPGRD_1_V]]
|
|
;
|
|
%A = add i32 %X, %Y
|
|
%B = add i32 %Y, 123
|
|
%C.upgrd.1 = select i1 %C, i32 %A, i32 %B
|
|
%D = sub i32 %C.upgrd.1, %Y
|
|
ret i32 %D
|
|
}
|
|
|
|
define i32 @test28(i32 %X) {
|
|
; CHECK-LABEL: @test28(
|
|
; CHECK-NEXT: [[Z:%.*]] = sub i32 -1192, [[X:%.*]]
|
|
; CHECK-NEXT: ret i32 [[Z]]
|
|
;
|
|
%Y = add i32 %X, 1234
|
|
%Z = sub i32 42, %Y
|
|
ret i32 %Z
|
|
}
|
|
|
|
define i32 @test29(i32 %x, i32 %y) {
|
|
; CHECK-LABEL: @test29(
|
|
; CHECK-NEXT: [[TMP_2:%.*]] = sub i32 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[TMP_7:%.*]] = and i32 [[X]], 63
|
|
; CHECK-NEXT: [[TMP_9:%.*]] = and i32 [[TMP_2]], -64
|
|
; CHECK-NEXT: [[TMP_10:%.*]] = or disjoint i32 [[TMP_7]], [[TMP_9]]
|
|
; CHECK-NEXT: ret i32 [[TMP_10]]
|
|
;
|
|
%tmp.2 = sub i32 %x, %y
|
|
%tmp.2.mask = and i32 %tmp.2, 63
|
|
%tmp.6 = add i32 %tmp.2.mask, %y
|
|
%tmp.7 = and i32 %tmp.6, 63
|
|
%tmp.9 = and i32 %tmp.2, -64
|
|
%tmp.10 = or i32 %tmp.7, %tmp.9
|
|
ret i32 %tmp.10
|
|
}
|
|
|
|
; Add of sign bit -> xor of sign bit.
|
|
define i64 @test30(i64 %x) {
|
|
; CHECK-LABEL: @test30(
|
|
; CHECK-NEXT: ret i64 [[X:%.*]]
|
|
;
|
|
%tmp.2 = xor i64 %x, -9223372036854775808
|
|
%tmp.4 = add i64 %tmp.2, -9223372036854775808
|
|
ret i64 %tmp.4
|
|
}
|
|
|
|
define i32 @test31(i32 %A) {
|
|
; CHECK-LABEL: @test31(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[A:%.*]], 5
|
|
; CHECK-NEXT: ret i32 [[TMP1]]
|
|
;
|
|
%B = add i32 %A, 4
|
|
%C = mul i32 %B, 5
|
|
%D = sub i32 %C, 20
|
|
ret i32 %D
|
|
}
|
|
|
|
define i32 @test32(i32 %A) {
|
|
; CHECK-LABEL: @test32(
|
|
; CHECK-NEXT: [[B:%.*]] = shl i32 [[A:%.*]], 2
|
|
; CHECK-NEXT: ret i32 [[B]]
|
|
;
|
|
%B = add i32 %A, 4
|
|
%C = shl i32 %B, 2
|
|
%D = sub i32 %C, 16
|
|
ret i32 %D
|
|
}
|
|
|
|
define i8 @test33(i8 %A) {
|
|
; CHECK-LABEL: @test33(
|
|
; CHECK-NEXT: [[C:%.*]] = or i8 [[A:%.*]], 1
|
|
; CHECK-NEXT: ret i8 [[C]]
|
|
;
|
|
%B = and i8 %A, -2
|
|
%C = add i8 %B, 1
|
|
ret i8 %C
|
|
}
|
|
|
|
define i8 @test34(i8 %A) {
|
|
; CHECK-LABEL: @test34(
|
|
; CHECK-NEXT: [[C:%.*]] = and i8 [[A:%.*]], 12
|
|
; CHECK-NEXT: ret i8 [[C]]
|
|
;
|
|
%B = add i8 %A, 64
|
|
%C = and i8 %B, 12
|
|
ret i8 %C
|
|
}
|
|
|
|
; If all bits affected by the add are included
|
|
; in the mask, do the mask op before the add.
|
|
|
|
define i8 @masked_add(i8 %x) {
|
|
; CHECK-LABEL: @masked_add(
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], -16
|
|
; CHECK-NEXT: [[R:%.*]] = add i8 [[AND]], 96
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%and = and i8 %x, 240 ; 0xf0
|
|
%r = add i8 %and, 96 ; 0x60
|
|
ret i8 %r
|
|
}
|
|
|
|
define <2 x i8> @masked_add_splat(<2 x i8> %x) {
|
|
; CHECK-LABEL: @masked_add_splat(
|
|
; CHECK-NEXT: [[AND:%.*]] = and <2 x i8> [[X:%.*]], <i8 -64, i8 -64>
|
|
; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[AND]], <i8 64, i8 64>
|
|
; CHECK-NEXT: ret <2 x i8> [[R]]
|
|
;
|
|
%and = and <2 x i8> %x, <i8 192, i8 192> ; 0xc0
|
|
%r = add <2 x i8> %and, <i8 64, i8 64> ; 0x40
|
|
ret <2 x i8> %r
|
|
}
|
|
|
|
define i8 @not_masked_add(i8 %x) {
|
|
; CHECK-LABEL: @not_masked_add(
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], 112
|
|
; CHECK-NEXT: [[R:%.*]] = add nuw i8 [[AND]], 96
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%and = and i8 %x, 112 ; 0x70
|
|
%r = add i8 %and, 96 ; 0x60
|
|
ret i8 %r
|
|
}
|
|
|
|
define i8 @masked_add_multi_use(i8 %x) {
|
|
; CHECK-LABEL: @masked_add_multi_use(
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], -16
|
|
; CHECK-NEXT: [[R:%.*]] = add i8 [[AND]], 96
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%and = and i8 %x, -16 ; 0xf0
|
|
%r = add i8 %and, 96 ; 0x60
|
|
call void @use(i8 %and) ; extra use
|
|
ret i8 %r
|
|
}
|
|
|
|
define i32 @test35(i32 %a) {
|
|
; CHECK-LABEL: @test35(
|
|
; CHECK-NEXT: ret i32 -1
|
|
;
|
|
%tmpnot = xor i32 %a, -1
|
|
%tmp2 = add i32 %tmpnot, %a
|
|
ret i32 %tmp2
|
|
}
|
|
|
|
define i32 @test36(i32 %a) {
|
|
; CHECK-LABEL: @test36(
|
|
; CHECK-NEXT: ret i32 0
|
|
;
|
|
%x = and i32 %a, -2
|
|
%y = and i32 %a, -126
|
|
%z = add i32 %x, %y
|
|
%q = and i32 %z, 1 ; always zero
|
|
ret i32 %q
|
|
}
|
|
|
|
define i1 @test37(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @test37(
|
|
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
|
|
; CHECK-NEXT: ret i1 [[CMP]]
|
|
;
|
|
%add = add i32 %a, %b
|
|
%cmp = icmp eq i32 %add, %a
|
|
ret i1 %cmp
|
|
}
|
|
|
|
define i1 @test38(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @test38(
|
|
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 0
|
|
; CHECK-NEXT: ret i1 [[CMP]]
|
|
;
|
|
%add = add i32 %a, %b
|
|
%cmp = icmp eq i32 %add, %b
|
|
ret i1 %cmp
|
|
}
|
|
|
|
define i1 @test39(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @test39(
|
|
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
|
|
; CHECK-NEXT: ret i1 [[CMP]]
|
|
;
|
|
%add = add i32 %b, %a
|
|
%cmp = icmp eq i32 %add, %a
|
|
ret i1 %cmp
|
|
}
|
|
|
|
define i1 @test40(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @test40(
|
|
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 0
|
|
; CHECK-NEXT: ret i1 [[CMP]]
|
|
;
|
|
%add = add i32 %b, %a
|
|
%cmp = icmp eq i32 %add, %b
|
|
ret i1 %cmp
|
|
}
|
|
|
|
; (add (zext (add nuw X, C2)), C) --> (zext (add nuw X, C2 + C))
|
|
|
|
define i64 @test41(i32 %a) {
|
|
; CHECK-LABEL: @test41(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add nuw i32 [[A:%.*]], 15
|
|
; CHECK-NEXT: [[SUB:%.*]] = zext i32 [[TMP1]] to i64
|
|
; CHECK-NEXT: ret i64 [[SUB]]
|
|
;
|
|
%add = add nuw i32 %a, 16
|
|
%zext = zext i32 %add to i64
|
|
%sub = add i64 %zext, -1
|
|
ret i64 %sub
|
|
}
|
|
|
|
; (add (zext (add nuw X, C2)), C) --> (zext (add nuw X, C2 + C))
|
|
|
|
define <2 x i64> @test41vec(<2 x i32> %a) {
|
|
; CHECK-LABEL: @test41vec(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add nuw <2 x i32> [[A:%.*]], <i32 15, i32 15>
|
|
; CHECK-NEXT: [[SUB:%.*]] = zext <2 x i32> [[TMP1]] to <2 x i64>
|
|
; CHECK-NEXT: ret <2 x i64> [[SUB]]
|
|
;
|
|
%add = add nuw <2 x i32> %a, <i32 16, i32 16>
|
|
%zext = zext <2 x i32> %add to <2 x i64>
|
|
%sub = add <2 x i64> %zext, <i64 -1, i64 -1>
|
|
ret <2 x i64> %sub
|
|
}
|
|
|
|
define <2 x i64> @test41vec_and_multiuse(<2 x i32> %a) {
|
|
; CHECK-LABEL: @test41vec_and_multiuse(
|
|
; CHECK-NEXT: [[ADD:%.*]] = add nuw <2 x i32> [[A:%.*]], <i32 16, i32 16>
|
|
; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i32> [[ADD]] to <2 x i64>
|
|
; CHECK-NEXT: [[REASS_ADD:%.*]] = shl nuw nsw <2 x i64> [[ZEXT]], <i64 1, i64 1>
|
|
; CHECK-NEXT: [[EXTRAUSE:%.*]] = add nsw <2 x i64> [[REASS_ADD]], <i64 -1, i64 -1>
|
|
; CHECK-NEXT: ret <2 x i64> [[EXTRAUSE]]
|
|
;
|
|
%add = add nuw <2 x i32> %a, <i32 16, i32 16>
|
|
%zext = zext <2 x i32> %add to <2 x i64>
|
|
%sub = add <2 x i64> %zext, <i64 -1, i64 -1>
|
|
%extrause = add <2 x i64> %zext, %sub
|
|
ret <2 x i64> %extrause
|
|
}
|
|
|
|
define i32 @test42(i1 %C) {
|
|
; CHECK-LABEL: @test42(
|
|
; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], i32 1123, i32 133
|
|
; CHECK-NEXT: ret i32 [[V]]
|
|
;
|
|
%A = select i1 %C, i32 1000, i32 10
|
|
%V = add i32 123, %A
|
|
ret i32 %V
|
|
}
|
|
|
|
define <2 x i32> @test42vec(i1 %C) {
|
|
; CHECK-LABEL: @test42vec(
|
|
; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 1123, i32 1123>, <2 x i32> <i32 133, i32 133>
|
|
; CHECK-NEXT: ret <2 x i32> [[V]]
|
|
;
|
|
%A = select i1 %C, <2 x i32> <i32 1000, i32 1000>, <2 x i32> <i32 10, i32 10>
|
|
%V = add <2 x i32> <i32 123, i32 123>, %A
|
|
ret <2 x i32> %V
|
|
}
|
|
|
|
define <2 x i32> @test42vec2(i1 %C) {
|
|
; CHECK-LABEL: @test42vec2(
|
|
; CHECK-NEXT: [[V:%.*]] = select i1 [[C:%.*]], <2 x i32> <i32 1123, i32 2833>, <2 x i32> <i32 133, i32 363>
|
|
; CHECK-NEXT: ret <2 x i32> [[V]]
|
|
;
|
|
%A = select i1 %C, <2 x i32> <i32 1000, i32 2500>, <2 x i32> <i32 10, i32 30>
|
|
%V = add <2 x i32> <i32 123, i32 333>, %A
|
|
ret <2 x i32> %V
|
|
}
|
|
|
|
define i32 @test55(i1 %which) {
|
|
; CHECK-LABEL: @test55(
|
|
; CHECK-NEXT: entry:
|
|
; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
|
|
; CHECK: delay:
|
|
; CHECK-NEXT: br label [[FINAL]]
|
|
; CHECK: final:
|
|
; CHECK-NEXT: [[A:%.*]] = phi i32 [ 1123, [[ENTRY:%.*]] ], [ 133, [[DELAY]] ]
|
|
; CHECK-NEXT: ret i32 [[A]]
|
|
;
|
|
entry:
|
|
br i1 %which, label %final, label %delay
|
|
|
|
delay:
|
|
br label %final
|
|
|
|
final:
|
|
%A = phi i32 [ 1000, %entry ], [ 10, %delay ]
|
|
%value = add i32 123, %A
|
|
ret i32 %value
|
|
}
|
|
|
|
define <2 x i32> @test43vec(i1 %which) {
|
|
; CHECK-LABEL: @test43vec(
|
|
; CHECK-NEXT: entry:
|
|
; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
|
|
; CHECK: delay:
|
|
; CHECK-NEXT: br label [[FINAL]]
|
|
; CHECK: final:
|
|
; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 1123, i32 1123>, [[ENTRY:%.*]] ], [ <i32 133, i32 133>, [[DELAY]] ]
|
|
; CHECK-NEXT: ret <2 x i32> [[A]]
|
|
;
|
|
entry:
|
|
br i1 %which, label %final, label %delay
|
|
|
|
delay:
|
|
br label %final
|
|
|
|
final:
|
|
%A = phi <2 x i32> [ <i32 1000, i32 1000>, %entry ], [ <i32 10, i32 10>, %delay ]
|
|
%value = add <2 x i32> <i32 123, i32 123>, %A
|
|
ret <2 x i32> %value
|
|
}
|
|
|
|
define <2 x i32> @test43vec2(i1 %which) {
|
|
; CHECK-LABEL: @test43vec2(
|
|
; CHECK-NEXT: entry:
|
|
; CHECK-NEXT: br i1 [[WHICH:%.*]], label [[FINAL:%.*]], label [[DELAY:%.*]]
|
|
; CHECK: delay:
|
|
; CHECK-NEXT: br label [[FINAL]]
|
|
; CHECK: final:
|
|
; CHECK-NEXT: [[A:%.*]] = phi <2 x i32> [ <i32 1123, i32 2833>, [[ENTRY:%.*]] ], [ <i32 133, i32 363>, [[DELAY]] ]
|
|
; CHECK-NEXT: ret <2 x i32> [[A]]
|
|
;
|
|
entry:
|
|
br i1 %which, label %final, label %delay
|
|
|
|
delay:
|
|
br label %final
|
|
|
|
final:
|
|
%A = phi <2 x i32> [ <i32 1000, i32 2500>, %entry ], [ <i32 10, i32 30>, %delay ]
|
|
%value = add <2 x i32> <i32 123, i32 333>, %A
|
|
ret <2 x i32> %value
|
|
}
|
|
|
|
; E = (A + 1) + ~B = A - B
|
|
define i32 @add_not_increment(i32 %A, i32 %B) {
|
|
; CHECK-LABEL: @add_not_increment(
|
|
; CHECK-NEXT: [[E:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: ret i32 [[E]]
|
|
;
|
|
%C = xor i32 %B, -1
|
|
%D = add i32 %A, 1
|
|
%E = add i32 %D, %C
|
|
ret i32 %E
|
|
}
|
|
|
|
; E = (A + 1) + ~B = A - B
|
|
define <2 x i32> @add_not_increment_vec(<2 x i32> %A, <2 x i32> %B) {
|
|
; CHECK-LABEL: @add_not_increment_vec(
|
|
; CHECK-NEXT: [[E:%.*]] = sub <2 x i32> [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: ret <2 x i32> [[E]]
|
|
;
|
|
%C = xor <2 x i32> %B, <i32 -1, i32 -1>
|
|
%D = add <2 x i32> %A, <i32 1, i32 1>
|
|
%E = add <2 x i32> %D, %C
|
|
ret <2 x i32> %E
|
|
}
|
|
|
|
; E = ~B + (1 + A) = A - B
|
|
define i32 @add_not_increment_commuted(i32 %A, i32 %B) {
|
|
; CHECK-LABEL: @add_not_increment_commuted(
|
|
; CHECK-NEXT: [[E:%.*]] = sub i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: ret i32 [[E]]
|
|
;
|
|
%C = xor i32 %B, -1
|
|
%D = add i32 %A, 1
|
|
%E = add i32 %C, %D
|
|
ret i32 %E
|
|
}
|
|
|
|
; E = (A + ~B) + 1 = A - B
|
|
define i32 @add_to_sub(i32 %M, i32 %B) {
|
|
; CHECK-LABEL: @add_to_sub(
|
|
; CHECK-NEXT: [[A:%.*]] = mul i32 [[M:%.*]], 42
|
|
; CHECK-NEXT: [[E:%.*]] = sub i32 [[A]], [[B:%.*]]
|
|
; CHECK-NEXT: ret i32 [[E]]
|
|
;
|
|
%A = mul i32 %M, 42 ; thwart complexity-based ordering
|
|
%C = xor i32 %B, -1
|
|
%D = add i32 %A, %C
|
|
%E = add i32 %D, 1
|
|
ret i32 %E
|
|
}
|
|
|
|
; E = (~B + A) + 1 = A - B
|
|
define i32 @add_to_sub2(i32 %A, i32 %M) {
|
|
; CHECK-LABEL: @add_to_sub2(
|
|
; CHECK-NEXT: [[B_NEG:%.*]] = mul i32 [[M:%.*]], -42
|
|
; CHECK-NEXT: [[E:%.*]] = add i32 [[B_NEG]], [[A:%.*]]
|
|
; CHECK-NEXT: ret i32 [[E]]
|
|
;
|
|
%B = mul i32 %M, 42 ; thwart complexity-based ordering
|
|
%C = xor i32 %B, -1
|
|
%D = add i32 %C, %A
|
|
%E = add i32 %D, 1
|
|
ret i32 %E
|
|
}
|
|
|
|
; (X | C1) + C2 --> (X | C1) ^ C1 iff (C1 == -C2)
|
|
define i32 @test44(i32 %A) {
|
|
; CHECK-LABEL: @test44(
|
|
; CHECK-NEXT: [[C:%.*]] = and i32 [[A:%.*]], -124
|
|
; CHECK-NEXT: ret i32 [[C]]
|
|
;
|
|
%B = or i32 %A, 123
|
|
%C = add i32 %B, -123
|
|
ret i32 %C
|
|
}
|
|
|
|
define i32 @test44_extra_use(i32 %A) {
|
|
; CHECK-LABEL: @test44_extra_use(
|
|
; CHECK-NEXT: [[B:%.*]] = or i32 [[A:%.*]], 123
|
|
; CHECK-NEXT: [[C:%.*]] = and i32 [[A]], -124
|
|
; CHECK-NEXT: [[D:%.*]] = mul i32 [[B]], [[C]]
|
|
; CHECK-NEXT: ret i32 [[D]]
|
|
;
|
|
%B = or i32 %A, 123
|
|
%C = add i32 %B, -123
|
|
%D = mul i32 %B, %C
|
|
ret i32 %D
|
|
}
|
|
|
|
define i32 @test44_non_matching(i32 %A) {
|
|
; CHECK-LABEL: @test44_non_matching(
|
|
; CHECK-NEXT: [[B:%.*]] = or i32 [[A:%.*]], 123
|
|
; CHECK-NEXT: [[C:%.*]] = add i32 [[B]], -321
|
|
; CHECK-NEXT: ret i32 [[C]]
|
|
;
|
|
%B = or i32 %A, 123
|
|
%C = add i32 %B, -321
|
|
ret i32 %C
|
|
}
|
|
|
|
define <2 x i32> @test44_vec(<2 x i32> %A) {
|
|
; CHECK-LABEL: @test44_vec(
|
|
; CHECK-NEXT: [[C:%.*]] = and <2 x i32> [[A:%.*]], <i32 -124, i32 -124>
|
|
; CHECK-NEXT: ret <2 x i32> [[C]]
|
|
;
|
|
%B = or <2 x i32> %A, <i32 123, i32 123>
|
|
%C = add <2 x i32> %B, <i32 -123, i32 -123>
|
|
ret <2 x i32> %C
|
|
}
|
|
|
|
define <2 x i32> @test44_vec_non_matching(<2 x i32> %A) {
|
|
; CHECK-LABEL: @test44_vec_non_matching(
|
|
; CHECK-NEXT: [[B:%.*]] = or <2 x i32> [[A:%.*]], <i32 123, i32 123>
|
|
; CHECK-NEXT: [[C:%.*]] = add <2 x i32> [[B]], <i32 -321, i32 -321>
|
|
; CHECK-NEXT: ret <2 x i32> [[C]]
|
|
;
|
|
%B = or <2 x i32> %A, <i32 123, i32 123>
|
|
%C = add <2 x i32> %B, <i32 -321, i32 -321>
|
|
ret <2 x i32> %C
|
|
}
|
|
|
|
define <2 x i32> @test44_vec_undef(<2 x i32> %A) {
|
|
; CHECK-LABEL: @test44_vec_undef(
|
|
; CHECK-NEXT: [[B:%.*]] = or <2 x i32> [[A:%.*]], <i32 123, i32 undef>
|
|
; CHECK-NEXT: [[C:%.*]] = add <2 x i32> [[B]], <i32 -123, i32 undef>
|
|
; CHECK-NEXT: ret <2 x i32> [[C]]
|
|
;
|
|
%B = or <2 x i32> %A, <i32 123, i32 undef>
|
|
%C = add <2 x i32> %B, <i32 -123, i32 undef>
|
|
ret <2 x i32> %C
|
|
}
|
|
|
|
define <2 x i32> @test44_vec_non_splat(<2 x i32> %A) {
|
|
; CHECK-LABEL: @test44_vec_non_splat(
|
|
; CHECK-NEXT: [[B:%.*]] = or <2 x i32> [[A:%.*]], <i32 123, i32 456>
|
|
; CHECK-NEXT: [[C:%.*]] = add <2 x i32> [[B]], <i32 -123, i32 -456>
|
|
; CHECK-NEXT: ret <2 x i32> [[C]]
|
|
;
|
|
%B = or <2 x i32> %A, <i32 123, i32 456>
|
|
%C = add <2 x i32> %B, <i32 -123, i32 -456>
|
|
ret <2 x i32> %C
|
|
}
|
|
|
|
define i32 @lshr_add(i1 %x, i1 %y) {
|
|
; CHECK-LABEL: @lshr_add(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[X:%.*]], true
|
|
; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[R:%.*]] = zext i1 [[TMP2]] to i32
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%xz = zext i1 %x to i32
|
|
%ys = sext i1 %y to i32
|
|
%sub = add i32 %xz, %ys
|
|
%r = lshr i32 %sub, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
define i5 @and_add(i1 %x, i1 %y) {
|
|
; CHECK-LABEL: @and_add(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[X:%.*]], true
|
|
; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP2]], i5 -2, i5 0
|
|
; CHECK-NEXT: ret i5 [[R]]
|
|
;
|
|
%xz = zext i1 %x to i5
|
|
%ys = sext i1 %y to i5
|
|
%sub = add i5 %xz, %ys
|
|
%r = and i5 %sub, 30
|
|
ret i5 %r
|
|
}
|
|
|
|
define <2 x i8> @ashr_add_commute(<2 x i1> %x, <2 x i1> %y) {
|
|
; CHECK-LABEL: @ashr_add_commute(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> [[X:%.*]], <i1 true, i1 true>
|
|
; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i1> [[TMP1]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[TMP3:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i8>
|
|
; CHECK-NEXT: ret <2 x i8> [[TMP3]]
|
|
;
|
|
%xz = zext <2 x i1> %x to <2 x i8>
|
|
%ys = sext <2 x i1> %y to <2 x i8>
|
|
%sub = add nsw <2 x i8> %ys, %xz
|
|
%r = ashr <2 x i8> %sub, <i8 1, i8 1>
|
|
ret <2 x i8> %r
|
|
}
|
|
|
|
define i32 @cmp_math(i32 %x, i32 %y) {
|
|
; CHECK-LABEL: @cmp_math(
|
|
; CHECK-NEXT: [[LT:%.*]] = icmp ult i32 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[R:%.*]] = zext i1 [[LT]] to i32
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%gt = icmp ugt i32 %x, %y
|
|
%lt = icmp ult i32 %x, %y
|
|
%xz = zext i1 %gt to i32
|
|
%yz = zext i1 %lt to i32
|
|
%s = sub i32 %xz, %yz
|
|
%r = lshr i32 %s, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
; Negative test - wrong type
|
|
|
|
define i32 @lshr_add_nonbool(i2 %x, i1 %y) {
|
|
; CHECK-LABEL: @lshr_add_nonbool(
|
|
; CHECK-NEXT: [[XZ:%.*]] = zext i2 [[X:%.*]] to i32
|
|
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
|
|
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XZ]], [[YS]]
|
|
; CHECK-NEXT: [[R:%.*]] = lshr i32 [[SUB]], 31
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%xz = zext i2 %x to i32
|
|
%ys = sext i1 %y to i32
|
|
%sub = add i32 %xz, %ys
|
|
%r = lshr i32 %sub, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
; Negative test - wrong demand
|
|
|
|
define i32 @and31_add(i1 %x, i1 %y) {
|
|
; CHECK-LABEL: @and31_add(
|
|
; CHECK-NEXT: [[XZ:%.*]] = zext i1 [[X:%.*]] to i32
|
|
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
|
|
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XZ]], [[YS]]
|
|
; CHECK-NEXT: [[R:%.*]] = and i32 [[SUB]], 31
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%xz = zext i1 %x to i32
|
|
%ys = sext i1 %y to i32
|
|
%sub = add i32 %xz, %ys
|
|
%r = and i32 %sub, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
; Negative test - extra use
|
|
|
|
define i32 @lshr_add_use(i1 %x, i1 %y, ptr %p) {
|
|
; CHECK-LABEL: @lshr_add_use(
|
|
; CHECK-NEXT: [[XZ:%.*]] = zext i1 [[X:%.*]] to i32
|
|
; CHECK-NEXT: store i32 [[XZ]], ptr [[P:%.*]], align 4
|
|
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
|
|
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XZ]], [[YS]]
|
|
; CHECK-NEXT: [[R:%.*]] = lshr i32 [[SUB]], 31
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%xz = zext i1 %x to i32
|
|
store i32 %xz, ptr %p
|
|
%ys = sext i1 %y to i32
|
|
%sub = add i32 %xz, %ys
|
|
%r = lshr i32 %sub, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
; Negative test - extra use
|
|
|
|
define i32 @lshr_add_use2(i1 %x, i1 %y, ptr %p) {
|
|
; CHECK-LABEL: @lshr_add_use2(
|
|
; CHECK-NEXT: [[XZ:%.*]] = zext i1 [[X:%.*]] to i32
|
|
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
|
|
; CHECK-NEXT: store i32 [[YS]], ptr [[P:%.*]], align 4
|
|
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XZ]], [[YS]]
|
|
; CHECK-NEXT: [[R:%.*]] = lshr i32 [[SUB]], 31
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%xz = zext i1 %x to i32
|
|
%ys = sext i1 %y to i32
|
|
store i32 %ys, ptr %p
|
|
%sub = add i32 %xz, %ys
|
|
%r = lshr i32 %sub, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
define i32 @lshr_add_sexts(i1 %x, i1 %y) {
|
|
; CHECK-LABEL: @lshr_add_sexts(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = or i1 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[R:%.*]] = zext i1 [[TMP1]] to i32
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%xs = sext i1 %x to i32
|
|
%ys = sext i1 %y to i32
|
|
%sub = add i32 %xs, %ys
|
|
%r = lshr i32 %sub, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
define i5 @and_add_sexts(i1 %x, i1 %y) {
|
|
; CHECK-LABEL: @and_add_sexts(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = or i1 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i5 -2, i5 0
|
|
; CHECK-NEXT: ret i5 [[R]]
|
|
;
|
|
%xs = sext i1 %x to i5
|
|
%ys = sext i1 %y to i5
|
|
%sub = add i5 %xs, %ys
|
|
%r = and i5 %sub, 30
|
|
ret i5 %r
|
|
}
|
|
|
|
define <2 x i8> @ashr_add_sexts(<2 x i1> %x, <2 x i1> %y) {
|
|
; CHECK-LABEL: @ashr_add_sexts(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i1> [[Y:%.*]], [[X:%.*]]
|
|
; CHECK-NEXT: [[TMP2:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i8>
|
|
; CHECK-NEXT: ret <2 x i8> [[TMP2]]
|
|
;
|
|
%xs = sext <2 x i1> %x to <2 x i8>
|
|
%ys = sext <2 x i1> %y to <2 x i8>
|
|
%sub = add nsw <2 x i8> %ys, %xs
|
|
%r = ashr <2 x i8> %sub, <i8 1, i8 1>
|
|
ret <2 x i8> %r
|
|
}
|
|
|
|
define i32 @cmp_math_sexts(i32 %x, i32 %y) {
|
|
; CHECK-LABEL: @cmp_math_sexts(
|
|
; CHECK-NEXT: [[DOTNOT:%.*]] = icmp ne i32 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[R:%.*]] = zext i1 [[DOTNOT]] to i32
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%gt = icmp ugt i32 %x, %y
|
|
%lt = icmp ult i32 %x, %y
|
|
%xz = sext i1 %gt to i32
|
|
%yz = zext i1 %lt to i32
|
|
%s = sub i32 %xz, %yz
|
|
%r = lshr i32 %s, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
; Negative test - wrong type
|
|
|
|
define i32 @lshr_add_nonbool_sexts(i2 %x, i1 %y) {
|
|
; CHECK-LABEL: @lshr_add_nonbool_sexts(
|
|
; CHECK-NEXT: [[XS:%.*]] = sext i2 [[X:%.*]] to i32
|
|
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
|
|
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XS]], [[YS]]
|
|
; CHECK-NEXT: [[R:%.*]] = lshr i32 [[SUB]], 31
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%xs = sext i2 %x to i32
|
|
%ys = sext i1 %y to i32
|
|
%sub = add i32 %xs, %ys
|
|
%r = lshr i32 %sub, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
; Negative test - wrong demand
|
|
|
|
define i32 @and31_add_sexts(i1 %x, i1 %y) {
|
|
; CHECK-LABEL: @and31_add_sexts(
|
|
; CHECK-NEXT: [[XS:%.*]] = sext i1 [[X:%.*]] to i32
|
|
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
|
|
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XS]], [[YS]]
|
|
; CHECK-NEXT: [[R:%.*]] = and i32 [[SUB]], 31
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%xs = sext i1 %x to i32
|
|
%ys = sext i1 %y to i32
|
|
%sub = add i32 %xs, %ys
|
|
%r = and i32 %sub, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
; Negative test - extra use
|
|
|
|
define i32 @lshr_add_use_sexts(i1 %x, i1 %y, ptr %p) {
|
|
; CHECK-LABEL: @lshr_add_use_sexts(
|
|
; CHECK-NEXT: [[XS:%.*]] = sext i1 [[X:%.*]] to i32
|
|
; CHECK-NEXT: store i32 [[XS]], ptr [[P:%.*]], align 4
|
|
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
|
|
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XS]], [[YS]]
|
|
; CHECK-NEXT: [[R:%.*]] = lshr i32 [[SUB]], 31
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%xs = sext i1 %x to i32
|
|
store i32 %xs, ptr %p
|
|
%ys = sext i1 %y to i32
|
|
%sub = add i32 %xs, %ys
|
|
%r = lshr i32 %sub, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
; Negative test - extra use
|
|
|
|
define i32 @lshr_add_use2_sexts(i1 %x, i1 %y, ptr %p) {
|
|
; CHECK-LABEL: @lshr_add_use2_sexts(
|
|
; CHECK-NEXT: [[XS:%.*]] = sext i1 [[X:%.*]] to i32
|
|
; CHECK-NEXT: [[YS:%.*]] = sext i1 [[Y:%.*]] to i32
|
|
; CHECK-NEXT: store i32 [[YS]], ptr [[P:%.*]], align 4
|
|
; CHECK-NEXT: [[SUB:%.*]] = add nsw i32 [[XS]], [[YS]]
|
|
; CHECK-NEXT: [[R:%.*]] = lshr i32 [[SUB]], 31
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%xs = sext i1 %x to i32
|
|
%ys = sext i1 %y to i32
|
|
store i32 %ys, ptr %p
|
|
%sub = add i32 %xs, %ys
|
|
%r = lshr i32 %sub, 31
|
|
ret i32 %r
|
|
}
|
|
|
|
define i8 @add_like_or_t0(i8 %x) {
|
|
; CHECK-LABEL: @add_like_or_t0(
|
|
; CHECK-NEXT: [[I0:%.*]] = shl i8 [[X:%.*]], 4
|
|
; CHECK-NEXT: [[R:%.*]] = add i8 [[I0]], 57
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%i0 = shl i8 %x, 4
|
|
%i1 = or i8 %i0, 15 ; no common bits
|
|
%r = add i8 %i1, 42
|
|
ret i8 %r
|
|
}
|
|
define i8 @add_like_or_n1(i8 %x) {
|
|
; CHECK-LABEL: @add_like_or_n1(
|
|
; CHECK-NEXT: [[I0:%.*]] = shl i8 [[X:%.*]], 4
|
|
; CHECK-NEXT: [[I1:%.*]] = or i8 [[I0]], 31
|
|
; CHECK-NEXT: [[R:%.*]] = add i8 [[I1]], 42
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%i0 = shl i8 %x, 4
|
|
%i1 = or i8 %i0, 31 ; 4'th bit might be common-set
|
|
%r = add i8 %i1, 42
|
|
ret i8 %r
|
|
}
|
|
define i8 @add_like_or_t2_extrause(i8 %x) {
|
|
; CHECK-LABEL: @add_like_or_t2_extrause(
|
|
; CHECK-NEXT: [[I0:%.*]] = shl i8 [[X:%.*]], 4
|
|
; CHECK-NEXT: [[I1:%.*]] = or disjoint i8 [[I0]], 15
|
|
; CHECK-NEXT: call void @use(i8 [[I1]])
|
|
; CHECK-NEXT: [[R:%.*]] = add i8 [[I0]], 57
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%i0 = shl i8 %x, 4
|
|
%i1 = or i8 %i0, 15 ; no common bits
|
|
call void @use(i8 %i1) ; extra use
|
|
%r = add i8 %i1, 42
|
|
ret i8 %r
|
|
}
|
|
|
|
define i8 @add_like_or_disjoint(i8 %x) {
|
|
; CHECK-LABEL: @add_like_or_disjoint(
|
|
; CHECK-NEXT: [[R:%.*]] = add i8 [[X:%.*]], 57
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%i1 = or disjoint i8 %x, 15
|
|
%r = add i8 %i1, 42
|
|
ret i8 %r
|
|
}
|
|
|
|
define i8 @add_and_xor(i8 noundef %x, i8 %y) {
|
|
; CHECK-LABEL: @add_and_xor(
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y:%.*]], [[X:%.*]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%xor = xor i8 %x, -1
|
|
%and = and i8 %xor, %y
|
|
%add = add i8 %and, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_and_xor_wrong_const(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @add_and_xor_wrong_const(
|
|
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -2
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[XOR]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[AND]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%xor = xor i8 %x, -2
|
|
%and = and i8 %xor, %y
|
|
%add = add i8 %and, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_and_xor_wrong_op(i8 %x, i8 %y, i8 %z) {
|
|
; CHECK-LABEL: @add_and_xor_wrong_op(
|
|
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[Z:%.*]], -1
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[XOR]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[AND]], [[X:%.*]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%xor = xor i8 %z, -1
|
|
%and = and i8 %xor, %y
|
|
%add = add i8 %and, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_and_xor_commuted1(i8 noundef %x, i8 %_y) {
|
|
; CHECK-LABEL: @add_and_xor_commuted1(
|
|
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y]], [[X:%.*]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%y = udiv i8 42, %_y ; thwart complexity-based canonicalization
|
|
%xor = xor i8 %x, -1
|
|
%and = and i8 %y, %xor
|
|
%add = add i8 %and, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_and_xor_commuted2(i8 noundef %_x, i8 %y) {
|
|
; CHECK-LABEL: @add_and_xor_commuted2(
|
|
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[X]], [[Y:%.*]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%x = udiv i8 42, %_x ; thwart complexity-based canonicalization
|
|
%xor = xor i8 %x, -1
|
|
%and = and i8 %xor, %y
|
|
%add = add i8 %x, %and
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_and_xor_commuted3(i8 noundef %_x, i8 %_y) {
|
|
; CHECK-LABEL: @add_and_xor_commuted3(
|
|
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
|
|
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[X]], [[Y]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%x = udiv i8 42, %_x ; thwart complexity-based canonicalization
|
|
%y = udiv i8 42, %_y ; thwart complexity-based canonicalization
|
|
%xor = xor i8 %x, -1
|
|
%and = and i8 %y, %xor
|
|
%add = add i8 %x, %and
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_and_xor_extra_use(i8 noundef %x, i8 %y) {
|
|
; CHECK-LABEL: @add_and_xor_extra_use(
|
|
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
|
|
; CHECK-NEXT: call void @use(i8 [[XOR]])
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[XOR]], [[Y:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%xor = xor i8 %x, -1
|
|
call void @use(i8 %xor)
|
|
%and = and i8 %xor, %y
|
|
call void @use(i8 %and)
|
|
%add = add i8 %and, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_const(i8 noundef %x) {
|
|
; CHECK-LABEL: @add_xor_and_const(
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[X:%.*]], 42
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%and = and i8 %x, 42
|
|
%xor = xor i8 %and, 42
|
|
%add = add i8 %xor, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_const_wrong_const(i8 %x) {
|
|
; CHECK-LABEL: @add_xor_and_const_wrong_const(
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], 42
|
|
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[AND]], 88
|
|
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[XOR]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%and = and i8 %x, 42
|
|
%xor = xor i8 %and, 88
|
|
%add = add i8 %xor, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_var(i8 noundef %x, i8 noundef %y) {
|
|
; CHECK-LABEL: @add_xor_and_var(
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%and = and i8 %x, %y
|
|
call void @use(i8 %and)
|
|
%xor = xor i8 %and, %y
|
|
%add = add i8 %xor, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_var_wrong_op1(i8 %x, i8 %y, i8 %z) {
|
|
; CHECK-LABEL: @add_xor_and_var_wrong_op1(
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[AND]], [[Z:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[XOR]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%and = and i8 %x, %y
|
|
call void @use(i8 %and)
|
|
%xor = xor i8 %and, %z
|
|
%add = add i8 %xor, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_var_wrong_op2(i8 %x, i8 %y, i8 %z) {
|
|
; CHECK-LABEL: @add_xor_and_var_wrong_op2(
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[AND]], [[Y]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[XOR]], [[Z:%.*]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%and = and i8 %x, %y
|
|
call void @use(i8 %and)
|
|
%xor = xor i8 %and, %y
|
|
%add = add i8 %xor, %z
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_var_commuted1(i8 noundef %x, i8 noundef %y) {
|
|
; CHECK-LABEL: @add_xor_and_var_commuted1(
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y:%.*]], [[X:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%and = and i8 %y, %x
|
|
call void @use(i8 %and)
|
|
%xor = xor i8 %and, %y
|
|
%add = add i8 %xor, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_var_commuted2(i8 noundef %_x, i8 noundef %_y) {
|
|
; CHECK-LABEL: @add_xor_and_var_commuted2(
|
|
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
|
|
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], [[Y]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%x = udiv i8 42, %_x ; thwart complexity-based canonicalization
|
|
%y = udiv i8 42, %_y ; thwart complexity-based canonicalization
|
|
%and = and i8 %x, %y
|
|
call void @use(i8 %and)
|
|
%xor = xor i8 %y, %and
|
|
%add = add i8 %xor, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_var_commuted3(i8 noundef %x, i8 noundef %_y) {
|
|
; CHECK-LABEL: @add_xor_and_var_commuted3(
|
|
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y]], [[X:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%y = udiv i8 42, %_y ; thwart complexity-based canonicalization
|
|
%and = and i8 %y, %x
|
|
call void @use(i8 %and)
|
|
%xor = xor i8 %y, %and
|
|
%add = add i8 %xor, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_var_commuted4(i8 noundef %_x, i8 noundef %y) {
|
|
; CHECK-LABEL: @add_xor_and_var_commuted4(
|
|
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], [[Y:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[X]], [[Y]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%x = udiv i8 42, %_x ; thwart complexity-based canonicalization
|
|
%and = and i8 %x, %y
|
|
call void @use(i8 %and)
|
|
%xor = xor i8 %and, %y
|
|
%add = add i8 %x, %xor
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_var_commuted5(i8 noundef %_x, i8 noundef %_y) {
|
|
; CHECK-LABEL: @add_xor_and_var_commuted5(
|
|
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
|
|
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y]], [[X]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[X]], [[Y]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%x = udiv i8 42, %_x ; thwart complexity-based canonicalization
|
|
%y = udiv i8 42, %_y ; thwart complexity-based canonicalization
|
|
%and = and i8 %y, %x
|
|
call void @use(i8 %and)
|
|
%xor = xor i8 %and, %y
|
|
%add = add i8 %x, %xor
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_var_commuted6(i8 noundef %_x, i8 noundef %_y) {
|
|
; CHECK-LABEL: @add_xor_and_var_commuted6(
|
|
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
|
|
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], [[Y]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[X]], [[Y]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%x = udiv i8 42, %_x ; thwart complexity-based canonicalization
|
|
%y = udiv i8 42, %_y ; thwart complexity-based canonicalization
|
|
%and = and i8 %x, %y
|
|
call void @use(i8 %and)
|
|
%xor = xor i8 %y, %and
|
|
%add = add i8 %x, %xor
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_var_commuted7(i8 noundef %_x, i8 noundef %_y) {
|
|
; CHECK-LABEL: @add_xor_and_var_commuted7(
|
|
; CHECK-NEXT: [[X:%.*]] = udiv i8 42, [[_X:%.*]]
|
|
; CHECK-NEXT: [[Y:%.*]] = udiv i8 42, [[_Y:%.*]]
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y]], [[X]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[X]], [[Y]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%x = udiv i8 42, %_x ; thwart complexity-based canonicalization
|
|
%y = udiv i8 42, %_y ; thwart complexity-based canonicalization
|
|
%and = and i8 %y, %x
|
|
call void @use(i8 %and)
|
|
%xor = xor i8 %y, %and
|
|
%add = add i8 %x, %xor
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_xor_and_var_extra_use(i8 noundef %x, i8 noundef %y) {
|
|
; CHECK-LABEL: @add_xor_and_var_extra_use(
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[AND]])
|
|
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[AND]], [[Y]]
|
|
; CHECK-NEXT: call void @use(i8 [[XOR]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%and = and i8 %x, %y
|
|
call void @use(i8 %and)
|
|
%xor = xor i8 %and, %y
|
|
call void @use(i8 %xor)
|
|
%add = add i8 %xor, %x
|
|
ret i8 %add
|
|
}
|
|
|
|
define i32 @add_add_add(i32 %A, i32 %B, i32 %C, i32 %D) {
|
|
; CHECK-LABEL: @add_add_add(
|
|
; CHECK-NEXT: [[E:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[F:%.*]] = add i32 [[E]], [[C:%.*]]
|
|
; CHECK-NEXT: [[G:%.*]] = add i32 [[F]], [[D:%.*]]
|
|
; CHECK-NEXT: ret i32 [[G]]
|
|
;
|
|
%E = add i32 %A, %B
|
|
%F = add i32 %E, %C
|
|
%G = add i32 %F, %D
|
|
ret i32 %G
|
|
}
|
|
|
|
define i32 @add_add_add_commute1(i32 %A, i32 %B, i32 %C, i32 %D) {
|
|
; CHECK-LABEL: @add_add_add_commute1(
|
|
; CHECK-NEXT: [[E:%.*]] = add i32 [[B:%.*]], [[A:%.*]]
|
|
; CHECK-NEXT: [[F:%.*]] = add i32 [[E]], [[C:%.*]]
|
|
; CHECK-NEXT: [[G:%.*]] = add i32 [[F]], [[D:%.*]]
|
|
; CHECK-NEXT: ret i32 [[G]]
|
|
;
|
|
%E = add i32 %B, %A
|
|
%F = add i32 %E, %C
|
|
%G = add i32 %F, %D
|
|
ret i32 %G
|
|
}
|
|
|
|
define i32 @add_add_add_commute2(i32 %A, i32 %B, i32 %C, i32 %D) {
|
|
; CHECK-LABEL: @add_add_add_commute2(
|
|
; CHECK-NEXT: [[E:%.*]] = add i32 [[B:%.*]], [[A:%.*]]
|
|
; CHECK-NEXT: [[F:%.*]] = add i32 [[E]], [[C:%.*]]
|
|
; CHECK-NEXT: [[G:%.*]] = add i32 [[F]], [[D:%.*]]
|
|
; CHECK-NEXT: ret i32 [[G]]
|
|
;
|
|
%E = add i32 %B, %A
|
|
%F = add i32 %C, %E
|
|
%G = add i32 %F, %D
|
|
ret i32 %G
|
|
}
|
|
|
|
define i32 @add_add_add_commute3(i32 %A, i32 %B, i32 %C, i32 %D) {
|
|
; CHECK-LABEL: @add_add_add_commute3(
|
|
; CHECK-NEXT: [[E:%.*]] = add i32 [[B:%.*]], [[A:%.*]]
|
|
; CHECK-NEXT: [[F:%.*]] = add i32 [[E]], [[C:%.*]]
|
|
; CHECK-NEXT: [[G:%.*]] = add i32 [[F]], [[D:%.*]]
|
|
; CHECK-NEXT: ret i32 [[G]]
|
|
;
|
|
%E = add i32 %B, %A
|
|
%F = add i32 %C, %E
|
|
%G = add i32 %D, %F
|
|
ret i32 %G
|
|
}
|
|
|
|
; x * y + x --> (y + 1) * x
|
|
|
|
define i8 @mul_add_common_factor_commute1(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @mul_add_common_factor_commute1(
|
|
; CHECK-NEXT: [[X1:%.*]] = add i8 [[Y:%.*]], 1
|
|
; CHECK-NEXT: [[A:%.*]] = mul i8 [[X1]], [[X:%.*]]
|
|
; CHECK-NEXT: ret i8 [[A]]
|
|
;
|
|
%m = mul nsw i8 %x, %y
|
|
%a = add nsw i8 %m, %x
|
|
ret i8 %a
|
|
}
|
|
|
|
define <2 x i8> @mul_add_common_factor_commute2(<2 x i8> %x, <2 x i8> %y) {
|
|
; CHECK-LABEL: @mul_add_common_factor_commute2(
|
|
; CHECK-NEXT: [[M1:%.*]] = add <2 x i8> [[Y:%.*]], <i8 1, i8 1>
|
|
; CHECK-NEXT: [[A:%.*]] = mul nuw <2 x i8> [[M1]], [[X:%.*]]
|
|
; CHECK-NEXT: ret <2 x i8> [[A]]
|
|
;
|
|
%m = mul nuw <2 x i8> %y, %x
|
|
%a = add nuw <2 x i8> %m, %x
|
|
ret <2 x i8> %a
|
|
}
|
|
|
|
define i8 @mul_add_common_factor_commute3(i8 %p, i8 %y) {
|
|
; CHECK-LABEL: @mul_add_common_factor_commute3(
|
|
; CHECK-NEXT: [[X:%.*]] = mul i8 [[P:%.*]], [[P]]
|
|
; CHECK-NEXT: [[M1:%.*]] = add i8 [[Y:%.*]], 1
|
|
; CHECK-NEXT: [[A:%.*]] = mul i8 [[X]], [[M1]]
|
|
; CHECK-NEXT: ret i8 [[A]]
|
|
;
|
|
%x = mul i8 %p, %p ; thwart complexity-based canonicalization
|
|
%m = mul nuw i8 %x, %y
|
|
%a = add nsw i8 %x, %m
|
|
ret i8 %a
|
|
}
|
|
|
|
define i8 @mul_add_common_factor_commute4(i8 %p, i8 %q) {
|
|
; CHECK-LABEL: @mul_add_common_factor_commute4(
|
|
; CHECK-NEXT: [[X:%.*]] = mul i8 [[P:%.*]], [[P]]
|
|
; CHECK-NEXT: [[Y:%.*]] = mul i8 [[Q:%.*]], [[Q]]
|
|
; CHECK-NEXT: [[M1:%.*]] = add i8 [[Y]], 1
|
|
; CHECK-NEXT: [[A:%.*]] = mul i8 [[X]], [[M1]]
|
|
; CHECK-NEXT: ret i8 [[A]]
|
|
;
|
|
%x = mul i8 %p, %p ; thwart complexity-based canonicalization
|
|
%y = mul i8 %q, %q ; thwart complexity-based canonicalization
|
|
%m = mul nsw i8 %y, %x
|
|
%a = add nuw i8 %x, %m
|
|
ret i8 %a
|
|
}
|
|
|
|
; negative test - uses
|
|
|
|
define i8 @mul_add_common_factor_use(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @mul_add_common_factor_use(
|
|
; CHECK-NEXT: [[M:%.*]] = mul i8 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[M]])
|
|
; CHECK-NEXT: [[A:%.*]] = add i8 [[M]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[A]]
|
|
;
|
|
%m = mul i8 %x, %y
|
|
call void @use(i8 %m)
|
|
%a = add i8 %m, %x
|
|
ret i8 %a
|
|
}
|
|
|
|
define i8 @not_mul(i8 %x) {
|
|
; CHECK-LABEL: @not_mul(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = mul i8 [[X:%.*]], -41
|
|
; CHECK-NEXT: [[PLUSX:%.*]] = add i8 [[TMP1]], -1
|
|
; CHECK-NEXT: ret i8 [[PLUSX]]
|
|
;
|
|
%mul = mul nsw i8 %x, 42
|
|
%not = xor i8 %mul, -1
|
|
%plusx = add nsw i8 %not, %x
|
|
ret i8 %plusx
|
|
}
|
|
|
|
define <2 x i8> @not_mul_commute(<2 x i8> %p) {
|
|
; CHECK-LABEL: @not_mul_commute(
|
|
; CHECK-NEXT: [[X:%.*]] = mul <2 x i8> [[P:%.*]], [[P]]
|
|
; CHECK-NEXT: [[TMP1:%.*]] = mul <2 x i8> [[X]], <i8 43, i8 43>
|
|
; CHECK-NEXT: [[PLUSX:%.*]] = add <2 x i8> [[TMP1]], <i8 -1, i8 -1>
|
|
; CHECK-NEXT: ret <2 x i8> [[PLUSX]]
|
|
;
|
|
%x = mul <2 x i8> %p, %p ; thwart complexity-based canonicalization
|
|
%mul = mul nuw <2 x i8> %x, <i8 -42, i8 -42>
|
|
%not = xor <2 x i8> %mul, <i8 -1, i8 -1>
|
|
%plusx = add nuw <2 x i8> %x, %not
|
|
ret <2 x i8> %plusx
|
|
}
|
|
|
|
; negative test - need common operand
|
|
|
|
define i8 @not_mul_wrong_op(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @not_mul_wrong_op(
|
|
; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[X:%.*]], 42
|
|
; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[MUL]], -1
|
|
; CHECK-NEXT: [[PLUSX:%.*]] = add i8 [[NOT]], [[Y:%.*]]
|
|
; CHECK-NEXT: ret i8 [[PLUSX]]
|
|
;
|
|
%mul = mul i8 %x, 42
|
|
%not = xor i8 %mul, -1
|
|
%plusx = add i8 %not, %y
|
|
ret i8 %plusx
|
|
}
|
|
|
|
; negative test - avoid creating an extra mul
|
|
|
|
define i8 @not_mul_use1(i8 %x) {
|
|
; CHECK-LABEL: @not_mul_use1(
|
|
; CHECK-NEXT: [[MUL:%.*]] = mul nsw i8 [[X:%.*]], 42
|
|
; CHECK-NEXT: call void @use(i8 [[MUL]])
|
|
; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[MUL]], -1
|
|
; CHECK-NEXT: [[PLUSX:%.*]] = add nsw i8 [[NOT]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[PLUSX]]
|
|
;
|
|
%mul = mul nsw i8 %x, 42
|
|
call void @use(i8 %mul)
|
|
%not = xor i8 %mul, -1
|
|
%plusx = add nsw i8 %not, %x
|
|
ret i8 %plusx
|
|
}
|
|
|
|
; negative test - too many instructions
|
|
|
|
define i8 @not_mul_use2(i8 %x) {
|
|
; CHECK-LABEL: @not_mul_use2(
|
|
; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[X:%.*]], 42
|
|
; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[MUL]], -1
|
|
; CHECK-NEXT: call void @use(i8 [[NOT]])
|
|
; CHECK-NEXT: [[PLUSX:%.*]] = add i8 [[NOT]], [[X]]
|
|
; CHECK-NEXT: ret i8 [[PLUSX]]
|
|
;
|
|
%mul = mul i8 %x, 42
|
|
%not = xor i8 %mul, -1
|
|
call void @use(i8 %not)
|
|
%plusx = add i8 %not, %x
|
|
ret i8 %plusx
|
|
}
|
|
|
|
define i8 @full_ashr_inc(i8 %x) {
|
|
; CHECK-LABEL: @full_ashr_inc(
|
|
; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
|
|
; CHECK-NEXT: [[R:%.*]] = zext i1 [[ISNOTNEG]] to i8
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%a = ashr i8 %x, 7
|
|
%r = add i8 %a, 1
|
|
ret i8 %r
|
|
}
|
|
|
|
define <2 x i6> @full_ashr_inc_vec(<2 x i6> %x) {
|
|
; CHECK-LABEL: @full_ashr_inc_vec(
|
|
; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt <2 x i6> [[X:%.*]], <i6 -1, i6 -1>
|
|
; CHECK-NEXT: [[R:%.*]] = zext <2 x i1> [[ISNOTNEG]] to <2 x i6>
|
|
; CHECK-NEXT: ret <2 x i6> [[R]]
|
|
;
|
|
%a = ashr <2 x i6> %x, <i6 5, i6 poison>
|
|
%r = add <2 x i6> %a, <i6 1, i6 1>
|
|
ret <2 x i6> %r
|
|
}
|
|
|
|
; negative test - extra use
|
|
|
|
define i8 @full_ashr_inc_use(i8 %x) {
|
|
; CHECK-LABEL: @full_ashr_inc_use(
|
|
; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 7
|
|
; CHECK-NEXT: call void @use(i8 [[A]])
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[A]], 1
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%a = ashr i8 %x, 7
|
|
call void @use(i8 %a)
|
|
%r = add i8 %a, 1
|
|
ret i8 %r
|
|
}
|
|
|
|
; negative test - wrong shift amount
|
|
|
|
define i8 @not_full_ashr_inc(i8 %x) {
|
|
; CHECK-LABEL: @not_full_ashr_inc(
|
|
; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 6
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[A]], 1
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%a = ashr i8 %x, 6
|
|
%r = add i8 %a, 1
|
|
ret i8 %r
|
|
}
|
|
|
|
; negative test - wrong add amount
|
|
|
|
define i8 @full_ashr_not_inc(i8 %x) {
|
|
; CHECK-LABEL: @full_ashr_not_inc(
|
|
; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 7
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[A]], 2
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%a = ashr i8 %x, 7
|
|
%r = add i8 %a, 2
|
|
ret i8 %r
|
|
}
|
|
|
|
define i8 @select_negate_or_zero(i1 %b, i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @select_negate_or_zero(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i8 0, i8 [[X:%.*]]
|
|
; CHECK-NEXT: [[ADD1:%.*]] = sub i8 [[Y:%.*]], [[TMP1]]
|
|
; CHECK-NEXT: ret i8 [[ADD1]]
|
|
;
|
|
%negx = sub i8 0, %x
|
|
%sel = select i1 %b, i8 0, i8 %negx
|
|
%add = add i8 %sel, %y
|
|
ret i8 %add
|
|
}
|
|
|
|
; commuted add operands - same result
|
|
|
|
define <2 x i8> @select_negate_or_zero_commute(<2 x i1> %b, <2 x i8> %x, <2 x i8> %p) {
|
|
; CHECK-LABEL: @select_negate_or_zero_commute(
|
|
; CHECK-NEXT: [[Y:%.*]] = mul <2 x i8> [[P:%.*]], [[P]]
|
|
; CHECK-NEXT: [[TMP1:%.*]] = select <2 x i1> [[B:%.*]], <2 x i8> zeroinitializer, <2 x i8> [[X:%.*]]
|
|
; CHECK-NEXT: [[ADD1:%.*]] = sub <2 x i8> [[Y]], [[TMP1]]
|
|
; CHECK-NEXT: ret <2 x i8> [[ADD1]]
|
|
;
|
|
%y = mul <2 x i8> %p, %p ; thwart complexity-based canonicalization
|
|
%negx = sub <2 x i8> <i8 poison, i8 0>, %x
|
|
%sel = select <2 x i1> %b, <2 x i8> <i8 poison, i8 0>, <2 x i8> %negx
|
|
%add = add <2 x i8> %y, %sel
|
|
ret <2 x i8> %add
|
|
}
|
|
|
|
; swapped select operands and extra use are ok
|
|
|
|
define i8 @select_negate_or_zero_swap(i1 %b, i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @select_negate_or_zero_swap(
|
|
; CHECK-NEXT: [[NEGX:%.*]] = sub i8 0, [[X:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[NEGX]])
|
|
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i8 [[X]], i8 0
|
|
; CHECK-NEXT: [[ADD1:%.*]] = sub i8 [[Y:%.*]], [[TMP1]]
|
|
; CHECK-NEXT: ret i8 [[ADD1]]
|
|
;
|
|
%negx = sub i8 0, %x
|
|
call void @use(i8 %negx)
|
|
%sel = select i1 %b, i8 %negx, i8 0
|
|
%add = add i8 %sel, %y
|
|
ret i8 %add
|
|
}
|
|
|
|
; commuted add operands - same result
|
|
|
|
define i8 @select_negate_or_zero_swap_commute(i1 %b, i8 %x, i8 %p) {
|
|
; CHECK-LABEL: @select_negate_or_zero_swap_commute(
|
|
; CHECK-NEXT: [[Y:%.*]] = mul i8 [[P:%.*]], [[P]]
|
|
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i8 [[X:%.*]], i8 0
|
|
; CHECK-NEXT: [[ADD1:%.*]] = sub i8 [[Y]], [[TMP1]]
|
|
; CHECK-NEXT: ret i8 [[ADD1]]
|
|
;
|
|
%y = mul i8 %p, %p ; thwart complexity-based canonicalization
|
|
%negx = sub i8 0, %x
|
|
%sel = select i1 %b, i8 %negx, i8 0
|
|
%add = add i8 %y, %sel
|
|
ret i8 %add
|
|
}
|
|
|
|
; negative test - one arm of the select must simplify
|
|
|
|
define i8 @select_negate_or_nonzero(i1 %b, i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @select_negate_or_nonzero(
|
|
; CHECK-NEXT: [[NEGX:%.*]] = sub i8 0, [[X:%.*]]
|
|
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[B:%.*]], i8 42, i8 [[NEGX]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SEL]], [[Y:%.*]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%negx = sub i8 0, %x
|
|
%sel = select i1 %b, i8 42, i8 %negx
|
|
%add = add i8 %sel, %y
|
|
ret i8 %add
|
|
}
|
|
|
|
; negative test - must have a negate, not any subtract
|
|
|
|
define i8 @select_nonnegate_or_zero(i1 %b, i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @select_nonnegate_or_zero(
|
|
; CHECK-NEXT: [[NEGX:%.*]] = sub i8 42, [[X:%.*]]
|
|
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[B:%.*]], i8 0, i8 [[NEGX]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SEL]], [[Y:%.*]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%negx = sub i8 42, %x
|
|
%sel = select i1 %b, i8 0, i8 %negx
|
|
%add = add i8 %sel, %y
|
|
ret i8 %add
|
|
}
|
|
|
|
; negative test - don't create an extra instruction
|
|
|
|
define i8 @select_negate_or_nonzero_use(i1 %b, i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @select_negate_or_nonzero_use(
|
|
; CHECK-NEXT: [[NEGX:%.*]] = sub i8 0, [[X:%.*]]
|
|
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[B:%.*]], i8 0, i8 [[NEGX]]
|
|
; CHECK-NEXT: call void @use(i8 [[SEL]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SEL]], [[Y:%.*]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%negx = sub i8 0, %x
|
|
%sel = select i1 %b, i8 0, i8 %negx
|
|
call void @use(i8 %sel)
|
|
%add = add i8 %sel, %y
|
|
ret i8 %add
|
|
}
|
|
|
|
; extra reduction because y + ~y -> -1
|
|
|
|
define i5 @select_negate_not(i1 %b, i5 %x, i5 %y) {
|
|
; CHECK-LABEL: @select_negate_not(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = sub i5 [[Y:%.*]], [[X:%.*]]
|
|
; CHECK-NEXT: [[ADD1:%.*]] = select i1 [[B:%.*]], i5 -1, i5 [[TMP1]]
|
|
; CHECK-NEXT: ret i5 [[ADD1]]
|
|
;
|
|
%negx = sub i5 0, %x
|
|
%noty = xor i5 %y, -1
|
|
%sel = select i1 %b, i5 %noty, i5 %negx
|
|
%add = add i5 %sel, %y
|
|
ret i5 %add
|
|
}
|
|
|
|
define i5 @select_negate_not_commute(i1 %b, i5 %x, i5 %p) {
|
|
; CHECK-LABEL: @select_negate_not_commute(
|
|
; CHECK-NEXT: [[Y:%.*]] = mul i5 [[P:%.*]], [[P]]
|
|
; CHECK-NEXT: [[TMP1:%.*]] = sub i5 [[Y]], [[X:%.*]]
|
|
; CHECK-NEXT: [[ADD1:%.*]] = select i1 [[B:%.*]], i5 -1, i5 [[TMP1]]
|
|
; CHECK-NEXT: ret i5 [[ADD1]]
|
|
;
|
|
%y = mul i5 %p, %p ; thwart complexity-based canonicalization
|
|
%negx = sub i5 0, %x
|
|
%noty = xor i5 %y, -1
|
|
%sel = select i1 %b, i5 %noty, i5 %negx
|
|
%add = add i5 %y, %sel
|
|
ret i5 %add
|
|
}
|
|
|
|
define i5 @select_negate_not_swap(i1 %b, i5 %x, i5 %y) {
|
|
; CHECK-LABEL: @select_negate_not_swap(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = sub i5 [[Y:%.*]], [[X:%.*]]
|
|
; CHECK-NEXT: [[ADD1:%.*]] = select i1 [[B:%.*]], i5 [[TMP1]], i5 -1
|
|
; CHECK-NEXT: ret i5 [[ADD1]]
|
|
;
|
|
%negx = sub i5 0, %x
|
|
%noty = xor i5 %y, -1
|
|
%sel = select i1 %b, i5 %negx, i5 %noty
|
|
%add = add i5 %sel, %y
|
|
ret i5 %add
|
|
}
|
|
|
|
define i5 @select_negate_not_swap_commute(i1 %b, i5 %x, i5 %p) {
|
|
; CHECK-LABEL: @select_negate_not_swap_commute(
|
|
; CHECK-NEXT: [[Y:%.*]] = mul i5 [[P:%.*]], [[P]]
|
|
; CHECK-NEXT: [[TMP1:%.*]] = sub i5 [[Y]], [[X:%.*]]
|
|
; CHECK-NEXT: [[ADD1:%.*]] = select i1 [[B:%.*]], i5 [[TMP1]], i5 -1
|
|
; CHECK-NEXT: ret i5 [[ADD1]]
|
|
;
|
|
%y = mul i5 %p, %p ; thwart complexity-based canonicalization
|
|
%negx = sub i5 0, %x
|
|
%noty = xor i5 %y, -1
|
|
%sel = select i1 %b, i5 %negx, i5 %noty
|
|
%add = add i5 %y, %sel
|
|
ret i5 %add
|
|
}
|
|
|
|
define i32 @add_select_sub_both_arms_simplify(i1 %b, i32 %a) {
|
|
; CHECK-LABEL: @add_select_sub_both_arms_simplify(
|
|
; CHECK-NEXT: [[ADD:%.*]] = select i1 [[B:%.*]], i32 [[A:%.*]], i32 99
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%sub = sub i32 99, %a
|
|
%sel = select i1 %b, i32 0, i32 %sub
|
|
%add = add i32 %sel, %a
|
|
ret i32 %add
|
|
}
|
|
|
|
define <2 x i8> @add_select_sub_both_arms_simplify_swap(<2 x i1> %b, <2 x i8> %a) {
|
|
; CHECK-LABEL: @add_select_sub_both_arms_simplify_swap(
|
|
; CHECK-NEXT: [[ADD:%.*]] = select <2 x i1> [[B:%.*]], <2 x i8> <i8 42, i8 99>, <2 x i8> [[A:%.*]]
|
|
; CHECK-NEXT: ret <2 x i8> [[ADD]]
|
|
;
|
|
%sub = sub <2 x i8> <i8 42, i8 99>, %a
|
|
%sel = select <2 x i1> %b, <2 x i8> %sub, <2 x i8> zeroinitializer
|
|
%add = add <2 x i8> %sel, %a
|
|
ret <2 x i8> %add
|
|
}
|
|
|
|
define i8 @add_select_sub_both_arms_simplify_use1(i1 %b, i8 %a) {
|
|
; CHECK-LABEL: @add_select_sub_both_arms_simplify_use1(
|
|
; CHECK-NEXT: [[SUB:%.*]] = sub i8 42, [[A:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[SUB]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = select i1 [[B:%.*]], i8 [[A]], i8 42
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%sub = sub i8 42, %a
|
|
call void @use(i8 %sub)
|
|
%sel = select i1 %b, i8 0, i8 %sub
|
|
%add = add i8 %sel, %a
|
|
ret i8 %add
|
|
}
|
|
|
|
define i8 @add_select_sub_both_arms_simplify_use2(i1 %b, i8 %a) {
|
|
; CHECK-LABEL: @add_select_sub_both_arms_simplify_use2(
|
|
; CHECK-NEXT: [[SUB:%.*]] = sub i8 42, [[A:%.*]]
|
|
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[B:%.*]], i8 0, i8 [[SUB]]
|
|
; CHECK-NEXT: call void @use(i8 [[SEL]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SEL]], [[A]]
|
|
; CHECK-NEXT: ret i8 [[ADD]]
|
|
;
|
|
%sub = sub i8 42, %a
|
|
%sel = select i1 %b, i8 0, i8 %sub
|
|
call void @use(i8 %sel)
|
|
%add = add i8 %sel, %a
|
|
ret i8 %add
|
|
}
|
|
|
|
define i5 @demand_low_bits_uses(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @demand_low_bits_uses(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = shl i8 [[X:%.*]], 5
|
|
; CHECK-NEXT: [[A:%.*]] = sub i8 [[Y:%.*]], [[TMP1]]
|
|
; CHECK-NEXT: call void @use(i8 [[A]])
|
|
; CHECK-NEXT: [[R:%.*]] = trunc i8 [[Y]] to i5
|
|
; CHECK-NEXT: ret i5 [[R]]
|
|
;
|
|
%m = mul i8 %x, -32 ; 0xE0
|
|
%a = add i8 %m, %y
|
|
call void @use(i8 %a)
|
|
%r = trunc i8 %a to i5
|
|
ret i5 %r
|
|
}
|
|
|
|
; negative test - demands one more bit
|
|
|
|
define i6 @demand_low_bits_uses_extra_bit(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @demand_low_bits_uses_extra_bit(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = shl i8 [[X:%.*]], 5
|
|
; CHECK-NEXT: [[A:%.*]] = sub i8 [[Y:%.*]], [[TMP1]]
|
|
; CHECK-NEXT: call void @use(i8 [[A]])
|
|
; CHECK-NEXT: [[R:%.*]] = trunc i8 [[A]] to i6
|
|
; CHECK-NEXT: ret i6 [[R]]
|
|
;
|
|
%m = mul i8 %x, -32 ; 0xE0
|
|
%a = add i8 %m, %y
|
|
call void @use(i8 %a)
|
|
%r = trunc i8 %a to i6
|
|
ret i6 %r
|
|
}
|
|
|
|
define i8 @demand_low_bits_uses_commute(i8 %x, i8 %p, i8 %z) {
|
|
; CHECK-LABEL: @demand_low_bits_uses_commute(
|
|
; CHECK-NEXT: [[Y:%.*]] = mul i8 [[P:%.*]], [[P]]
|
|
; CHECK-NEXT: [[M:%.*]] = and i8 [[X:%.*]], -64
|
|
; CHECK-NEXT: [[A:%.*]] = add i8 [[Y]], [[M]]
|
|
; CHECK-NEXT: call void @use(i8 [[A]])
|
|
; CHECK-NEXT: [[S:%.*]] = sub i8 [[Y]], [[Z:%.*]]
|
|
; CHECK-NEXT: [[R:%.*]] = shl i8 [[S]], 2
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%y = mul i8 %p, %p ; thwart complexity-based canonicalization
|
|
%m = and i8 %x, -64 ; 0xC0
|
|
%a = add i8 %y, %m
|
|
call void @use(i8 %a)
|
|
%s = sub i8 %a, %z
|
|
%r = shl i8 %s, 2
|
|
ret i8 %r
|
|
}
|
|
|
|
; negative test - demands one more bit
|
|
|
|
define i8 @demand_low_bits_uses_commute_extra_bit(i8 %x, i8 %p, i8 %z) {
|
|
; CHECK-LABEL: @demand_low_bits_uses_commute_extra_bit(
|
|
; CHECK-NEXT: [[Y:%.*]] = mul i8 [[P:%.*]], [[P]]
|
|
; CHECK-NEXT: [[M:%.*]] = and i8 [[X:%.*]], -64
|
|
; CHECK-NEXT: [[A:%.*]] = add i8 [[Y]], [[M]]
|
|
; CHECK-NEXT: call void @use(i8 [[A]])
|
|
; CHECK-NEXT: [[S:%.*]] = sub i8 [[A]], [[Z:%.*]]
|
|
; CHECK-NEXT: [[R:%.*]] = shl i8 [[S]], 1
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%y = mul i8 %p, %p ; thwart complexity-based canonicalization
|
|
%m = and i8 %x, -64 ; 0xC0
|
|
%a = add i8 %y, %m
|
|
call void @use(i8 %a)
|
|
%s = sub i8 %a, %z
|
|
%r = shl i8 %s, 1
|
|
ret i8 %r
|
|
}
|
|
|
|
define { i64, i64 } @PR57576(i64 noundef %x, i64 noundef %y, i64 noundef %z, i64 noundef %w) {
|
|
; CHECK-LABEL: @PR57576(
|
|
; CHECK-NEXT: [[ZX:%.*]] = zext i64 [[X:%.*]] to i128
|
|
; CHECK-NEXT: [[ZY:%.*]] = zext i64 [[Y:%.*]] to i128
|
|
; CHECK-NEXT: [[ZZ:%.*]] = zext i64 [[Z:%.*]] to i128
|
|
; CHECK-NEXT: [[SHY:%.*]] = shl nuw i128 [[ZY]], 64
|
|
; CHECK-NEXT: [[XY:%.*]] = or disjoint i128 [[SHY]], [[ZX]]
|
|
; CHECK-NEXT: [[SUB:%.*]] = sub i128 [[XY]], [[ZZ]]
|
|
; CHECK-NEXT: [[T:%.*]] = trunc i128 [[SUB]] to i64
|
|
; CHECK-NEXT: [[TMP1:%.*]] = lshr i128 [[SUB]], 64
|
|
; CHECK-NEXT: [[DOTTR:%.*]] = trunc i128 [[TMP1]] to i64
|
|
; CHECK-NEXT: [[DOTNARROW:%.*]] = sub i64 [[DOTTR]], [[W:%.*]]
|
|
; CHECK-NEXT: [[R1:%.*]] = insertvalue { i64, i64 } poison, i64 [[T]], 0
|
|
; CHECK-NEXT: [[R2:%.*]] = insertvalue { i64, i64 } [[R1]], i64 [[DOTNARROW]], 1
|
|
; CHECK-NEXT: ret { i64, i64 } [[R2]]
|
|
;
|
|
%zx = zext i64 %x to i128
|
|
%zy = zext i64 %y to i128
|
|
%zw = zext i64 %w to i128
|
|
%zz = zext i64 %z to i128
|
|
%shy = shl nuw i128 %zy, 64
|
|
%mw = mul i128 %zw, -18446744073709551616
|
|
%xy = or i128 %shy, %zx
|
|
%sub = sub i128 %xy, %zz
|
|
%add = add i128 %sub, %mw
|
|
%t = trunc i128 %add to i64
|
|
%h = lshr i128 %add, 64
|
|
%t2 = trunc i128 %h to i64
|
|
%r1 = insertvalue { i64, i64 } poison, i64 %t, 0
|
|
%r2 = insertvalue { i64, i64 } %r1, i64 %t2, 1
|
|
ret { i64, i64 } %r2
|
|
}
|
|
|
|
define i8 @mul_negpow2(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @mul_negpow2(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = shl i8 [[X:%.*]], 1
|
|
; CHECK-NEXT: [[A:%.*]] = sub i8 [[Y:%.*]], [[TMP1]]
|
|
; CHECK-NEXT: ret i8 [[A]]
|
|
;
|
|
%m = mul i8 %x, -2
|
|
%a = add i8 %m, %y
|
|
ret i8 %a
|
|
}
|
|
|
|
define <2 x i8> @mul_negpow2_commute_vec(<2 x i8> %x, <2 x i8> %p) {
|
|
; CHECK-LABEL: @mul_negpow2_commute_vec(
|
|
; CHECK-NEXT: [[Y:%.*]] = mul <2 x i8> [[P:%.*]], [[P]]
|
|
; CHECK-NEXT: [[TMP1:%.*]] = shl <2 x i8> [[X:%.*]], <i8 3, i8 3>
|
|
; CHECK-NEXT: [[A:%.*]] = sub <2 x i8> [[Y]], [[TMP1]]
|
|
; CHECK-NEXT: ret <2 x i8> [[A]]
|
|
;
|
|
%y = mul <2 x i8> %p, %p ; thwart complexity-based canonicalization
|
|
%m = mul <2 x i8> %x, <i8 -8, i8 -8>
|
|
%a = add <2 x i8> %y, %m
|
|
ret <2 x i8> %a
|
|
}
|
|
|
|
; negative test - extra use
|
|
|
|
define i8 @mul_negpow2_use(i8 %x) {
|
|
; CHECK-LABEL: @mul_negpow2_use(
|
|
; CHECK-NEXT: [[M:%.*]] = mul i8 [[X:%.*]], -2
|
|
; CHECK-NEXT: call void @use(i8 [[M]])
|
|
; CHECK-NEXT: [[A:%.*]] = add i8 [[M]], 42
|
|
; CHECK-NEXT: ret i8 [[A]]
|
|
;
|
|
%m = mul i8 %x, -2
|
|
call void @use(i8 %m)
|
|
%a = add i8 %m, 42
|
|
ret i8 %a
|
|
}
|
|
|
|
; negative test - not negative-power-of-2 multiplier
|
|
|
|
define i8 @mul_not_negpow2(i8 %x) {
|
|
; CHECK-LABEL: @mul_not_negpow2(
|
|
; CHECK-NEXT: [[M:%.*]] = mul i8 [[X:%.*]], -3
|
|
; CHECK-NEXT: [[A:%.*]] = add i8 [[M]], 42
|
|
; CHECK-NEXT: ret i8 [[A]]
|
|
;
|
|
%m = mul i8 %x, -3
|
|
%a = add i8 %m, 42
|
|
ret i8 %a
|
|
}
|
|
|
|
define i16 @add_sub_zext(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @add_sub_zext(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i16
|
|
; CHECK-NEXT: ret i16 [[TMP1]]
|
|
;
|
|
%1 = sub nuw i8 %y, %x
|
|
%2 = zext i8 %1 to i16
|
|
%3 = zext i8 %x to i16
|
|
%4 = add i16 %2, %3
|
|
ret i16 %4
|
|
}
|
|
|
|
define i16 @add_commute_sub_zext(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @add_commute_sub_zext(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i16
|
|
; CHECK-NEXT: ret i16 [[TMP1]]
|
|
;
|
|
%1 = sub nuw i8 %y, %x
|
|
%2 = zext i8 %1 to i16
|
|
%3 = zext i8 %x to i16
|
|
%4 = add i16 %3, %2
|
|
ret i16 %4
|
|
}
|
|
|
|
define <2 x i8> @add_sub_2xi5_zext(<2 x i5> %x, <2 x i5> %y) {
|
|
; CHECK-LABEL: @add_sub_2xi5_zext(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i5> [[Y:%.*]] to <2 x i8>
|
|
; CHECK-NEXT: ret <2 x i8> [[TMP1]]
|
|
;
|
|
%1 = sub nuw <2 x i5> %y, %x
|
|
%2 = zext <2 x i5> %1 to <2 x i8>
|
|
%3 = zext <2 x i5> %x to <2 x i8>
|
|
%4 = add <2 x i8> %3, %2
|
|
ret <2 x i8> %4
|
|
}
|
|
|
|
|
|
define i3 @add_commute_sub_i2_zext_i3(i2 %x, i2 %y) {
|
|
; CHECK-LABEL: @add_commute_sub_i2_zext_i3(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = zext i2 [[Y:%.*]] to i3
|
|
; CHECK-NEXT: ret i3 [[TMP1]]
|
|
;
|
|
%1 = sub nuw i2 %y, %x
|
|
%2 = zext i2 %1 to i3
|
|
%3 = zext i2 %x to i3
|
|
%4 = add i3 %3, %2
|
|
ret i3 %4
|
|
}
|
|
|
|
define i16 @add_sub_use_zext(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @add_sub_use_zext(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = sub nuw i8 [[Y:%.*]], [[X:%.*]]
|
|
; CHECK-NEXT: call void @use(i8 [[TMP1]])
|
|
; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[Y]] to i16
|
|
; CHECK-NEXT: ret i16 [[TMP2]]
|
|
;
|
|
%1 = sub nuw i8 %y, %x
|
|
call void @use(i8 %1)
|
|
%2 = zext i8 %1 to i16
|
|
%3 = zext i8 %x to i16
|
|
%4 = add i16 %2, %3
|
|
ret i16 %4
|
|
}
|
|
|
|
; Negative test: x - y + x != y
|
|
define i16 @add_sub_commute_zext(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @add_sub_commute_zext(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = sub nuw i8 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[TMP1]] to i16
|
|
; CHECK-NEXT: [[TMP3:%.*]] = zext i8 [[X]] to i16
|
|
; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i16 [[TMP2]], [[TMP3]]
|
|
; CHECK-NEXT: ret i16 [[TMP4]]
|
|
;
|
|
%1 = sub nuw i8 %x, %y
|
|
%2 = zext i8 %1 to i16
|
|
%3 = zext i8 %x to i16
|
|
%4 = add i16 %2, %3
|
|
ret i16 %4
|
|
}
|
|
|
|
; Negative test: no nuw flags
|
|
define i16 @add_no_nuw_sub_zext(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @add_no_nuw_sub_zext(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 [[Y:%.*]], [[X:%.*]]
|
|
; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[TMP1]] to i16
|
|
; CHECK-NEXT: [[TMP3:%.*]] = zext i8 [[X]] to i16
|
|
; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i16 [[TMP3]], [[TMP2]]
|
|
; CHECK-NEXT: ret i16 [[TMP4]]
|
|
;
|
|
%1 = sub i8 %y, %x
|
|
%2 = zext i8 %1 to i16
|
|
%3 = zext i8 %x to i16
|
|
%4 = add i16 %3, %2
|
|
ret i16 %4
|
|
}
|
|
|
|
define i16 @add_no_nuw_sub_commute_zext(i8 %x, i8 %y) {
|
|
; CHECK-LABEL: @add_no_nuw_sub_commute_zext(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 [[X:%.*]], [[Y:%.*]]
|
|
; CHECK-NEXT: [[TMP2:%.*]] = zext i8 [[TMP1]] to i16
|
|
; CHECK-NEXT: [[TMP3:%.*]] = zext i8 [[X]] to i16
|
|
; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i16 [[TMP3]], [[TMP2]]
|
|
; CHECK-NEXT: ret i16 [[TMP4]]
|
|
;
|
|
%1 = sub i8 %x, %y
|
|
%2 = zext i8 %1 to i16
|
|
%3 = zext i8 %x to i16
|
|
%4 = add i16 %3, %2
|
|
ret i16 %4
|
|
}
|
|
|
|
define i16 @add_sub_zext_constant(i8 %x) {
|
|
; CHECK-LABEL: @add_sub_zext_constant(
|
|
; CHECK-NEXT: ret i16 254
|
|
;
|
|
%1 = sub nuw i8 254, %x
|
|
%2 = zext i8 %1 to i16
|
|
%3 = zext i8 %x to i16
|
|
%4 = add i16 %2, %3
|
|
ret i16 %4
|
|
}
|
|
|
|
define <vscale x 1 x i32> @add_to_or_scalable(<vscale x 1 x i32> %in) {
|
|
; CHECK-LABEL: @add_to_or_scalable(
|
|
; CHECK-NEXT: [[SHL:%.*]] = shl <vscale x 1 x i32> [[IN:%.*]], shufflevector (<vscale x 1 x i32> insertelement (<vscale x 1 x i32> poison, i32 1, i32 0), <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer)
|
|
; CHECK-NEXT: [[ADD:%.*]] = or disjoint <vscale x 1 x i32> [[SHL]], shufflevector (<vscale x 1 x i32> insertelement (<vscale x 1 x i32> poison, i32 1, i32 0), <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer)
|
|
; CHECK-NEXT: ret <vscale x 1 x i32> [[ADD]]
|
|
;
|
|
%shl = shl <vscale x 1 x i32> %in, shufflevector (<vscale x 1 x i32> insertelement (<vscale x 1 x i32> poison, i32 1, i32 0), <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer)
|
|
%add = add <vscale x 1 x i32> %shl, shufflevector (<vscale x 1 x i32> insertelement (<vscale x 1 x i32> poison, i32 1, i32 0), <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer)
|
|
ret <vscale x 1 x i32> %add
|
|
}
|
|
|
|
define i5 @zext_zext_not(i3 noundef %x) {
|
|
; CHECK-LABEL: @zext_zext_not(
|
|
; CHECK-NEXT: ret i5 7
|
|
;
|
|
%zx = zext i3 %x to i5
|
|
%notx = xor i3 %x, -1
|
|
%znotx = zext i3 %notx to i5
|
|
%r = add i5 %zx, %znotx
|
|
ret i5 %r
|
|
}
|
|
|
|
define <2 x i5> @zext_zext_not_commute(<2 x i3> noundef %x) {
|
|
; CHECK-LABEL: @zext_zext_not_commute(
|
|
; CHECK-NEXT: ret <2 x i5> <i5 7, i5 7>
|
|
;
|
|
%zx = zext <2 x i3> %x to <2 x i5>
|
|
%notx = xor <2 x i3> %x, <i3 -1, i3 poison>
|
|
%znotx = zext <2 x i3> %notx to <2 x i5>
|
|
%r = add <2 x i5> %znotx, %zx
|
|
ret <2 x i5> %r
|
|
}
|
|
|
|
define i9 @sext_sext_not(i3 noundef %x) {
|
|
; CHECK-LABEL: @sext_sext_not(
|
|
; CHECK-NEXT: ret i9 -1
|
|
;
|
|
%sx = sext i3 %x to i9
|
|
%notx = xor i3 %x, -1
|
|
%snotx = sext i3 %notx to i9
|
|
%r = add i9 %sx, %snotx
|
|
ret i9 %r
|
|
}
|
|
|
|
define i8 @sext_sext_not_commute(i3 noundef %x) {
|
|
; CHECK-LABEL: @sext_sext_not_commute(
|
|
; CHECK-NEXT: [[SX:%.*]] = sext i3 [[X:%.*]] to i8
|
|
; CHECK-NEXT: call void @use(i8 [[SX]])
|
|
; CHECK-NEXT: ret i8 -1
|
|
;
|
|
|
|
%sx = sext i3 %x to i8
|
|
call void @use(i8 %sx)
|
|
%notx = xor i3 %x, -1
|
|
%snotx = sext i3 %notx to i8
|
|
%r = add i8 %snotx, %sx
|
|
ret i8 %r
|
|
}
|
|
|
|
define i5 @zext_sext_not(i4 noundef %x) {
|
|
; CHECK-LABEL: @zext_sext_not(
|
|
; CHECK-NEXT: [[ZX:%.*]] = zext i4 [[X:%.*]] to i5
|
|
; CHECK-NEXT: [[NOTX:%.*]] = xor i4 [[X]], -1
|
|
; CHECK-NEXT: [[SNOTX:%.*]] = sext i4 [[NOTX]] to i5
|
|
; CHECK-NEXT: [[R:%.*]] = or disjoint i5 [[ZX]], [[SNOTX]]
|
|
; CHECK-NEXT: ret i5 [[R]]
|
|
;
|
|
%zx = zext i4 %x to i5
|
|
%notx = xor i4 %x, -1
|
|
%snotx = sext i4 %notx to i5
|
|
%r = add i5 %zx, %snotx
|
|
ret i5 %r
|
|
}
|
|
|
|
define i8 @zext_sext_not_commute(i4 noundef %x) {
|
|
; CHECK-LABEL: @zext_sext_not_commute(
|
|
; CHECK-NEXT: [[ZX:%.*]] = zext i4 [[X:%.*]] to i8
|
|
; CHECK-NEXT: call void @use(i8 [[ZX]])
|
|
; CHECK-NEXT: [[NOTX:%.*]] = xor i4 [[X]], -1
|
|
; CHECK-NEXT: [[SNOTX:%.*]] = sext i4 [[NOTX]] to i8
|
|
; CHECK-NEXT: call void @use(i8 [[SNOTX]])
|
|
; CHECK-NEXT: [[R:%.*]] = or disjoint i8 [[SNOTX]], [[ZX]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%zx = zext i4 %x to i8
|
|
call void @use(i8 %zx)
|
|
%notx = xor i4 %x, -1
|
|
%snotx = sext i4 %notx to i8
|
|
call void @use(i8 %snotx)
|
|
%r = add i8 %snotx, %zx
|
|
ret i8 %r
|
|
}
|
|
|
|
define i9 @sext_zext_not(i4 noundef %x) {
|
|
; CHECK-LABEL: @sext_zext_not(
|
|
; CHECK-NEXT: [[SX:%.*]] = sext i4 [[X:%.*]] to i9
|
|
; CHECK-NEXT: [[NOTX:%.*]] = xor i4 [[X]], -1
|
|
; CHECK-NEXT: [[ZNOTX:%.*]] = zext i4 [[NOTX]] to i9
|
|
; CHECK-NEXT: [[R:%.*]] = or disjoint i9 [[SX]], [[ZNOTX]]
|
|
; CHECK-NEXT: ret i9 [[R]]
|
|
;
|
|
%sx = sext i4 %x to i9
|
|
%notx = xor i4 %x, -1
|
|
%znotx = zext i4 %notx to i9
|
|
%r = add i9 %sx, %znotx
|
|
ret i9 %r
|
|
}
|
|
|
|
define i9 @sext_zext_not_commute(i4 noundef %x) {
|
|
; CHECK-LABEL: @sext_zext_not_commute(
|
|
; CHECK-NEXT: [[SX:%.*]] = sext i4 [[X:%.*]] to i9
|
|
; CHECK-NEXT: [[NOTX:%.*]] = xor i4 [[X]], -1
|
|
; CHECK-NEXT: [[ZNOTX:%.*]] = zext i4 [[NOTX]] to i9
|
|
; CHECK-NEXT: [[R:%.*]] = or disjoint i9 [[ZNOTX]], [[SX]]
|
|
; CHECK-NEXT: ret i9 [[R]]
|
|
;
|
|
%sx = sext i4 %x to i9
|
|
%notx = xor i4 %x, -1
|
|
%znotx = zext i4 %notx to i9
|
|
%r = add i9 %znotx, %sx
|
|
ret i9 %r
|
|
}
|
|
|
|
; PR57741
|
|
|
|
define i32 @floor_sdiv(i32 %x) {
|
|
; CHECK-LABEL: @floor_sdiv(
|
|
; CHECK-NEXT: [[R:%.*]] = ashr i32 [[X:%.*]], 2
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%d = sdiv i32 %x, 4
|
|
%a = and i32 %x, -2147483645
|
|
%i = icmp ugt i32 %a, -2147483648
|
|
%s = sext i1 %i to i32
|
|
%r = add i32 %d, %s
|
|
ret i32 %r
|
|
}
|
|
|
|
define i8 @floor_sdiv_by_2(i8 %x) {
|
|
; CHECK-LABEL: @floor_sdiv_by_2(
|
|
; CHECK-NEXT: [[RV:%.*]] = ashr i8 [[X:%.*]], 1
|
|
; CHECK-NEXT: ret i8 [[RV]]
|
|
;
|
|
%div = sdiv i8 %x, 2
|
|
%and = and i8 %x, -127
|
|
%icmp = icmp eq i8 %and, -127
|
|
%sext = sext i1 %icmp to i8
|
|
%rv = add nsw i8 %div, %sext
|
|
ret i8 %rv
|
|
}
|
|
|
|
define i8 @floor_sdiv_by_2_wrong_mask(i8 %x) {
|
|
; CHECK-LABEL: @floor_sdiv_by_2_wrong_mask(
|
|
; CHECK-NEXT: [[DIV:%.*]] = sdiv i8 [[X:%.*]], 2
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], 127
|
|
; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i8 [[AND]], 127
|
|
; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[ICMP]] to i8
|
|
; CHECK-NEXT: [[RV:%.*]] = add nsw i8 [[DIV]], [[SEXT]]
|
|
; CHECK-NEXT: ret i8 [[RV]]
|
|
;
|
|
%div = sdiv i8 %x, 2
|
|
%and = and i8 %x, 127
|
|
%icmp = icmp eq i8 %and, 127
|
|
%sext = sext i1 %icmp to i8
|
|
%rv = add nsw i8 %div, %sext
|
|
ret i8 %rv
|
|
}
|
|
|
|
define i8 @floor_sdiv_by_2_wrong_constant(i8 %x) {
|
|
; CHECK-LABEL: @floor_sdiv_by_2_wrong_constant(
|
|
; CHECK-NEXT: [[DIV:%.*]] = sdiv i8 [[X:%.*]], 4
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], -125
|
|
; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i8 [[AND]], -125
|
|
; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[ICMP]] to i8
|
|
; CHECK-NEXT: [[RV:%.*]] = add nsw i8 [[DIV]], [[SEXT]]
|
|
; CHECK-NEXT: ret i8 [[RV]]
|
|
;
|
|
%div = sdiv i8 %x, 4
|
|
%and = and i8 %x, -125
|
|
%icmp = icmp eq i8 %and, -125
|
|
%sext = sext i1 %icmp to i8
|
|
%rv = add nsw i8 %div, %sext
|
|
ret i8 %rv
|
|
}
|
|
|
|
define i8 @floor_sdiv_by_2_wrong_cast(i8 %x) {
|
|
; CHECK-LABEL: @floor_sdiv_by_2_wrong_cast(
|
|
; CHECK-NEXT: [[DIV:%.*]] = sdiv i8 [[X:%.*]], 2
|
|
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], -127
|
|
; CHECK-NEXT: [[ICMP:%.*]] = icmp eq i8 [[AND]], -127
|
|
; CHECK-NEXT: [[SEXT:%.*]] = zext i1 [[ICMP]] to i8
|
|
; CHECK-NEXT: [[RV:%.*]] = add nsw i8 [[DIV]], [[SEXT]]
|
|
; CHECK-NEXT: ret i8 [[RV]]
|
|
;
|
|
%div = sdiv i8 %x, 2
|
|
%and = and i8 %x, -127
|
|
%icmp = icmp eq i8 %and, -127
|
|
%sext = zext i1 %icmp to i8
|
|
%rv = add nsw i8 %div, %sext
|
|
ret i8 %rv
|
|
}
|
|
|
|
; vectors work too and commute is handled by complexity-based canonicalization
|
|
|
|
define <2 x i32> @floor_sdiv_vec_commute(<2 x i32> %x) {
|
|
; CHECK-LABEL: @floor_sdiv_vec_commute(
|
|
; CHECK-NEXT: [[R:%.*]] = ashr <2 x i32> [[X:%.*]], <i32 2, i32 2>
|
|
; CHECK-NEXT: ret <2 x i32> [[R]]
|
|
;
|
|
%d = sdiv <2 x i32> %x, <i32 4, i32 4>
|
|
%a = and <2 x i32> %x, <i32 -2147483645, i32 -2147483645>
|
|
%i = icmp ugt <2 x i32> %a, <i32 -2147483648, i32 -2147483648>
|
|
%s = sext <2 x i1> %i to <2 x i32>
|
|
%r = add <2 x i32> %s, %d
|
|
ret <2 x i32> %r
|
|
}
|
|
|
|
; extra uses are ok
|
|
|
|
define i8 @floor_sdiv_uses(i8 %x) {
|
|
; CHECK-LABEL: @floor_sdiv_uses(
|
|
; CHECK-NEXT: [[D:%.*]] = sdiv i8 [[X:%.*]], 16
|
|
; CHECK-NEXT: call void @use(i8 [[D]])
|
|
; CHECK-NEXT: [[A:%.*]] = and i8 [[X]], -113
|
|
; CHECK-NEXT: call void @use(i8 [[A]])
|
|
; CHECK-NEXT: [[I:%.*]] = icmp ugt i8 [[A]], -128
|
|
; CHECK-NEXT: [[S:%.*]] = sext i1 [[I]] to i8
|
|
; CHECK-NEXT: call void @use(i8 [[S]])
|
|
; CHECK-NEXT: [[R:%.*]] = ashr i8 [[X]], 4
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%d = sdiv i8 %x, 16
|
|
call void @use(i8 %d)
|
|
%a = and i8 %x, 143 ; 128 + 15
|
|
call void @use(i8 %a)
|
|
%i = icmp ugt i8 %a, 128
|
|
%s = sext i1 %i to i8
|
|
call void @use(i8 %s)
|
|
%r = add i8 %d, %s
|
|
ret i8 %r
|
|
}
|
|
|
|
; negative test
|
|
|
|
define i32 @floor_sdiv_wrong_div(i32 %x) {
|
|
; CHECK-LABEL: @floor_sdiv_wrong_div(
|
|
; CHECK-NEXT: [[D:%.*]] = sdiv i32 [[X:%.*]], 8
|
|
; CHECK-NEXT: [[A:%.*]] = and i32 [[X]], -2147483645
|
|
; CHECK-NEXT: [[I:%.*]] = icmp ugt i32 [[A]], -2147483648
|
|
; CHECK-NEXT: [[S:%.*]] = sext i1 [[I]] to i32
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[D]], [[S]]
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%d = sdiv i32 %x, 8
|
|
%a = and i32 %x, -2147483645
|
|
%i = icmp ugt i32 %a, -2147483648
|
|
%s = sext i1 %i to i32
|
|
%r = add i32 %d, %s
|
|
ret i32 %r
|
|
}
|
|
|
|
; negative test
|
|
|
|
define i32 @floor_sdiv_wrong_mask(i32 %x) {
|
|
; CHECK-LABEL: @floor_sdiv_wrong_mask(
|
|
; CHECK-NEXT: [[D:%.*]] = sdiv i32 [[X:%.*]], 4
|
|
; CHECK-NEXT: [[A:%.*]] = and i32 [[X]], -2147483644
|
|
; CHECK-NEXT: [[I:%.*]] = icmp ugt i32 [[A]], -2147483648
|
|
; CHECK-NEXT: [[S:%.*]] = sext i1 [[I]] to i32
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[D]], [[S]]
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%d = sdiv i32 %x, 4
|
|
%a = and i32 %x, -2147483644
|
|
%i = icmp ugt i32 %a, -2147483648
|
|
%s = sext i1 %i to i32
|
|
%r = add i32 %d, %s
|
|
ret i32 %r
|
|
}
|
|
|
|
; negative test
|
|
|
|
define i32 @floor_sdiv_wrong_cmp(i32 %x) {
|
|
; CHECK-LABEL: @floor_sdiv_wrong_cmp(
|
|
; CHECK-NEXT: [[D:%.*]] = sdiv i32 [[X:%.*]], 4
|
|
; CHECK-NEXT: [[A:%.*]] = and i32 [[X]], -2147483646
|
|
; CHECK-NEXT: [[I:%.*]] = icmp eq i32 [[A]], -2147483646
|
|
; CHECK-NEXT: [[S:%.*]] = sext i1 [[I]] to i32
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[D]], [[S]]
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%d = sdiv i32 %x, 4
|
|
%a = and i32 %x, -2147483645
|
|
%i = icmp ugt i32 %a, -2147483647
|
|
%s = sext i1 %i to i32
|
|
%r = add i32 %d, %s
|
|
ret i32 %r
|
|
}
|
|
|
|
; negative test
|
|
|
|
define i32 @floor_sdiv_wrong_ext(i32 %x) {
|
|
; CHECK-LABEL: @floor_sdiv_wrong_ext(
|
|
; CHECK-NEXT: [[D:%.*]] = sdiv i32 [[X:%.*]], 4
|
|
; CHECK-NEXT: [[A:%.*]] = and i32 [[X]], -2147483645
|
|
; CHECK-NEXT: [[I:%.*]] = icmp ugt i32 [[A]], -2147483648
|
|
; CHECK-NEXT: [[S:%.*]] = zext i1 [[I]] to i32
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[D]], [[S]]
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%d = sdiv i32 %x, 4
|
|
%a = and i32 %x, -2147483645
|
|
%i = icmp ugt i32 %a, -2147483648
|
|
%s = zext i1 %i to i32
|
|
%r = add i32 %d, %s
|
|
ret i32 %r
|
|
}
|
|
|
|
; negative test
|
|
|
|
define i32 @floor_sdiv_wrong_op(i32 %x, i32 %y) {
|
|
; CHECK-LABEL: @floor_sdiv_wrong_op(
|
|
; CHECK-NEXT: [[D:%.*]] = sdiv i32 [[X:%.*]], 4
|
|
; CHECK-NEXT: [[A:%.*]] = and i32 [[Y:%.*]], -2147483645
|
|
; CHECK-NEXT: [[I:%.*]] = icmp ugt i32 [[A]], -2147483648
|
|
; CHECK-NEXT: [[S:%.*]] = zext i1 [[I]] to i32
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[D]], [[S]]
|
|
; CHECK-NEXT: ret i32 [[R]]
|
|
;
|
|
%d = sdiv i32 %x, 4
|
|
%a = and i32 %y, -2147483645
|
|
%i = icmp ugt i32 %a, -2147483648
|
|
%s = zext i1 %i to i32
|
|
%r = add i32 %d, %s
|
|
ret i32 %r
|
|
}
|
|
|
|
; (X s>> (BW - 1)) + (zext (X s> 0)) --> (X s>> (BW - 1)) | (zext (X != 0))
|
|
|
|
define i8 @signum_i8_i8(i8 %x) {
|
|
; CHECK-LABEL: @signum_i8_i8(
|
|
; CHECK-NEXT: [[SIGNBIT:%.*]] = ashr i8 [[X:%.*]], 7
|
|
; CHECK-NEXT: [[ISNOTNULL:%.*]] = icmp ne i8 [[X]], 0
|
|
; CHECK-NEXT: [[ISNOTNULL_ZEXT:%.*]] = zext i1 [[ISNOTNULL]] to i8
|
|
; CHECK-NEXT: [[R:%.*]] = or i8 [[SIGNBIT]], [[ISNOTNULL_ZEXT]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%sgt0 = icmp sgt i8 %x, 0
|
|
%zgt0 = zext i1 %sgt0 to i8
|
|
%signbit = ashr i8 %x, 7
|
|
%r = add i8 %zgt0, %signbit
|
|
ret i8 %r
|
|
}
|
|
|
|
; extra use of shift is ok
|
|
|
|
define i8 @signum_i8_i8_use1(i8 %x) {
|
|
; CHECK-LABEL: @signum_i8_i8_use1(
|
|
; CHECK-NEXT: [[SIGNBIT:%.*]] = ashr i8 [[X:%.*]], 7
|
|
; CHECK-NEXT: call void @use(i8 [[SIGNBIT]])
|
|
; CHECK-NEXT: [[ISNOTNULL:%.*]] = icmp ne i8 [[X]], 0
|
|
; CHECK-NEXT: [[ISNOTNULL_ZEXT:%.*]] = zext i1 [[ISNOTNULL]] to i8
|
|
; CHECK-NEXT: [[R:%.*]] = or i8 [[SIGNBIT]], [[ISNOTNULL_ZEXT]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%sgt0 = icmp sgt i8 %x, 0
|
|
%zgt0 = zext i1 %sgt0 to i8
|
|
%signbit = ashr i8 %x, 7
|
|
call void @use(i8 %signbit)
|
|
%r = add i8 %zgt0, %signbit
|
|
ret i8 %r
|
|
}
|
|
|
|
; negative test
|
|
|
|
define i8 @signum_i8_i8_use2(i8 %x) {
|
|
; CHECK-LABEL: @signum_i8_i8_use2(
|
|
; CHECK-NEXT: [[SGT0:%.*]] = icmp sgt i8 [[X:%.*]], 0
|
|
; CHECK-NEXT: [[ZGT0:%.*]] = zext i1 [[SGT0]] to i8
|
|
; CHECK-NEXT: call void @use(i8 [[ZGT0]])
|
|
; CHECK-NEXT: [[SIGNBIT:%.*]] = ashr i8 [[X]], 7
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[SIGNBIT]], [[ZGT0]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%sgt0 = icmp sgt i8 %x, 0
|
|
%zgt0 = zext i1 %sgt0 to i8
|
|
call void @use(i8 %zgt0)
|
|
%signbit = ashr i8 %x, 7
|
|
%r = add i8 %zgt0, %signbit
|
|
ret i8 %r
|
|
}
|
|
|
|
; negative test
|
|
|
|
define i8 @signum_i8_i8_use3(i8 %x) {
|
|
; CHECK-LABEL: @signum_i8_i8_use3(
|
|
; CHECK-NEXT: [[SGT0:%.*]] = icmp sgt i8 [[X:%.*]], 0
|
|
; CHECK-NEXT: call void @use_i1(i1 [[SGT0]])
|
|
; CHECK-NEXT: [[ZGT0:%.*]] = zext i1 [[SGT0]] to i8
|
|
; CHECK-NEXT: [[SIGNBIT:%.*]] = ashr i8 [[X]], 7
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[SIGNBIT]], [[ZGT0]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%sgt0 = icmp sgt i8 %x, 0
|
|
call void @use_i1(i1 %sgt0)
|
|
%zgt0 = zext i1 %sgt0 to i8
|
|
%signbit = ashr i8 %x, 7
|
|
%r = add i8 %zgt0, %signbit
|
|
ret i8 %r
|
|
}
|
|
|
|
; poison/undef is ok to propagate in shift amount
|
|
; complexity canonicalization guarantees that shift is op0 of add
|
|
|
|
define <2 x i5> @signum_v2i5_v2i5(<2 x i5> %x) {
|
|
; CHECK-LABEL: @signum_v2i5_v2i5(
|
|
; CHECK-NEXT: [[SIGNBIT:%.*]] = ashr <2 x i5> [[X:%.*]], <i5 4, i5 poison>
|
|
; CHECK-NEXT: [[ISNOTNULL:%.*]] = icmp ne <2 x i5> [[X]], zeroinitializer
|
|
; CHECK-NEXT: [[ISNOTNULL_ZEXT:%.*]] = zext <2 x i1> [[ISNOTNULL]] to <2 x i5>
|
|
; CHECK-NEXT: [[R:%.*]] = or <2 x i5> [[SIGNBIT]], [[ISNOTNULL_ZEXT]]
|
|
; CHECK-NEXT: ret <2 x i5> [[R]]
|
|
;
|
|
%sgt0 = icmp sgt <2 x i5> %x, zeroinitializer
|
|
%zgt0 = zext <2 x i1> %sgt0 to <2 x i5>
|
|
%signbit = ashr <2 x i5> %x, <i5 4, i5 poison>
|
|
%r = add <2 x i5> %signbit, %zgt0
|
|
ret <2 x i5> %r
|
|
}
|
|
|
|
; negative test
|
|
|
|
define i8 @signum_i8_i8_wrong_sh_amt(i8 %x) {
|
|
; CHECK-LABEL: @signum_i8_i8_wrong_sh_amt(
|
|
; CHECK-NEXT: [[SGT0:%.*]] = icmp sgt i8 [[X:%.*]], 0
|
|
; CHECK-NEXT: [[ZGT0:%.*]] = zext i1 [[SGT0]] to i8
|
|
; CHECK-NEXT: [[SIGNBIT:%.*]] = ashr i8 [[X]], 6
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[SIGNBIT]], [[ZGT0]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%sgt0 = icmp sgt i8 %x, 0
|
|
%zgt0 = zext i1 %sgt0 to i8
|
|
%signbit = ashr i8 %x, 6
|
|
%r = add i8 %zgt0, %signbit
|
|
ret i8 %r
|
|
}
|
|
|
|
; negative test
|
|
|
|
define i8 @signum_i8_i8_wrong_ext(i8 %x) {
|
|
; CHECK-LABEL: @signum_i8_i8_wrong_ext(
|
|
; CHECK-NEXT: [[SGT0:%.*]] = icmp sgt i8 [[X:%.*]], 0
|
|
; CHECK-NEXT: [[ZGT0:%.*]] = sext i1 [[SGT0]] to i8
|
|
; CHECK-NEXT: [[SIGNBIT:%.*]] = ashr i8 [[X]], 7
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[SIGNBIT]], [[ZGT0]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%sgt0 = icmp sgt i8 %x, 0
|
|
%zgt0 = sext i1 %sgt0 to i8
|
|
%signbit = ashr i8 %x, 7
|
|
%r = add i8 %zgt0, %signbit
|
|
ret i8 %r
|
|
}
|
|
|
|
; negative test
|
|
|
|
define i8 @signum_i8_i8_wrong_pred(i8 %x) {
|
|
; CHECK-LABEL: @signum_i8_i8_wrong_pred(
|
|
; CHECK-NEXT: [[SGT0:%.*]] = icmp sgt i8 [[X:%.*]], -1
|
|
; CHECK-NEXT: [[ZGT0:%.*]] = zext i1 [[SGT0]] to i8
|
|
; CHECK-NEXT: [[SIGNBIT:%.*]] = ashr i8 [[X]], 7
|
|
; CHECK-NEXT: [[R:%.*]] = add nsw i8 [[SIGNBIT]], [[ZGT0]]
|
|
; CHECK-NEXT: ret i8 [[R]]
|
|
;
|
|
%sgt0 = icmp sge i8 %x, 0
|
|
%zgt0 = zext i1 %sgt0 to i8
|
|
%signbit = ashr i8 %x, 7
|
|
%r = add i8 %zgt0, %signbit
|
|
ret i8 %r
|
|
}
|
|
|
|
define i32 @dec_zext_add_assume_nonzero(i8 %x) {
|
|
; CHECK-LABEL: @dec_zext_add_assume_nonzero(
|
|
; CHECK-NEXT: [[Z:%.*]] = icmp ne i8 [[X:%.*]], 0
|
|
; CHECK-NEXT: call void @llvm.assume(i1 [[Z]])
|
|
; CHECK-NEXT: [[C:%.*]] = zext i8 [[X]] to i32
|
|
; CHECK-NEXT: ret i32 [[C]]
|
|
;
|
|
%z = icmp ne i8 %x, 0
|
|
call void @llvm.assume(i1 %z)
|
|
%a = add i8 %x, -1
|
|
%b = zext i8 %a to i32
|
|
%c = add i32 %b, 1
|
|
ret i32 %c
|
|
}
|
|
|
|
define i32 @dec_zext_add_nonzero(i8 %x) {
|
|
; CHECK-LABEL: @dec_zext_add_nonzero(
|
|
; CHECK-NEXT: [[O:%.*]] = or i8 [[X:%.*]], 4
|
|
; CHECK-NEXT: [[C:%.*]] = zext i8 [[O]] to i32
|
|
; CHECK-NEXT: ret i32 [[C]]
|
|
;
|
|
%o = or i8 %x, 4
|
|
%a = add i8 %o, -1
|
|
%b = zext i8 %a to i32
|
|
%c = add i32 %b, 1
|
|
ret i32 %c
|
|
}
|
|
|
|
define <2 x i32> @dec_zext_add_nonzero_vec(<2 x i8> %x) {
|
|
; CHECK-LABEL: @dec_zext_add_nonzero_vec(
|
|
; CHECK-NEXT: [[O:%.*]] = or <2 x i8> [[X:%.*]], <i8 8, i8 8>
|
|
; CHECK-NEXT: [[C:%.*]] = zext <2 x i8> [[O]] to <2 x i32>
|
|
; CHECK-NEXT: ret <2 x i32> [[C]]
|
|
;
|
|
%o = or <2 x i8> %x, <i8 8, i8 8>
|
|
%a = add <2 x i8> %o, <i8 -1, i8 -1>
|
|
%b = zext <2 x i8> %a to <2 x i32>
|
|
%c = add <2 x i32> %b, <i32 1, i32 1>
|
|
ret <2 x i32> %c
|
|
}
|
|
|
|
define <2 x i32> @dec_zext_add_nonzero_vec_poison1(<2 x i8> %x) {
|
|
; CHECK-LABEL: @dec_zext_add_nonzero_vec_poison1(
|
|
; CHECK-NEXT: [[O:%.*]] = or <2 x i8> [[X:%.*]], <i8 8, i8 8>
|
|
; CHECK-NEXT: [[C:%.*]] = zext <2 x i8> [[O]] to <2 x i32>
|
|
; CHECK-NEXT: ret <2 x i32> [[C]]
|
|
;
|
|
%o = or <2 x i8> %x, <i8 8, i8 8>
|
|
%a = add <2 x i8> %o, <i8 -1, i8 poison>
|
|
%b = zext <2 x i8> %a to <2 x i32>
|
|
%c = add <2 x i32> %b, <i32 1, i32 1>
|
|
ret <2 x i32> %c
|
|
}
|
|
|
|
define <2 x i32> @dec_zext_add_nonzero_vec_poison2(<2 x i8> %x) {
|
|
; CHECK-LABEL: @dec_zext_add_nonzero_vec_poison2(
|
|
; CHECK-NEXT: [[O:%.*]] = or <2 x i8> [[X:%.*]], <i8 8, i8 8>
|
|
; CHECK-NEXT: [[A:%.*]] = add nsw <2 x i8> [[O]], <i8 -1, i8 -1>
|
|
; CHECK-NEXT: [[B:%.*]] = zext <2 x i8> [[A]] to <2 x i32>
|
|
; CHECK-NEXT: [[C:%.*]] = add nuw nsw <2 x i32> [[B]], <i32 1, i32 poison>
|
|
; CHECK-NEXT: ret <2 x i32> [[C]]
|
|
;
|
|
%o = or <2 x i8> %x, <i8 8, i8 8>
|
|
%a = add <2 x i8> %o, <i8 -1, i8 -1>
|
|
%b = zext <2 x i8> %a to <2 x i32>
|
|
%c = add <2 x i32> %b, <i32 1, i32 poison>
|
|
ret <2 x i32> %c
|
|
}
|
|
|
|
define i32 @add_zext_sext_i1(i1 %a) {
|
|
; CHECK-LABEL: @add_zext_sext_i1(
|
|
; CHECK-NEXT: ret i32 0
|
|
;
|
|
%zext = zext i1 %a to i32
|
|
%sext = sext i1 %a to i32
|
|
%add = add i32 %zext, %sext
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_sext_zext_i1(i1 %a) {
|
|
; CHECK-LABEL: @add_sext_zext_i1(
|
|
; CHECK-NEXT: ret i32 0
|
|
;
|
|
%zext = zext i1 %a to i32
|
|
%sext = sext i1 %a to i32
|
|
%add = add i32 %sext, %zext
|
|
ret i32 %add
|
|
}
|
|
|
|
define <2 x i32> @add_zext_sext_i1_vec(<2 x i1> %a) {
|
|
; CHECK-LABEL: @add_zext_sext_i1_vec(
|
|
; CHECK-NEXT: ret <2 x i32> zeroinitializer
|
|
;
|
|
%zext = zext <2 x i1> %a to <2 x i32>
|
|
%sext = sext <2 x i1> %a to <2 x i32>
|
|
%add = add <2 x i32> %zext, %sext
|
|
ret <2 x i32> %add
|
|
}
|
|
|
|
define i32 @add_zext_zext_i1(i1 %a) {
|
|
; CHECK-LABEL: @add_zext_zext_i1(
|
|
; CHECK-NEXT: [[ADD:%.*]] = select i1 [[A:%.*]], i32 2, i32 0
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%zext = zext i1 %a to i32
|
|
%add = add i32 %zext, %zext
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_sext_sext_i1(i1 %a) {
|
|
; CHECK-LABEL: @add_sext_sext_i1(
|
|
; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[A:%.*]] to i32
|
|
; CHECK-NEXT: [[ADD:%.*]] = shl nsw i32 [[SEXT]], 1
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%sext = sext i1 %a to i32
|
|
%add = add i32 %sext, %sext
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_zext_sext_not_i1(i8 %a) {
|
|
; CHECK-LABEL: @add_zext_sext_not_i1(
|
|
; CHECK-NEXT: [[ZEXT:%.*]] = zext i8 [[A:%.*]] to i32
|
|
; CHECK-NEXT: [[SEXT:%.*]] = sext i8 [[A]] to i32
|
|
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[ZEXT]], [[SEXT]]
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%zext = zext i8 %a to i32
|
|
%sext = sext i8 %a to i32
|
|
%add = add i32 %zext, %sext
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_zext_sext_i1_different_values(i1 %a, i1 %b) {
|
|
; CHECK-LABEL: @add_zext_sext_i1_different_values(
|
|
; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[A:%.*]] to i32
|
|
; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[B:%.*]] to i32
|
|
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[ZEXT]], [[SEXT]]
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%zext = zext i1 %a to i32
|
|
%sext = sext i1 %b to i32
|
|
%add = add i32 %zext, %sext
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_nsw(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_nsw(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%two_a = shl i32 %a, 1
|
|
%two_a_plus_b = add i32 %two_a, %b
|
|
%mul = mul i32 %two_a_plus_b, %b
|
|
%add = add i32 %mul, %a_sq
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_u(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_u(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%a_sq = mul i32 %a, %a
|
|
%two_a = shl i32 %a, 1
|
|
%two_a_plus_b = add i32 %two_a, %b
|
|
%mul = mul i32 %two_a_plus_b, %b
|
|
%add = add i32 %mul, %a_sq
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_nuw(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_nuw(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%a_sq = mul nuw i32 %a, %a
|
|
%two_a = mul i32 %a, 2
|
|
%two_a_plus_b = add i32 %two_a, %b
|
|
%mul = mul nuw i32 %two_a_plus_b, %b
|
|
%add = add i32 %mul, %a_sq
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_flipped(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_flipped(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%two_a = shl i32 %a, 1
|
|
%two_a_plus_b = add i32 %two_a, %b
|
|
%mul = mul i32 %two_a_plus_b, %b
|
|
%add = add i32 %a_sq, %mul
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_flipped2(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_flipped2(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%two_a = shl i32 %a, 1
|
|
%two_a_plus_b = add i32 %two_a, %b
|
|
%mul = mul i32 %b, %two_a_plus_b
|
|
%add = add i32 %mul, %a_sq
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_flipped3(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_flipped3(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[ADD:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%two_a = shl i32 %a, 1
|
|
%two_a_plus_b = add i32 %b, %two_a
|
|
%mul = mul i32 %two_a_plus_b, %b
|
|
%add = add i32 %mul, %a_sq
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order2(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order2(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twoa = mul i32 %a, 2
|
|
%twoab = mul i32 %twoa, %b
|
|
%b_sq = mul i32 %b, %b
|
|
%twoab_b2 = add i32 %twoab, %b_sq
|
|
%ab2 = add i32 %a_sq, %twoab_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order2_flipped(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order2_flipped(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twoa = mul i32 %a, 2
|
|
%twoab = mul i32 %twoa, %b
|
|
%b_sq = mul i32 %b, %b
|
|
%twoab_b2 = add i32 %twoab, %b_sq
|
|
%ab2 = add i32 %twoab_b2, %a_sq
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order2_flipped2(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order2_flipped2(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twoa = mul i32 %a, 2
|
|
%twoab = mul i32 %twoa, %b
|
|
%b_sq = mul i32 %b, %b
|
|
%twoab_b2 = add i32 %b_sq, %twoab
|
|
%ab2 = add i32 %a_sq, %twoab_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order2_flipped3(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order2_flipped3(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twoa = mul i32 %a, 2
|
|
%twoab = mul i32 %b, %twoa
|
|
%b_sq = mul i32 %b, %b
|
|
%twoab_b2 = add i32 %twoab, %b_sq
|
|
%ab2 = add i32 %a_sq, %twoab_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order3(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order3(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twoa = mul i32 %a, 2
|
|
%twoab = mul i32 %twoa, %b
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %a_sq, %b_sq
|
|
%ab2 = add i32 %twoab, %a2_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order3_flipped(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order3_flipped(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twoa = mul i32 %a, 2
|
|
%twoab = mul i32 %twoa, %b
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %a_sq, %b_sq
|
|
%ab2 = add i32 %a2_b2, %twoab
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order3_flipped2(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order3_flipped2(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twoa = mul i32 %a, 2
|
|
%twoab = mul i32 %twoa, %b
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %b_sq, %a_sq
|
|
%ab2 = add i32 %twoab, %a2_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order3_flipped3(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order3_flipped3(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twoa = mul i32 %a, 2
|
|
%twoab = mul i32 %b, %twoa
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %a_sq, %b_sq
|
|
%ab2 = add i32 %twoab, %a2_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order4(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order4(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%ab = mul i32 %a, %b
|
|
%twoab = mul i32 %ab, 2
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %a_sq, %b_sq
|
|
%ab2 = add i32 %twoab, %a2_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order4_flipped(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order4_flipped(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%ab = mul i32 %a, %b
|
|
%twoab = mul i32 %ab, 2
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %a_sq, %b_sq
|
|
%ab2 = add i32 %a2_b2, %twoab
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order4_flipped2(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order4_flipped2(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%ab = mul i32 %a, %b
|
|
%twoab = mul i32 %ab, 2
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %b_sq, %a_sq
|
|
%ab2 = add i32 %twoab, %a2_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order4_flipped3(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order4_flipped3(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%ab = mul i32 %a, %b
|
|
%twoab = mul i32 2, %ab
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %a_sq, %b_sq
|
|
%ab2 = add i32 %twoab, %a2_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order4_flipped4(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order4_flipped4(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B:%.*]], [[A:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%ab = mul i32 %b, %a
|
|
%twoab = mul i32 %ab, 2
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %a_sq, %b_sq
|
|
%ab2 = add i32 %twoab, %a2_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order5(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order5(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B:%.*]], [[A:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twob = mul i32 %b, 2
|
|
%twoab = mul i32 %twob, %a
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %a_sq, %b_sq
|
|
%ab2 = add i32 %twoab, %a2_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order5_flipped(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order5_flipped(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B:%.*]], [[A:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twob = mul i32 %b, 2
|
|
%twoab = mul i32 %twob, %a
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %a_sq, %b_sq
|
|
%ab2 = add i32 %a2_b2, %twoab
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order5_flipped2(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order5_flipped2(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B:%.*]], [[A:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twob = mul i32 %b, 2
|
|
%twoab = mul i32 %twob, %a
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %b_sq, %a_sq
|
|
%ab2 = add i32 %twoab, %a2_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order5_flipped3(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order5_flipped3(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B:%.*]], [[A:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twob = mul i32 %b, 2
|
|
%twoab = mul i32 %a, %twob
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %a_sq, %b_sq
|
|
%ab2 = add i32 %twoab, %a2_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_order5_flipped4(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_order5_flipped4(
|
|
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B:%.*]], [[A:%.*]]
|
|
; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]]
|
|
; CHECK-NEXT: ret i32 [[AB2]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%twob = mul i32 2, %b
|
|
%twoab = mul i32 %twob, %a
|
|
%b_sq = mul i32 %b, %b
|
|
%a2_b2 = add i32 %a_sq, %b_sq
|
|
%ab2 = add i32 %twoab, %a2_b2
|
|
ret i32 %ab2
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_not_one_use(i32 %a, i32 %b) {
|
|
; CHECK-LABEL: @add_reduce_sqr_sum_not_one_use(
|
|
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
|
|
; CHECK-NEXT: [[TWO_A:%.*]] = shl i32 [[A]], 1
|
|
; CHECK-NEXT: [[TWO_A_PLUS_B:%.*]] = add i32 [[TWO_A]], [[B:%.*]]
|
|
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TWO_A_PLUS_B]], [[B]]
|
|
; CHECK-NEXT: tail call void @fake_func(i32 [[MUL]])
|
|
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[A_SQ]]
|
|
; CHECK-NEXT: ret i32 [[ADD]]
|
|
;
|
|
%a_sq = mul nsw i32 %a, %a
|
|
%two_a = shl i32 %a, 1
|
|
%two_a_plus_b = add i32 %two_a, %b
|
|
%mul = mul i32 %two_a_plus_b, %b
|
|
tail call void @fake_func (i32 %mul)
|
|
%add = add i32 %mul, %a_sq
|
|
ret i32 %add
|
|
}
|
|
|
|
define i32 @add_reduce_sqr_sum_not_one_use2(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_not_one_use2(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[TWO_A:%.*]] = shl i32 [[A]], 1
; CHECK-NEXT: [[TWO_A_PLUS_B:%.*]] = add i32 [[TWO_A]], [[B:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TWO_A_PLUS_B]], [[B]]
; CHECK-NEXT: tail call void @fake_func(i32 [[A_SQ]])
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[A_SQ]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%a_sq = mul nsw i32 %a, %a
%two_a = shl i32 %a, 1
%two_a_plus_b = add i32 %two_a, %b
%mul = mul i32 %two_a_plus_b, %b
tail call void @fake_func (i32 %a_sq)
%add = add i32 %mul, %a_sq
ret i32 %add
}

define i32 @add_reduce_sqr_sum_order2_not_one_use(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_order2_not_one_use(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[TWOA:%.*]] = shl i32 [[A]], 1
; CHECK-NEXT: [[TWOAB1:%.*]] = add i32 [[TWOA]], [[B:%.*]]
; CHECK-NEXT: [[TWOAB_B2:%.*]] = mul i32 [[TWOAB1]], [[B]]
; CHECK-NEXT: tail call void @fake_func(i32 [[TWOAB_B2]])
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[A_SQ]], [[TWOAB_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%a_sq = mul nsw i32 %a, %a
%twoa = mul i32 %a, 2
%twoab = mul i32 %twoa, %b
%b_sq = mul i32 %b, %b
%twoab_b2 = add i32 %twoab, %b_sq
tail call void @fake_func (i32 %twoab_b2)
%ab2 = add i32 %a_sq, %twoab_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_order2_not_one_use2(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_order2_not_one_use2(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[TWOA:%.*]] = shl i32 [[A]], 1
; CHECK-NEXT: [[TWOAB1:%.*]] = add i32 [[TWOA]], [[B:%.*]]
; CHECK-NEXT: [[TWOAB_B2:%.*]] = mul i32 [[TWOAB1]], [[B]]
; CHECK-NEXT: tail call void @fake_func(i32 [[A_SQ]])
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[A_SQ]], [[TWOAB_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%a_sq = mul nsw i32 %a, %a
%twoa = mul i32 %a, 2
%twoab = mul i32 %twoa, %b
%b_sq = mul i32 %b, %b
%twoab_b2 = add i32 %twoab, %b_sq
tail call void @fake_func (i32 %a_sq)
%ab2 = add i32 %a_sq, %twoab_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_order3_not_one_use(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_order3_not_one_use(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[TWOA:%.*]] = shl i32 [[A]], 1
; CHECK-NEXT: [[TWOAB:%.*]] = mul i32 [[TWOA]], [[B:%.*]]
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: tail call void @fake_func(i32 [[TWOAB]])
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%a_sq = mul nsw i32 %a, %a
%twoa = mul i32 %a, 2
%twoab = mul i32 %twoa, %b
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
tail call void @fake_func (i32 %twoab)
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_order3_not_one_use2(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_order3_not_one_use2(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[TWOA:%.*]] = shl i32 [[A]], 1
; CHECK-NEXT: [[TWOAB:%.*]] = mul i32 [[TWOA]], [[B:%.*]]
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: tail call void @fake_func(i32 [[A2_B2]])
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%a_sq = mul nsw i32 %a, %a
%twoa = mul i32 %a, 2
%twoab = mul i32 %twoa, %b
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
tail call void @fake_func (i32 %a2_b2)
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_order4_not_one_use(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_order4_not_one_use(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[AB:%.*]] = mul i32 [[A]], [[B:%.*]]
; CHECK-NEXT: [[TWOAB:%.*]] = shl i32 [[AB]], 1
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: tail call void @fake_func(i32 [[TWOAB]])
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%a_sq = mul nsw i32 %a, %a
%ab = mul i32 %a, %b
%twoab = mul i32 %ab, 2
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
tail call void @fake_func (i32 %twoab)
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_order4_not_one_use2(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_order4_not_one_use2(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[AB:%.*]] = mul i32 [[A]], [[B:%.*]]
; CHECK-NEXT: [[TWOAB:%.*]] = shl i32 [[AB]], 1
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: tail call void @fake_func(i32 [[A2_B2]])
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%a_sq = mul nsw i32 %a, %a
%ab = mul i32 %a, %b
%twoab = mul i32 %ab, 2
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
tail call void @fake_func (i32 %a2_b2)
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_order5_not_one_use(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_order5_not_one_use(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[TWOB:%.*]] = shl i32 [[B:%.*]], 1
; CHECK-NEXT: [[TWOAB:%.*]] = mul i32 [[TWOB]], [[A]]
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: tail call void @fake_func(i32 [[TWOAB]])
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%a_sq = mul nsw i32 %a, %a
%twob = mul i32 %b, 2
%twoab = mul i32 %twob, %a
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
tail call void @fake_func (i32 %twoab)
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_order5_not_one_use2(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_order5_not_one_use2(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[TWOB:%.*]] = shl i32 [[B:%.*]], 1
; CHECK-NEXT: [[TWOAB:%.*]] = mul i32 [[TWOB]], [[A]]
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: tail call void @fake_func(i32 [[A2_B2]])
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%a_sq = mul nsw i32 %a, %a
%twob = mul i32 %b, 2
%twoab = mul i32 %twob, %a
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
tail call void @fake_func (i32 %a2_b2)
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

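; Negative tests: the operands do not form a*a + 2*a*b + b*b, so no reduction.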
define i32 @add_reduce_sqr_sum_invalid0(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_invalid0(
; CHECK-NEXT: [[TWO_A:%.*]] = shl i32 [[A:%.*]], 1
; CHECK-NEXT: [[TWO_A_PLUS_B:%.*]] = add i32 [[TWO_A]], [[B:%.*]]
; CHECK-NEXT: [[MUL1:%.*]] = add i32 [[TWO_A_PLUS_B]], [[A]]
; CHECK-NEXT: [[ADD:%.*]] = mul i32 [[MUL1]], [[B]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%not_a_sq = mul nsw i32 %a, %b
%two_a = shl i32 %a, 1
%two_a_plus_b = add i32 %two_a, %b
%mul = mul i32 %two_a_plus_b, %b
%add = add i32 %mul, %not_a_sq
ret i32 %add
}

define i32 @add_reduce_sqr_sum_invalid1(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_invalid1(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[NOT_TWO_A_PLUS_B:%.*]] = mul i32 [[A]], 3
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[NOT_TWO_A_PLUS_B]], [[B:%.*]]
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[A_SQ]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%a_sq = mul nsw i32 %a, %a
%two_a = shl i32 %a, 1
%not_two_a_plus_b = add i32 %two_a, %a
%mul = mul i32 %not_two_a_plus_b, %b
%add = add i32 %mul, %a_sq
ret i32 %add
}

define i32 @add_reduce_sqr_sum_invalid2(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_invalid2(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[NOT_TWO_A:%.*]] = shl i32 [[A]], 2
; CHECK-NEXT: [[TWO_A_PLUS_B:%.*]] = add i32 [[NOT_TWO_A]], [[B:%.*]]
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TWO_A_PLUS_B]], [[B]]
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[A_SQ]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%a_sq = mul nsw i32 %a, %a
%not_two_a = shl i32 %a, 2
%two_a_plus_b = add i32 %not_two_a, %b
%mul = mul i32 %two_a_plus_b, %b
%add = add i32 %mul, %a_sq
ret i32 %add
}

define i32 @add_reduce_sqr_sum_invalid3(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_invalid3(
; CHECK-NEXT: [[TWO_A_PLUS_B:%.*]] = mul i32 [[B:%.*]], 3
; CHECK-NEXT: [[MUL1:%.*]] = add i32 [[TWO_A_PLUS_B]], [[A:%.*]]
; CHECK-NEXT: [[ADD:%.*]] = mul i32 [[MUL1]], [[A]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%a_sq = mul nsw i32 %a, %a
%not_two_a = shl i32 %b, 1
%two_a_plus_b = add i32 %not_two_a, %b
%mul = mul i32 %two_a_plus_b, %a
%add = add i32 %mul, %a_sq
ret i32 %add
}

define i32 @add_reduce_sqr_sum_invalid4(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_invalid4(
; CHECK-NEXT: [[A_SQ:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[TWO_A_PLUS_B:%.*]] = mul i32 [[B:%.*]], 3
; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[TWO_A_PLUS_B]], [[B]]
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[MUL]], [[A_SQ]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%a_sq = mul nsw i32 %a, %a
%not_two_a = shl i32 %b, 1
%two_a_plus_b = add i32 %not_two_a, %b
%mul = mul i32 %two_a_plus_b, %b
%add = add i32 %mul, %a_sq
ret i32 %add
}

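; Negative tests: one of the product terms is malformed (wrong operands or a
; factor other than 2), so the reduction does not fire.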
define i32 @add_reduce_sqr_sum_varB_invalid0(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_varB_invalid0(
; CHECK-NEXT: [[NOT_A_B:%.*]] = mul nsw i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[TWOAB:%.*]] = shl nuw i32 [[NOT_A_B]], 1
; CHECK-NEXT: [[A_SQ:%.*]] = mul i32 [[A]], [[A]]
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B:%.*]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%not_a_b = mul nsw i32 %a, %a
%twoab = mul i32 %not_a_b, 2
%a_sq = mul i32 %a, %a
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_varB_invalid1(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_varB_invalid1(
; CHECK-NEXT: [[NOT_A_B:%.*]] = mul nsw i32 [[B:%.*]], [[B]]
; CHECK-NEXT: [[TWOAB:%.*]] = shl nuw i32 [[NOT_A_B]], 1
; CHECK-NEXT: [[A_SQ:%.*]] = mul i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%not_a_b = mul nsw i32 %b, %b
%twoab = mul i32 %not_a_b, 2
%a_sq = mul i32 %a, %a
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_varB_invalid2(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_varB_invalid2(
; CHECK-NEXT: [[A_B:%.*]] = mul nsw i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[NOT_TWOAB:%.*]] = shl i32 [[A_B]], 2
; CHECK-NEXT: [[A_SQ:%.*]] = mul i32 [[A]], [[A]]
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[NOT_TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%a_b = mul nsw i32 %a, %b
%not_twoab = mul i32 %a_b, 4
%a_sq = mul i32 %a, %a
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
%ab2 = add i32 %not_twoab, %a2_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_varB_invalid3(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_varB_invalid3(
; CHECK-NEXT: [[A_B:%.*]] = mul nsw i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TWOAB:%.*]] = shl i32 [[A_B]], 1
; CHECK-NEXT: [[B_SQ1:%.*]] = add i32 [[A]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = mul i32 [[B_SQ1]], [[B]]
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%a_b = mul nsw i32 %a, %b
%twoab = mul i32 %a_b, 2
%not_a_sq = mul i32 %b, %a
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %not_a_sq, %b_sq
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_varB_invalid4(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_varB_invalid4(
; CHECK-NEXT: [[A_B:%.*]] = mul nsw i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: [[TWOAB:%.*]] = shl i32 [[A_B]], 1
; CHECK-NEXT: [[NOT_B_SQ1:%.*]] = add i32 [[A]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = mul i32 [[NOT_B_SQ1]], [[A]]
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%a_b = mul nsw i32 %a, %b
%twoab = mul i32 %a_b, 2
%a_sq = mul i32 %a, %a
%not_b_sq = mul i32 %b, %a
%a2_b2 = add i32 %a_sq, %not_b_sq
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

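; Negative tests: the doubled product is 2*b*b, 4*a*b, or 2*a*a rather than
; 2*a*b, so the reduction does not fire.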
define i32 @add_reduce_sqr_sum_varC_invalid0(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_varC_invalid0(
; CHECK-NEXT: [[NOT_TWOA:%.*]] = shl nsw i32 [[B:%.*]], 1
; CHECK-NEXT: [[TWOAB:%.*]] = mul i32 [[NOT_TWOA]], [[B]]
; CHECK-NEXT: [[A_SQ:%.*]] = mul i32 [[A:%.*]], [[A]]
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%not_twoa = mul nsw i32 %b, 2
%twoab = mul i32 %not_twoa, %b
%a_sq = mul i32 %a, %a
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_varC_invalid1(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_varC_invalid1(
; CHECK-NEXT: [[NOT_TWOA:%.*]] = shl nsw i32 [[A:%.*]], 2
; CHECK-NEXT: [[TWOAB:%.*]] = mul i32 [[NOT_TWOA]], [[B:%.*]]
; CHECK-NEXT: [[A_SQ:%.*]] = mul i32 [[A]], [[A]]
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%not_twoa = mul nsw i32 %a, 4
%twoab = mul i32 %not_twoa, %b
%a_sq = mul i32 %a, %a
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
%ab2 = add i32 %twoab, %a2_b2
ret i32 %ab2
}

define i32 @add_reduce_sqr_sum_varC_invalid2(i32 %a, i32 %b) {
; CHECK-LABEL: @add_reduce_sqr_sum_varC_invalid2(
; CHECK-NEXT: [[TWOA:%.*]] = shl nsw i32 [[A:%.*]], 1
; CHECK-NEXT: [[NOT_TWOAB:%.*]] = mul i32 [[TWOA]], [[A]]
; CHECK-NEXT: [[A_SQ:%.*]] = mul i32 [[A]], [[A]]
; CHECK-NEXT: [[B_SQ:%.*]] = mul i32 [[B:%.*]], [[B]]
; CHECK-NEXT: [[A2_B2:%.*]] = add i32 [[A_SQ]], [[B_SQ]]
; CHECK-NEXT: [[AB2:%.*]] = add i32 [[NOT_TWOAB]], [[A2_B2]]
; CHECK-NEXT: ret i32 [[AB2]]
;
%twoa = mul nsw i32 %a, 2
%not_twoab = mul i32 %twoa, %a
%a_sq = mul i32 %a, %a
%b_sq = mul i32 %b, %b
%a2_b2 = add i32 %a_sq, %b_sq
%ab2 = add i32 %not_twoab, %a2_b2
ret i32 %ab2
}

declare void @llvm.assume(i1)
declare void @fake_func(i32)