; Change VOP_PAT_GEN to default to not generating an instruction selection
; pattern for the VOP2 (e32) form of an instruction, only for the VOP3 (e64)
; form. This allows SIFoldOperands maximum freedom to fold copies into the
; operands of an instruction, before SIShrinkInstructions tries to shrink it
; back to the smaller encoding. This affects the following VOP2 instructions:
;   v_min_i32 v_max_i32 v_min_u32 v_max_u32 v_and_b32 v_or_b32 v_xor_b32
;   v_lshr_b32 v_ashr_i32 v_lshl_b32
; A further cleanup could simplify or remove VOP_PAT_GEN, since its optional
; second argument is never used.
; Differential Revision: https://reviews.llvm.org/D114252
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx1010 %s -o - | FileCheck %s

; xor of a constant with the low 32 bits of a 64-bit arithmetic shift by 63
; (i.e. the broadcast sign bit). Expects the shrunk e32 form of v_xor_b32.
define i32 @xori64i32(i64 %a) {
; CHECK-LABEL: xori64i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_ashrrev_i32_e32 v0, 31, v1
; CHECK-NEXT: v_xor_b32_e32 v0, 0x7fffffff, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %shr4 = ashr i64 %a, 63
  %conv5 = trunc i64 %shr4 to i32
  %xor = xor i32 %conv5, 2147483647
  ret i32 %xor
}

; select between INT32_MAX/INT32_MIN (as i64) on the sign of a 64-bit value;
; should lower to sign-broadcast + xor rather than a compare/select sequence.
define i64 @selecti64i64(i64 %a) {
; CHECK-LABEL: selecti64i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_ashrrev_i32_e32 v1, 31, v1
; CHECK-NEXT: v_xor_b32_e32 v0, 0x7fffffff, v1
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %c = icmp sgt i64 %a, -1
  %s = select i1 %c, i64 2147483647, i64 -2147483648
  ret i64 %s
}

; i32 select on the sign of an i64: lowered as ashr of the high word + xor.
define i32 @selecti64i32(i64 %a) {
; CHECK-LABEL: selecti64i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_ashrrev_i32_e32 v0, 31, v1
; CHECK-NEXT: v_xor_b32_e32 v0, 0x7fffffff, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %c = icmp sgt i64 %a, -1
  %s = select i1 %c, i32 2147483647, i32 -2147483648
  ret i32 %s
}

; i64 select on the sign of an i32: sign-broadcast + xor for the low word,
; plus a second ashr to produce the sign-extended high word.
define i64 @selecti32i64(i32 %a) {
; CHECK-LABEL: selecti32i64:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; CHECK-NEXT: v_xor_b32_e32 v0, 0x7fffffff, v1
; CHECK-NEXT: v_ashrrev_i32_e32 v1, 31, v1
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %c = icmp sgt i32 %a, -1
  %s = select i1 %c, i64 2147483647, i64 -2147483648
  ret i64 %s
}

; i8 xor of a constant with the truncated sign-broadcast of an i32; the
; constant 84 (0x54) should be folded into the e32 v_xor_b32.
define i8 @xori32i8(i32 %a) {
; CHECK-LABEL: xori32i8:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_ashrrev_i32_e32 v0, 31, v0
; CHECK-NEXT: v_xor_b32_e32 v0, 0x54, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %shr4 = ashr i32 %a, 31
  %conv5 = trunc i32 %shr4 to i8
  %xor = xor i8 %conv5, 84
  ret i8 %xor
}

; select between 84 and -85 (= 84 ^ -1) on the sign of an i32; lowered as
; sign-broadcast + xor with 0x54.
define i32 @selecti32i32(i32 %a) {
; CHECK-LABEL: selecti32i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_ashrrev_i32_e32 v0, 31, v0
; CHECK-NEXT: v_xor_b32_e32 v0, 0x54, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %c = icmp sgt i32 %a, -1
  %s = select i1 %c, i32 84, i32 -85
  ret i32 %s
}

; i8 select between 84 and -85 on the sign of an i32; same sign-broadcast +
; xor lowering as the i32 case.
define i8 @selecti32i8(i32 %a) {
; CHECK-LABEL: selecti32i8:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_ashrrev_i32_e32 v0, 31, v0
; CHECK-NEXT: v_xor_b32_e32 v0, 0x54, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %c = icmp sgt i32 %a, -1
  %s = select i1 %c, i8 84, i8 -85
  ret i8 %s
}

; i32 select on the sign of an i8: current lowering sign-extends with
; v_bfe_i32, shifts with v_ashrrev_i16, and xors via the SDWA form.
define i32 @selecti8i32(i8 %a) {
; CHECK-LABEL: selecti8i32:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_bfe_i32 v0, v0, 0, 8
; CHECK-NEXT: v_mov_b32_e32 v1, 0x54
; CHECK-NEXT: v_ashrrev_i16 v0, 7, v0
; CHECK-NEXT: v_xor_b32_sdwa v0, sext(v0), v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %c = icmp sgt i8 %a, -1
  %s = select i1 %c, i32 84, i32 -85
  ret i32 %s
}

; (ashr x, 31) == -1 is equivalent to x < 0; expects a single v_cmp_gt_i32
; feeding v_cndmask rather than materializing the shift.
define i32 @icmpasreq(i32 %input, i32 %a, i32 %b) {
; CHECK-LABEL: icmpasreq:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_cmp_gt_i32_e32 vcc_lo, 0, v0
; CHECK-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %sh = ashr i32 %input, 31
  %c = icmp eq i32 %sh, -1
  %s = select i1 %c, i32 %a, i32 %b
  ret i32 %s
}

; (ashr x, 31) != -1 is equivalent to x >= 0; expects a single v_cmp_lt_i32
; against -1 feeding v_cndmask.
define i32 @icmpasrne(i32 %input, i32 %a, i32 %b) {
; CHECK-LABEL: icmpasrne:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_cmp_lt_i32_e32 vcc_lo, -1, v0
; CHECK-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc_lo
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %sh = ashr i32 %input, 31
  %c = icmp ne i32 %sh, -1
  %s = select i1 %c, i32 %a, i32 %b
  ret i32 %s
}

; The compare has two users (two selects): the constant select still becomes
; sign-broadcast + xor (fused with the add into v_xad_u32), while the
; variable select keeps the compare + v_cndmask.
define i32 @oneusecmp(i32 %a, i32 %b, i32 %d) {
; CHECK-LABEL: oneusecmp:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_waitcnt_vscnt null, 0x0
; CHECK-NEXT: v_cmp_gt_i32_e32 vcc_lo, 0, v0
; CHECK-NEXT: v_ashrrev_i32_e32 v3, 31, v0
; CHECK-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
; CHECK-NEXT: v_xad_u32 v0, 0x7f, v3, v0
; CHECK-NEXT: s_setpc_b64 s[30:31]
  %c = icmp sle i32 %a, -1
  %s = select i1 %c, i32 -128, i32 127
  %s2 = select i1 %c, i32 %d, i32 %b
  %x = add i32 %s, %s2
  ret i32 %x
}