[NFC][AMDGPU] Auto generate check lines for some test cases (#146400)

Shilei Tian
2025-07-01 09:25:08 -04:00
committed by GitHub
parent bedd7ddb7f
commit 3355cca938
5 changed files with 2085 additions and 321 deletions
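
For reference, a minimal sketch of how such assertions are regenerated, assuming an LLVM checkout with a built llc; the test path below is illustrative, not a file named by this commit:

# Run the script named in each test's NOTE header; it reads the RUN
# lines in the test file and rewrites the CHECK lines in place.
# (llvm/test/CodeGen/AMDGPU/example.ll is a hypothetical path.)
llvm/utils/update_llc_test_checks.py \
    --llc-binary=build/bin/llc \
    llvm/test/CodeGen/AMDGPU/example.ll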


@@ -1,13 +1,21 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; GCN-LABEL: {{^}}and_i1_sext_bool:
; GCN: v_cmp_{{gt|le}}_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GCN: v_cndmask_b32_e{{32|64}} [[VAL:v[0-9]+]], 0, v{{[0-9]+}}, [[CC]]
; GCN: store_dword {{.*}}[[VAL]]
; GCN-NOT: v_cndmask_b32_e64 v{{[0-9]+}}, {{0|-1}}, {{0|-1}}
; GCN-NOT: v_and_b32_e32
define amdgpu_kernel void @and_i1_sext_bool(ptr addrspace(1) nocapture %arg) {
; GCN-LABEL: and_i1_sext_bool:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; GCN-NEXT: s_mov_b32 s3, 0xf000
; GCN-NEXT: s_mov_b32 s2, 0
; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GCN-NEXT: v_mov_b32_e32 v3, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: buffer_load_dword v4, v[2:3], s[0:3], 0 addr64
; GCN-NEXT: v_cmp_gt_u32_e32 vcc, v0, v1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc
; GCN-NEXT: buffer_store_dword v0, v[2:3], s[0:3], 0 addr64
; GCN-NEXT: s_endpgm
bb:
%x = tail call i32 @llvm.amdgcn.workitem.id.x()
%y = tail call i32 @llvm.amdgcn.workitem.id.y()
@@ -20,37 +28,40 @@ bb:
ret void
}
; GCN-LABEL: {{^}}and_sext_bool_fcmp:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64
define i32 @and_sext_bool_fcmp(float %x, i32 %y) {
; GCN-LABEL: and_sext_bool_fcmp:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp = fcmp oeq float %x, 0.0
%sext = sext i1 %cmp to i32
%and = and i32 %sext, %y
ret i32 %and
}
; GCN-LABEL: {{^}}and_sext_bool_fpclass:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 [[K:v[0-9]+]], 0x7b
; GCN-NEXT: v_cmp_class_f32_e32 vcc, v0, [[K]]
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64
define i32 @and_sext_bool_fpclass(float %x, i32 %y) {
; GCN-LABEL: and_sext_bool_fpclass:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, 0x7b
; GCN-NEXT: v_cmp_class_f32_e32 vcc, v0, v2
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%class = call i1 @llvm.is.fpclass(float %x, i32 123)
%sext = sext i1 %class to i32
%and = and i32 %sext, %y
ret i32 %and
}
; GCN-LABEL: {{^}}and_sext_bool_uadd_w_overflow:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64
define i32 @and_sext_bool_uadd_w_overflow(i32 %x, i32 %y) {
; GCN-LABEL: and_sext_bool_uadd_w_overflow:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
%carry = extractvalue { i32, i1 } %uadd, 1
%sext = sext i1 %carry to i32
@@ -58,12 +69,13 @@ define i32 @and_sext_bool_uadd_w_overflow(i32 %x, i32 %y) {
ret i32 %and
}
; GCN-LABEL: {{^}}and_sext_bool_usub_w_overflow:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64
define i32 @and_sext_bool_usub_w_overflow(i32 %x, i32 %y) {
; GCN-LABEL: and_sext_bool_usub_w_overflow:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%uadd = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
%carry = extractvalue { i32, i1 } %uadd, 1
%sext = sext i1 %carry to i32
@@ -71,15 +83,16 @@ define i32 @and_sext_bool_usub_w_overflow(i32 %x, i32 %y) {
ret i32 %and
}
; GCN-LABEL: {{^}}and_sext_bool_sadd_w_overflow:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
; GCN-NEXT: v_add_i32_e64 v2, s[4:5], v0, v1
; GCN-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GCN-NEXT: s_xor_b64 vcc, vcc, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64
define i32 @and_sext_bool_sadd_w_overflow(i32 %x, i32 %y) {
; GCN-LABEL: and_sext_bool_sadd_w_overflow:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
; GCN-NEXT: v_add_i32_e64 v2, s[4:5], v0, v1
; GCN-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GCN-NEXT: s_xor_b64 vcc, vcc, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%uadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
%carry = extractvalue { i32, i1 } %uadd, 1
%sext = sext i1 %carry to i32
@@ -87,15 +100,16 @@ define i32 @and_sext_bool_sadd_w_overflow(i32 %x, i32 %y) {
ret i32 %and
}
; GCN-LABEL: {{^}}and_sext_bool_ssub_w_overflow:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
; GCN-NEXT: v_add_i32_e64 v2, s[4:5], v0, v1
; GCN-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GCN-NEXT: s_xor_b64 vcc, vcc, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64
define i32 @and_sext_bool_ssub_w_overflow(i32 %x, i32 %y) {
; GCN-LABEL: and_sext_bool_ssub_w_overflow:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
; GCN-NEXT: v_add_i32_e64 v2, s[4:5], v0, v1
; GCN-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
; GCN-NEXT: s_xor_b64 vcc, vcc, s[4:5]
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%uadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
%carry = extractvalue { i32, i1 } %uadd, 1
%sext = sext i1 %carry to i32
@@ -103,15 +117,16 @@ define i32 @and_sext_bool_ssub_w_overflow(i32 %x, i32 %y) {
ret i32 %and
}
; GCN-LABEL: {{^}}and_sext_bool_smul_w_overflow:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_hi_i32 v2, v0, v1
; GCN-NEXT: v_mul_lo_u32 v0, v0, v1
; GCN-NEXT: v_ashrrev_i32_e32 v0, 31, v0
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, v2, v0
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64
define i32 @and_sext_bool_smul_w_overflow(i32 %x, i32 %y) {
; GCN-LABEL: and_sext_bool_smul_w_overflow:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_hi_i32 v2, v0, v1
; GCN-NEXT: v_mul_lo_u32 v0, v0, v1
; GCN-NEXT: v_ashrrev_i32_e32 v0, 31, v0
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, v2, v0
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%uadd = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %x, i32 %y)
%carry = extractvalue { i32, i1 } %uadd, 1
%sext = sext i1 %carry to i32
@@ -119,13 +134,14 @@ define i32 @and_sext_bool_smul_w_overflow(i32 %x, i32 %y) {
ret i32 %and
}
; GCN-LABEL: {{^}}and_sext_bool_umul_w_overflow:
; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_hi_u32 v0, v0, v1
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64
define i32 @and_sext_bool_umul_w_overflow(i32 %x, i32 %y) {
; GCN-LABEL: and_sext_bool_umul_w_overflow:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: v_mul_hi_u32 v0, v0, v1
; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
; GCN-NEXT: s_setpc_b64 s[30:31]
%uadd = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
%carry = extractvalue { i32, i1 } %uadd, 1
%sext = sext i1 %carry to i32

File diff suppressed because it is too large.


@@ -1,30 +1,105 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -verify-machineinstrs -enable-misched -asm-verbose -disable-block-placement -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefix=SI %s
declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
; SI-LABEL: {{^}}test_if:
; Make sure the i1 values created by the cfg structurizer pass are
; moved using VALU instructions
; waitcnt should be inserted after exec modification
; SI: v_cmp_lt_i32_e32 vcc, 1,
; SI-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, 0
; SI-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, 0
; SI-NEXT: s_and_saveexec_b64 [[SAVE1:s\[[0-9]+:[0-9]+\]]], vcc
; SI-NEXT: s_xor_b64 [[SAVE2:s\[[0-9]+:[0-9]+\]]], exec, [[SAVE1]]
; SI-NEXT: s_cbranch_execz [[FLOW_BB:.LBB[0-9]+_[0-9]+]]
; SI-NEXT: ; %bb.{{[0-9]+}}: ; %LeafBlock3
; SI: s_mov_b64 s[{{[0-9]:[0-9]}}], -1
; SI: s_and_saveexec_b64
; SI-NEXT: s_cbranch_execnz
; v_mov should be after exec modification
; SI: [[FLOW_BB]]:
; SI-NEXT: s_andn2_saveexec_b64 [[SAVE2]], [[SAVE2]]
;
define amdgpu_kernel void @test_if(i32 %b, ptr addrspace(1) %src, ptr addrspace(1) %dst) #1 {
; SI-LABEL: test_if:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s8, s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
; SI-NEXT: v_cmp_lt_i32_e32 vcc, 1, v0
; SI-NEXT: s_mov_b64 s[10:11], 0
; SI-NEXT: s_mov_b64 s[2:3], 0
; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
; SI-NEXT: s_cbranch_execz .LBB0_3
; SI-NEXT: ; %bb.1: ; %LeafBlock3
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0
; SI-NEXT: s_mov_b64 s[2:3], -1
; SI-NEXT: s_and_saveexec_b64 s[6:7], vcc
; SI-NEXT: s_cbranch_execnz .LBB0_9
; SI-NEXT: .LBB0_2: ; %Flow7
; SI-NEXT: s_or_b64 exec, exec, s[6:7]
; SI-NEXT: s_and_b64 s[2:3], s[2:3], exec
; SI-NEXT: .LBB0_3: ; %Flow6
; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
; SI-NEXT: s_cbranch_execz .LBB0_5
; SI-NEXT: ; %bb.4: ; %LeafBlock
; SI-NEXT: s_mov_b64 s[10:11], exec
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 1, v0
; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], exec
; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
; SI-NEXT: .LBB0_5: ; %Flow8
; SI-NEXT: s_or_b64 exec, exec, s[4:5]
; SI-NEXT: s_and_saveexec_b64 s[4:5], s[2:3]
; SI-NEXT: s_xor_b64 s[2:3], exec, s[4:5]
; SI-NEXT: s_cbranch_execnz .LBB0_10
; SI-NEXT: .LBB0_6: ; %Flow9
; SI-NEXT: s_or_b64 exec, exec, s[2:3]
; SI-NEXT: s_and_saveexec_b64 s[2:3], s[10:11]
; SI-NEXT: s_cbranch_execz .LBB0_8
; SI-NEXT: ; %bb.7: ; %case1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s9, s8, 31
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_lshl_b64 s[4:5], s[8:9], 2
; SI-NEXT: v_mov_b32_e32 v2, 13
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: .LBB0_8: ; %end
; SI-NEXT: s_endpgm
; SI-NEXT: .LBB0_9: ; %case2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s9, s8, 31
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_lshl_b64 s[12:13], s[8:9], 2
; SI-NEXT: v_mov_b32_e32 v3, 17
; SI-NEXT: v_mov_b32_e32 v1, s12
; SI-NEXT: v_mov_b32_e32 v2, s13
; SI-NEXT: buffer_store_dword v3, v[1:2], s[0:3], 0 addr64
; SI-NEXT: s_xor_b64 s[2:3], exec, -1
; SI-NEXT: s_branch .LBB0_2
; SI-NEXT: .LBB0_10: ; %default
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 2, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_ashr_i32 s9, s8, 31
; SI-NEXT: s_lshl_b64 s[4:5], s[8:9], 2
; SI-NEXT: s_add_u32 s4, s0, s4
; SI-NEXT: s_addc_u32 s5, s1, s5
; SI-NEXT: s_and_saveexec_b64 s[6:7], vcc
; SI-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
; SI-NEXT: s_cbranch_execnz .LBB0_14
; SI-NEXT: .LBB0_11: ; %Flow
; SI-NEXT: s_andn2_saveexec_b64 s[12:13], s[12:13]
; SI-NEXT: s_cbranch_execz .LBB0_13
; SI-NEXT: ; %bb.12: ; %if
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, 19
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: .LBB0_13: ; %Flow5
; SI-NEXT: s_or_b64 exec, exec, s[12:13]
; SI-NEXT: s_andn2_b64 s[10:11], s[10:11], exec
; SI-NEXT: s_branch .LBB0_6
; SI-NEXT: .LBB0_14: ; %else
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, 21
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_branch .LBB0_11
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
switch i32 %tid, label %default [
@@ -59,17 +134,23 @@ end:
ret void
}
; SI-LABEL: {{^}}simple_test_v_if:
; SI: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
; SI-NEXT: s_cbranch_execz [[EXIT:.LBB[0-9]+_[0-9]+]]
; SI-NEXT: ; %bb.{{[0-9]+}}:
; SI: buffer_store_dword
; SI-NEXT: {{^}}[[EXIT]]:
; SI: s_endpgm
define amdgpu_kernel void @simple_test_v_if(ptr addrspace(1) %dst, ptr addrspace(1) %src) #1 {
; SI-LABEL: simple_test_v_if:
; SI: ; %bb.0:
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
; SI-NEXT: s_cbranch_execz .LBB1_2
; SI-NEXT: ; %bb.1: ; %then
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: v_mov_b32_e32 v2, 0x3e7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: .LBB1_2: ; %exit
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%is.0 = icmp ne i32 %tid, 0
br i1 %is.0, label %then, label %exit
@@ -84,18 +165,23 @@ exit:
}
; FIXME: It would be better to endpgm in the then block.
; SI-LABEL: {{^}}simple_test_v_if_ret_else_ret:
; SI: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
; SI-NEXT: s_cbranch_execz [[EXIT:.LBB[0-9]+_[0-9]+]]
; SI-NEXT: ; %bb.{{[0-9]+}}:
; SI: buffer_store_dword
; SI-NEXT: {{^}}[[EXIT]]:
; SI: s_endpgm
define amdgpu_kernel void @simple_test_v_if_ret_else_ret(ptr addrspace(1) %dst, ptr addrspace(1) %src) #1 {
; SI-LABEL: simple_test_v_if_ret_else_ret:
; SI: ; %bb.0:
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
; SI-NEXT: s_cbranch_execz .LBB2_2
; SI-NEXT: ; %bb.1: ; %then
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: v_mov_b32_e32 v2, 0x3e7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: .LBB2_2: ; %UnifiedReturnBlock
; SI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%is.0 = icmp ne i32 %tid, 0
br i1 %is.0, label %then, label %exit
@@ -112,27 +198,33 @@ exit:
; Final block has more than a ret to execute. This was miscompiled
; before function exit blocks were unified since the endpgm would
; terminate the then wavefront before reaching the store.
; SI-LABEL: {{^}}simple_test_v_if_ret_else_code_ret:
; SI: v_cmp_eq_u32_e32 vcc, 0, v{{[0-9]+}}
; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]]
; SI: s_cbranch_execnz [[EXIT:.LBB[0-9]+_[0-9]+]]
; SI-NEXT: {{^.LBB[0-9]+_[0-9]+}}: ; %Flow
; SI-NEXT: s_andn2_saveexec_b64 [[BR_SREG]], [[BR_SREG]]
; SI-NEXT: s_cbranch_execz [[UNIFIED_RETURN:.LBB[0-9]+_[0-9]+]]
; SI-NEXT: ; %bb.{{[0-9]+}}: ; %then
; SI: s_waitcnt
; SI-NEXT: buffer_store_dword
; SI-NEXT: {{^}}[[UNIFIED_RETURN]]: ; %UnifiedReturnBlock
; SI: s_endpgm
; SI-NEXT: {{^}}[[EXIT]]:
; SI: ds_write_b32
define amdgpu_kernel void @simple_test_v_if_ret_else_code_ret(ptr addrspace(1) %dst, ptr addrspace(1) %src) #1 {
; SI-LABEL: simple_test_v_if_ret_else_code_ret:
; SI: ; %bb.0:
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
; SI-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
; SI-NEXT: s_cbranch_execnz .LBB3_4
; SI-NEXT: .LBB3_1: ; %Flow
; SI-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
; SI-NEXT: s_cbranch_execz .LBB3_3
; SI-NEXT: ; %bb.2: ; %then
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: v_mov_b32_e32 v2, 0x3e7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: .LBB3_3: ; %UnifiedReturnBlock
; SI-NEXT: s_endpgm
; SI-NEXT: .LBB3_4: ; %exit
; SI-NEXT: v_mov_b32_e32 v0, 7
; SI-NEXT: s_mov_b32 m0, -1
; SI-NEXT: ds_write_b32 v0, v0
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: s_branch .LBB3_1
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%is.0 = icmp ne i32 %tid, 0
br i1 %is.0, label %then, label %exit
@@ -147,21 +239,38 @@ exit:
ret void
}
; SI-LABEL: {{^}}simple_test_v_loop:
; SI: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
; SI-NEXT: s_cbranch_execz [[LABEL_EXIT:.LBB[0-9]+_[0-9]+]]
; SI: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, 0{{$}}
; SI: [[LABEL_LOOP:.LBB[0-9]+_[0-9]+]]:
; SI: buffer_load_dword
; SI-DAG: buffer_store_dword
; SI-DAG: s_cmpk_lg_i32 s{{[0-9]+}}, 0x100
; SI: s_cbranch_scc1 [[LABEL_LOOP]]
; SI: [[LABEL_EXIT]]:
; SI: s_endpgm
define amdgpu_kernel void @simple_test_v_loop(ptr addrspace(1) %dst, ptr addrspace(1) %src) #1 {
; SI-LABEL: simple_test_v_loop:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
; SI-NEXT: s_cbranch_execz .LBB4_3
; SI-NEXT: ; %bb.1: ; %loop.preheader
; SI-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v1, s9
; SI-NEXT: v_add_i32_e32 v0, vcc, s8, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s10
; SI-NEXT: s_mov_b32 s5, s11
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: .LBB4_2: ; %loop
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v2, off, s[4:7], 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_add_u32 s0, s0, 4
; SI-NEXT: s_addc_u32 s1, s1, 0
; SI-NEXT: s_cmpk_lg_i32 s0, 0x100
; SI-NEXT: s_cbranch_scc1 .LBB4_2
; SI-NEXT: .LBB4_3: ; %exit
; SI-NEXT: s_endpgm
entry:
%tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
%is.0 = icmp ne i32 %tid, 0
@@ -182,45 +291,81 @@ exit:
ret void
}
; SI-LABEL: {{^}}multi_vcond_loop:
; Load loop limit from buffer
; Branch to exit if uniformly not taken
; SI: ; %bb.0:
; SI: buffer_load_dword [[VBOUND:v[0-9]+]]
; SI: v_cmp_lt_i32_e32 vcc
; SI: s_and_saveexec_b64 [[OUTER_CMP_SREG:s\[[0-9]+:[0-9]+\]]], vcc
; SI-NEXT: s_cbranch_execz [[LABEL_EXIT:.LBB[0-9]+_[0-9]+]]
; Initialize inner condition to false
; SI: ; %bb.{{[0-9]+}}: ; %bb10.preheader
; SI: s_mov_b64 [[COND_STATE:s\[[0-9]+:[0-9]+\]]], 0{{$}}
; Clear exec bits for workitems that load -1s
; SI: .L[[LABEL_LOOP:BB[0-9]+_[0-9]+]]:
; SI: buffer_load_dword [[B:v[0-9]+]]
; SI: buffer_load_dword [[A:v[0-9]+]]
; SI-DAG: v_cmp_ne_u32_e64 [[NEG1_CHECK_0:s\[[0-9]+:[0-9]+\]]], -1, [[A]]
; SI-DAG: v_cmp_ne_u32_e32 [[NEG1_CHECK_1:vcc]], -1, [[B]]
; SI: s_and_b64 [[ORNEG1:s\[[0-9]+:[0-9]+\]]], [[NEG1_CHECK_1]], [[NEG1_CHECK_0]]
; SI: s_and_saveexec_b64 [[ORNEG2:s\[[0-9]+:[0-9]+\]]], [[ORNEG1]]
; SI: s_cbranch_execz [[LABEL_FLOW:.LBB[0-9]+_[0-9]+]]
; SI: ; %bb.{{[0-9]+}}: ; %bb20
; SI: buffer_store_dword
; SI: [[LABEL_FLOW]]:
; SI-NEXT: ; in Loop: Header=[[LABEL_LOOP]]
; SI-NEXT: s_or_b64 exec, exec, [[ORNEG2]]
; SI-NEXT: s_and_b64 [[TMP1:s\[[0-9]+:[0-9]+\]]],
; SI-NEXT: s_or_b64 [[COND_STATE]], [[TMP1]], [[COND_STATE]]
; SI-NEXT: s_andn2_b64 exec, exec, [[COND_STATE]]
; SI-NEXT: s_cbranch_execnz .L[[LABEL_LOOP]]
; SI: [[LABEL_EXIT]]:
; SI-NOT: [[COND_STATE]]
; SI: s_endpgm
define amdgpu_kernel void @multi_vcond_loop(ptr addrspace(1) noalias nocapture %arg, ptr addrspace(1) noalias nocapture readonly %arg1, ptr addrspace(1) noalias nocapture readonly %arg2, ptr addrspace(1) noalias nocapture readonly %arg3) #1 {
; SI-LABEL: multi_vcond_loop:
; SI: ; %bb.0: ; %bb
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xf
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_mov_b32_e32 v7, 0
; SI-NEXT: s_mov_b32 s11, 0xf000
; SI-NEXT: v_lshlrev_b32_e32 v6, 2, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_dword v0, v[6:7], s[8:11], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_lt_i32_e32 vcc, 0, v0
; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
; SI-NEXT: s_cbranch_execz .LBB5_5
; SI-NEXT: ; %bb.1: ; %bb10.preheader
; SI-NEXT: s_load_dwordx4 s[12:15], s[4:5], 0x9
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
; SI-NEXT: s_mov_b64 s[2:3], 0
; SI-NEXT: s_mov_b32 s8, s10
; SI-NEXT: s_mov_b32 s9, s10
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, s13
; SI-NEXT: v_add_i32_e32 v2, vcc, s12, v6
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: v_add_i32_e32 v4, vcc, s0, v6
; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; SI-NEXT: v_mov_b32_e32 v7, s15
; SI-NEXT: v_add_i32_e32 v6, vcc, s14, v6
; SI-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: .LBB5_2: ; %bb10
; SI-NEXT: ; =>This Inner Loop Header: Depth=1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v8, v[6:7], s[8:11], 0 addr64
; SI-NEXT: buffer_load_dword v9, v[4:5], s[8:11], 0 addr64
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, -1, v8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e64 s[0:1], -1, v9
; SI-NEXT: s_and_b64 s[12:13], vcc, s[0:1]
; SI-NEXT: s_or_b64 s[4:5], s[4:5], exec
; SI-NEXT: s_and_saveexec_b64 s[0:1], s[12:13]
; SI-NEXT: s_cbranch_execz .LBB5_4
; SI-NEXT: ; %bb.3: ; %bb20
; SI-NEXT: ; in Loop: Header=BB5_2 Depth=1
; SI-NEXT: v_add_i32_e32 v8, vcc, v9, v8
; SI-NEXT: s_add_u32 s6, s6, 1
; SI-NEXT: v_add_i32_e32 v4, vcc, 4, v4
; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; SI-NEXT: v_add_i32_e32 v6, vcc, 4, v6
; SI-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
; SI-NEXT: buffer_store_dword v8, v[2:3], s[8:11], 0 addr64
; SI-NEXT: s_addc_u32 s7, s7, 0
; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v2
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[0:1]
; SI-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
; SI-NEXT: s_and_b64 s[12:13], vcc, exec
; SI-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
; SI-NEXT: .LBB5_4: ; %Flow
; SI-NEXT: ; in Loop: Header=BB5_2 Depth=1
; SI-NEXT: s_or_b64 exec, exec, s[0:1]
; SI-NEXT: s_and_b64 s[0:1], exec, s[4:5]
; SI-NEXT: s_or_b64 s[2:3], s[0:1], s[2:3]
; SI-NEXT: s_andn2_b64 exec, exec, s[2:3]
; SI-NEXT: s_cbranch_execnz .LBB5_2
; SI-NEXT: .LBB5_5: ; %bb26
; SI-NEXT: s_endpgm
bb:
%tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
%tmp4 = sext i32 %tmp to i64


@@ -1,12 +1,29 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1010 %s
; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1030 %s
; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_lt:
; GFX1010: v_cmp_lt_i32_e32 vcc_lo, 15, v{{.*}}
; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
; GFX1030: s_mov_b32 s{{.*}}, exec_lo
; GFX1030-NEXT: v_cmpx_lt_i32_e32 15, v{{.*}}
define i32 @test_insert_vcmpx_pattern_lt(i32 %x) {
; GFX1010-LABEL: test_insert_vcmpx_pattern_lt:
; GFX1010: ; %bb.0: ; %entry
; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT: v_cmp_lt_i32_e32 vcc_lo, 15, v0
; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1010-NEXT: ; %bb.1: ; %if
; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1010-NEXT: s_setpc_b64 s[30:31]
;
; GFX1030-LABEL: test_insert_vcmpx_pattern_lt:
; GFX1030: ; %bb.0: ; %entry
; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1030-NEXT: s_mov_b32 s4, exec_lo
; GFX1030-NEXT: v_cmpx_lt_i32_e32 15, v0
; GFX1030-NEXT: ; %bb.1: ; %if
; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1030-NEXT: s_setpc_b64 s[30:31]
entry:
%bc = icmp slt i32 %x, 16
br i1 %bc, label %endif, label %if
@@ -19,12 +36,28 @@ endif:
ret i32 %x
}
; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_gt:
; GFX1010: v_cmp_gt_i32_e32 vcc_lo, 17, v{{.*}}
; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
; GFX1030: s_mov_b32 s{{.*}}, exec_lo
; GFX1030-NEXT: v_cmpx_gt_i32_e32 17, v{{.*}}
define i32 @test_insert_vcmpx_pattern_gt(i32 %x) {
; GFX1010-LABEL: test_insert_vcmpx_pattern_gt:
; GFX1010: ; %bb.0: ; %entry
; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT: v_cmp_gt_i32_e32 vcc_lo, 17, v0
; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1010-NEXT: ; %bb.1: ; %if
; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1010-NEXT: s_setpc_b64 s[30:31]
;
; GFX1030-LABEL: test_insert_vcmpx_pattern_gt:
; GFX1030: ; %bb.0: ; %entry
; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1030-NEXT: s_mov_b32 s4, exec_lo
; GFX1030-NEXT: v_cmpx_gt_i32_e32 17, v0
; GFX1030-NEXT: ; %bb.1: ; %if
; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1030-NEXT: s_setpc_b64 s[30:31]
entry:
%bc = icmp sgt i32 %x, 16
br i1 %bc, label %endif, label %if
@@ -37,12 +70,28 @@ endif:
ret i32 %x
}
; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_eq:
; GFX1010: v_cmp_ne_u32_e32 vcc_lo, 16, v{{.*}}
; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
; GFX1030: s_mov_b32 s{{.*}}, exec_lo
; GFX1030-NEXT: v_cmpx_ne_u32_e32 16, v{{.*}}
define i32 @test_insert_vcmpx_pattern_eq(i32 %x) {
; GFX1010-LABEL: test_insert_vcmpx_pattern_eq:
; GFX1010: ; %bb.0: ; %entry
; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT: v_cmp_ne_u32_e32 vcc_lo, 16, v0
; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1010-NEXT: ; %bb.1: ; %if
; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1010-NEXT: s_setpc_b64 s[30:31]
;
; GFX1030-LABEL: test_insert_vcmpx_pattern_eq:
; GFX1030: ; %bb.0: ; %entry
; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1030-NEXT: s_mov_b32 s4, exec_lo
; GFX1030-NEXT: v_cmpx_ne_u32_e32 16, v0
; GFX1030-NEXT: ; %bb.1: ; %if
; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1030-NEXT: s_setpc_b64 s[30:31]
entry:
%bc = icmp eq i32 %x, 16
br i1 %bc, label %endif, label %if
@@ -55,12 +104,28 @@ endif:
ret i32 %x
}
; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_ne:
; GFX1010: v_cmp_eq_u32_e32 vcc_lo, 16, v{{.*}}
; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
; GFX1030: s_mov_b32 s{{.*}}, exec_lo
; GFX1030-NEXT: v_cmpx_eq_u32_e32 16, v{{.*}}
define i32 @test_insert_vcmpx_pattern_ne(i32 %x) {
; GFX1010-LABEL: test_insert_vcmpx_pattern_ne:
; GFX1010: ; %bb.0: ; %entry
; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT: v_cmp_eq_u32_e32 vcc_lo, 16, v0
; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1010-NEXT: ; %bb.1: ; %if
; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1010-NEXT: s_setpc_b64 s[30:31]
;
; GFX1030-LABEL: test_insert_vcmpx_pattern_ne:
; GFX1030: ; %bb.0: ; %entry
; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1030-NEXT: s_mov_b32 s4, exec_lo
; GFX1030-NEXT: v_cmpx_eq_u32_e32 16, v0
; GFX1030-NEXT: ; %bb.1: ; %if
; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1030-NEXT: s_setpc_b64 s[30:31]
entry:
%bc = icmp ne i32 %x, 16
br i1 %bc, label %endif, label %if
@@ -73,12 +138,28 @@ endif:
ret i32 %x
}
; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_le:
; GFX1010: v_cmp_lt_i32_e32 vcc_lo, 16, v{{.*}}
; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
; GFX1030: s_mov_b32 s{{.*}}, exec_lo
; GFX1030-NEXT: v_cmpx_lt_i32_e32 16, v{{.*}}
define i32 @test_insert_vcmpx_pattern_le(i32 %x) {
; GFX1010-LABEL: test_insert_vcmpx_pattern_le:
; GFX1010: ; %bb.0: ; %entry
; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT: v_cmp_lt_i32_e32 vcc_lo, 16, v0
; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1010-NEXT: ; %bb.1: ; %if
; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1010-NEXT: s_setpc_b64 s[30:31]
;
; GFX1030-LABEL: test_insert_vcmpx_pattern_le:
; GFX1030: ; %bb.0: ; %entry
; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1030-NEXT: s_mov_b32 s4, exec_lo
; GFX1030-NEXT: v_cmpx_lt_i32_e32 16, v0
; GFX1030-NEXT: ; %bb.1: ; %if
; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1030-NEXT: s_setpc_b64 s[30:31]
entry:
%bc = icmp sle i32 %x, 16
br i1 %bc, label %endif, label %if
@@ -91,12 +172,28 @@ endif:
ret i32 %x
}
; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_ge:
; GFX1010: v_cmp_gt_i32_e32 vcc_lo, 16, v{{.*}}
; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
; GFX1030: s_mov_b32 s{{.*}}, exec_lo
; GFX1030-NEXT: v_cmpx_gt_i32_e32 16, v{{.*}}
define i32 @test_insert_vcmpx_pattern_ge(i32 %x) {
; GFX1010-LABEL: test_insert_vcmpx_pattern_ge:
; GFX1010: ; %bb.0: ; %entry
; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1010-NEXT: v_cmp_gt_i32_e32 vcc_lo, 16, v0
; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1010-NEXT: ; %bb.1: ; %if
; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1010-NEXT: s_setpc_b64 s[30:31]
;
; GFX1030-LABEL: test_insert_vcmpx_pattern_ge:
; GFX1030: ; %bb.0: ; %entry
; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX1030-NEXT: s_mov_b32 s4, exec_lo
; GFX1030-NEXT: v_cmpx_gt_i32_e32 16, v0
; GFX1030-NEXT: ; %bb.1: ; %if
; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1030-NEXT: s_setpc_b64 s[30:31]
entry:
%bc = icmp sge i32 %x, 16
br i1 %bc, label %endif, label %if
@@ -113,13 +210,32 @@ declare amdgpu_gfx void @check_live_outs_helper(i64) #0
; In cases where the output operand cannot be safely removed,
; don't apply the v_cmpx transformation.
; GCN-LABEL: {{^}}check_live_outs:
; GFX1010: v_cmp_eq_u32_e64 s{{.*}}, v{{.*}}, v{{.*}}
; GFX1010: s_and_saveexec_b32 s{{.*}}, s{{.*}}
; GFX1030: v_cmp_eq_u32_e64 s{{.*}}, v{{.*}}, v{{.*}}
; GFX1030: s_and_saveexec_b32 s{{.*}}, s{{.*}}
define amdgpu_cs void @check_live_outs(i32 %a, i32 %b) {
; GCN-LABEL: check_live_outs:
; GCN: ; %bb.0:
; GCN-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GCN-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
; GCN-NEXT: s_mov_b32 s10, -1
; GCN-NEXT: s_mov_b32 s11, 0x31c16000
; GCN-NEXT: s_add_u32 s8, s8, s0
; GCN-NEXT: v_cmp_eq_u32_e64 s0, v0, v1
; GCN-NEXT: s_addc_u32 s9, s9, 0
; GCN-NEXT: s_mov_b32 s32, 0
; GCN-NEXT: s_and_saveexec_b32 s1, s0
; GCN-NEXT: s_cbranch_execz .LBB6_2
; GCN-NEXT: ; %bb.1: ; %l1
; GCN-NEXT: s_getpc_b64 s[2:3]
; GCN-NEXT: s_add_u32 s2, s2, check_live_outs_helper@gotpcrel32@lo+4
; GCN-NEXT: s_addc_u32 s3, s3, check_live_outs_helper@gotpcrel32@hi+12
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
; GCN-NEXT: v_mov_b32_e32 v1, 0
; GCN-NEXT: s_mov_b64 s[0:1], s[8:9]
; GCN-NEXT: s_mov_b64 s[2:3], s[10:11]
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_swappc_b64 s[30:31], s[4:5]
; GCN-NEXT: .LBB6_2: ; %l2
; GCN-NEXT: s_endpgm
%cond = icmp eq i32 %a, %b
%result = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 32)
br i1 %cond, label %l1, label %l2
@@ -132,14 +248,27 @@ l2:
; Omit the transformation if the s_and_saveexec instruction overwrites
; any of the v_cmp source operands.
; GCN-LABEL: check_saveexec_overwrites_vcmp_source:
; GCN: .LBB7_2: ; %then
; GFX1010: v_cmp_eq_u32_e64 s[[C:[0-9]+]], s[[A:[0-9]+]], s[[B:[0-9]+]]
; GFX1010-NEXT: s_cmp_ge_i32 s[[C]], s[[B]]
; GFX1030: v_cmp_eq_u32_e64 s[[C:[0-9]+]], s[[A:[0-9]+]], s[[B:[0-9]+]]
; GFX1030-NEXT: s_cmp_ge_i32 s[[C]], s[[B]]
define i32 @check_saveexec_overwrites_vcmp_source(i32 inreg %a, i32 inreg %b) {
; GCN-LABEL: check_saveexec_overwrites_vcmp_source:
; GCN: ; %bb.0: ; %entry
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: s_cmp_lt_i32 s16, 0
; GCN-NEXT: s_cbranch_scc1 .LBB7_2
; GCN-NEXT: ; %bb.1: ; %if
; GCN-NEXT: s_lshl_b32 s4, s16, 2
; GCN-NEXT: s_or_b32 s4, s4, s17
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: s_setpc_b64 s[30:31]
; GCN-NEXT: .LBB7_2: ; %then
; GCN-NEXT: v_cmp_eq_u32_e64 s4, s16, s17
; GCN-NEXT: s_cmp_ge_i32 s4, s17
; GCN-NEXT: s_cbranch_scc1 .LBB7_4
; GCN-NEXT: ; %bb.3: ; %after
; GCN-NEXT: v_mov_b32_e32 v0, s4
; GCN-NEXT: s_setpc_b64 s[30:31]
; GCN-NEXT: .LBB7_4: ; %end
; GCN-NEXT: v_mov_b32_e32 v0, s16
; GCN-NEXT: s_setpc_b64 s[30:31]
entry:
%0 = icmp sge i32 %a, 0
br i1 %0, label %if, label %then


@@ -1,15 +1,37 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
; Check that DAGTypeLegalizer::WidenVSELECTAndMask doesn't try to
; create vselects with i64 condition masks.
; FIXME: Should be able to avoid intermediate vselect
; GCN-LABEL: {{^}}widen_vselect_and_mask_v4f64:
; GCN: v_cmp_u_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]],
; GCN: v_cndmask_b32_e64 v[[VSEL:[0-9]+]], 0, -1, [[CMP]]
; GCN: v_mov_b32_e32 v[[VSEL_EXT:[0-9]+]], v[[VSEL]]
; GCN: v_cmp_lt_i64_e32 vcc, -1, v[[[VSEL]]:[[VSEL_EXT]]]
define amdgpu_kernel void @widen_vselect_and_mask_v4f64(<4 x double> %arg) #0 {
; GCN-LABEL: widen_vselect_and_mask_v4f64:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b64 s[4:5], 16
; GCN-NEXT: s_mov_b32 s7, 0xf000
; GCN-NEXT: s_mov_b32 s6, -1
; GCN-NEXT: v_mov_b32_e32 v1, v0
; GCN-NEXT: v_mov_b32_e32 v2, v0
; GCN-NEXT: v_mov_b32_e32 v3, v0
; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; GCN-NEXT: v_cmp_u_f64_e64 s[2:3], s[0:1], s[0:1]
; GCN-NEXT: s_waitcnt expcnt(0)
; GCN-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[2:3]
; GCN-NEXT: v_cmp_neq_f64_e64 s[0:1], s[0:1], 0
; GCN-NEXT: v_mov_b32_e32 v2, v1
; GCN-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[1:2]
; GCN-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GCN-NEXT: s_and_b64 s[0:1], s[0:1], exec
; GCN-NEXT: s_cselect_b32 s0, 0x3ff00000, 0
; GCN-NEXT: s_mov_b64 s[4:5], 0
; GCN-NEXT: v_mov_b32_e32 v2, v0
; GCN-NEXT: v_mov_b32_e32 v1, s0
; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; GCN-NEXT: s_endpgm
bb:
%tmp = extractelement <4 x double> %arg, i64 0
%tmp1 = fcmp uno double %tmp, 0.000000e+00
@@ -26,12 +48,34 @@ bb:
ret void
}
; GCN-LABEL: {{^}}widen_vselect_and_mask_v4i64:
; GCN: v_cmp_eq_u64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]],
; GCN: v_cndmask_b32_e64 v[[VSEL:[0-9]+]], 0, -1, [[CMP]]
; GCN: v_mov_b32_e32 v[[VSEL_EXT:[0-9]+]], v[[VSEL]]
; GCN: v_cmp_lt_i64_e32 vcc, -1, v[[[VSEL]]:[[VSEL_EXT]]]
define amdgpu_kernel void @widen_vselect_and_mask_v4i64(<4 x i64> %arg) #0 {
; GCN-LABEL: widen_vselect_and_mask_v4i64:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_mov_b64 s[4:5], 0
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_mov_b64 s[8:9], 16
; GCN-NEXT: s_mov_b32 s11, 0xf000
; GCN-NEXT: s_mov_b32 s10, -1
; GCN-NEXT: v_mov_b32_e32 v1, v0
; GCN-NEXT: v_mov_b32_e32 v2, v0
; GCN-NEXT: v_mov_b32_e32 v3, v0
; GCN-NEXT: v_cmp_eq_u64_e64 s[2:3], s[0:1], 0
; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[2:3]
; GCN-NEXT: v_cmp_ne_u64_e64 s[0:1], s[0:1], 0
; GCN-NEXT: v_mov_b32_e32 v5, v4
; GCN-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[4:5]
; GCN-NEXT: s_and_b64 s[0:1], vcc, s[0:1]
; GCN-NEXT: v_cndmask_b32_e64 v4, 0, 1, s[0:1]
; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0
; GCN-NEXT: v_mov_b32_e32 v5, v0
; GCN-NEXT: v_mov_b32_e32 v6, v0
; GCN-NEXT: v_mov_b32_e32 v7, v0
; GCN-NEXT: s_mov_b32 s6, s10
; GCN-NEXT: s_mov_b32 s7, s11
; GCN-NEXT: buffer_store_dwordx4 v[4:7], off, s[4:7], 0
; GCN-NEXT: s_endpgm
bb:
%tmp = extractelement <4 x i64> %arg, i64 0
%tmp1 = icmp eq i64 %tmp, 0