From 3355cca9380e6e72e5139c6407de7132478635c3 Mon Sep 17 00:00:00 2001
From: Shilei Tian
Date: Tue, 1 Jul 2025 09:25:08 -0400
Subject: [PATCH] [NFC][AMDGPU] Auto generate check lines for some test cases (#146400)

---
 .../CodeGen/AMDGPU/combine-and-sext-bool.ll   |  132 +-
 llvm/test/CodeGen/AMDGPU/icmp.i16.ll          | 1640 +++++++++++++++--
 llvm/test/CodeGen/AMDGPU/valu-i1.ll           |  355 ++--
 .../CodeGen/AMDGPU/vcmp-saveexec-to-vcmpx.ll  |  215 ++-
 .../CodeGen/AMDGPU/widen-vselect-and-mask.ll  |   64 +-
 5 files changed, 2085 insertions(+), 321 deletions(-)

diff --git a/llvm/test/CodeGen/AMDGPU/combine-and-sext-bool.ll b/llvm/test/CodeGen/AMDGPU/combine-and-sext-bool.ll
index b98c81db5da9..4b0fc9380b29 100644
--- a/llvm/test/CodeGen/AMDGPU/combine-and-sext-bool.ll
+++ b/llvm/test/CodeGen/AMDGPU/combine-and-sext-bool.ll
@@ -1,13 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
 
-; GCN-LABEL: {{^}}and_i1_sext_bool:
-; GCN: v_cmp_{{gt|le}}_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: v_cndmask_b32_e{{32|64}} [[VAL:v[0-9]+]], 0, v{{[0-9]+}}, [[CC]]
-; GCN: store_dword {{.*}}[[VAL]]
-; GCN-NOT: v_cndmask_b32_e64 v{{[0-9]+}}, {{0|-1}}, {{0|-1}}
-; GCN-NOT: v_and_b32_e32
-
 define amdgpu_kernel void @and_i1_sext_bool(ptr addrspace(1) nocapture %arg) {
+; GCN-LABEL: and_i1_sext_bool:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_mov_b32 s2, 0
+; GCN-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; GCN-NEXT: v_mov_b32_e32 v3, 0
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: buffer_load_dword v4, v[2:3], s[0:3], 0 addr64
+; GCN-NEXT: v_cmp_gt_u32_e32 vcc, v0, v1
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v4, vcc
+; GCN-NEXT: buffer_store_dword v0, v[2:3], s[0:3], 0 addr64
+; GCN-NEXT: s_endpgm
 bb:
   %x = tail call i32 @llvm.amdgcn.workitem.id.x()
   %y = tail call i32 @llvm.amdgcn.workitem.id.y()
@@ -20,37 +28,40 @@ bb:
   ret void
 }
 
-; GCN-LABEL: {{^}}and_sext_bool_fcmp:
-; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-NEXT: s_setpc_b64
 define i32 @and_sext_bool_fcmp(float %x, i32 %y) {
+; GCN-LABEL: and_sext_bool_fcmp:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_f32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
   %cmp = fcmp oeq float %x, 0.0
   %sext = sext i1 %cmp to i32
   %and = and i32 %sext, %y
   ret i32 %and
 }
 
-; GCN-LABEL: {{^}}and_sext_bool_fpclass:
-; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_mov_b32_e32 [[K:v[0-9]+]], 0x7b
-; GCN-NEXT: v_cmp_class_f32_e32 vcc, v0, [[K]]
-; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-NEXT: s_setpc_b64
 define i32 @and_sext_bool_fpclass(float %x, i32 %y) {
+; GCN-LABEL: and_sext_bool_fpclass:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v2, 0x7b
+; GCN-NEXT: v_cmp_class_f32_e32 vcc, v0, v2
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
   %class = call i1 @llvm.is.fpclass(float %x, i32 123)
   %sext = sext i1 %class to i32
   %and = and i32 %sext, %y
   ret i32 %and
 }
 
-; GCN-LABEL: {{^}}and_sext_bool_uadd_w_overflow:
-; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-NEXT: s_setpc_b64
 define i32 @and_sext_bool_uadd_w_overflow(i32 %x, i32 %y) {
+; GCN-LABEL: and_sext_bool_uadd_w_overflow:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
   %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
   %carry = extractvalue { i32, i1 } %uadd, 1
   %sext = sext i1 %carry to i32
@@ -58,12 +69,13 @@ define i32 @and_sext_bool_uadd_w_overflow(i32 %x, i32 %y) {
   ret i32 %and
 }
 
-; GCN-LABEL: {{^}}and_sext_bool_usub_w_overflow:
-; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
-; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-NEXT: s_setpc_b64
 define i32 @and_sext_bool_usub_w_overflow(i32 %x, i32 %y) {
+; GCN-LABEL: and_sext_bool_usub_w_overflow:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
   %uadd = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
   %carry = extractvalue { i32, i1 } %uadd, 1
   %sext = sext i1 %carry to i32
@@ -71,15 +83,16 @@ define i32 @and_sext_bool_usub_w_overflow(i32 %x, i32 %y) {
   ret i32 %and
 }
 
-; GCN-LABEL: {{^}}and_sext_bool_sadd_w_overflow:
-; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
-; GCN-NEXT: v_add_i32_e64 v2, s[4:5], v0, v1
-; GCN-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
-; GCN-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-NEXT: s_setpc_b64
 define i32 @and_sext_bool_sadd_w_overflow(i32 %x, i32 %y) {
+; GCN-LABEL: and_sext_bool_sadd_w_overflow:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
+; GCN-NEXT: v_add_i32_e64 v2, s[4:5], v0, v1
+; GCN-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
+; GCN-NEXT: s_xor_b64 vcc, vcc, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
   %uadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
   %carry = extractvalue { i32, i1 } %uadd, 1
   %sext = sext i1 %carry to i32
@@ -87,15 +100,16 @@ define i32 @and_sext_bool_sadd_w_overflow(i32 %x, i32 %y) {
   ret i32 %and
 }
 
-; GCN-LABEL: {{^}}and_sext_bool_ssub_w_overflow:
-; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
-; GCN-NEXT: v_add_i32_e64 v2, s[4:5], v0, v1
-; GCN-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
-; GCN-NEXT: s_xor_b64 vcc, vcc, s[4:5]
-; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-NEXT: s_setpc_b64
 define i32 @and_sext_bool_ssub_w_overflow(i32 %x, i32 %y) {
+; GCN-LABEL: and_sext_bool_ssub_w_overflow:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_gt_i32_e32 vcc, 0, v1
+; GCN-NEXT: v_add_i32_e64 v2, s[4:5], v0, v1
+; GCN-NEXT: v_cmp_lt_i32_e64 s[4:5], v2, v0
+; GCN-NEXT: s_xor_b64 vcc, vcc, s[4:5]
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
   %uadd = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
   %carry = extractvalue { i32, i1 } %uadd, 1
   %sext = sext i1 %carry to i32
@@ -103,15 +117,16 @@ define i32 @and_sext_bool_ssub_w_overflow(i32 %x, i32 %y) {
   ret i32 %and
 }
 
-; GCN-LABEL: {{^}}and_sext_bool_smul_w_overflow:
-; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_mul_hi_i32 v2, v0, v1
-; GCN-NEXT: v_mul_lo_u32 v0, v0, v1
-; GCN-NEXT: v_ashrrev_i32_e32 v0, 31, v0
-; GCN-NEXT: v_cmp_ne_u32_e32 vcc, v2, v0
-; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-NEXT: s_setpc_b64
 define i32 @and_sext_bool_smul_w_overflow(i32 %x, i32 %y) {
+; GCN-LABEL: and_sext_bool_smul_w_overflow:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_hi_i32 v2, v0, v1
+; GCN-NEXT: v_mul_lo_u32 v0, v0, v1
+; GCN-NEXT: v_ashrrev_i32_e32 v0, 31, v0
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, v2, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
   %uadd = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %x, i32 %y)
   %carry = extractvalue { i32, i1 } %uadd, 1
   %sext = sext i1 %carry to i32
@@ -119,13 +134,14 @@ define i32 @and_sext_bool_smul_w_overflow(i32 %x, i32 %y) {
   ret i32 %and
 }
 
-; GCN-LABEL: {{^}}and_sext_bool_umul_w_overflow:
-; GCN: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GCN-NEXT: v_mul_hi_u32 v0, v0, v1
-; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
-; GCN-NEXT: s_setpc_b64
 define i32 @and_sext_bool_umul_w_overflow(i32 %x, i32 %y) {
+; GCN-LABEL: and_sext_bool_umul_w_overflow:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_hi_u32 v0, v0, v1
+; GCN-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, 0, v1, vcc
+; GCN-NEXT: s_setpc_b64 s[30:31]
   %uadd = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
   %carry = extractvalue { i32, i1 } %uadd, 1
   %sext = sext i1 %carry to i32
diff --git a/llvm/test/CodeGen/AMDGPU/icmp.i16.ll b/llvm/test/CodeGen/AMDGPU/icmp.i16.ll
index 6a4ae7f4e0d7..f9dcd92a3e51 100644
--- a/llvm/test/CodeGen/AMDGPU/icmp.i16.ll
+++ b/llvm/test/CodeGen/AMDGPU/icmp.i16.ll
@@ -1,18 +1,95 @@
-; RUN: llc -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s| FileCheck -check-prefix=GCN -check-prefix=SI %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -verify-machineinstrs < %s| FileCheck -check-prefixes=GCN,GFX11-FAKE16 %s
-; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -verify-machineinstrs < %s| FileCheck -check-prefixes=GCN,GFX11-TRUE16 %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI %s
+; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s| FileCheck -check-prefix=SI %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=-real-true16 -verify-machineinstrs < %s| FileCheck -check-prefix=GFX11-FAKE16 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr=+real-true16 -verify-machineinstrs < %s| FileCheck -check-prefix=GFX11-TRUE16 %s
 
 ;;;==========================================================================;;;
 ;; 16-bit integer comparisons
 ;;;==========================================================================;;;
-
-; GCN-LABEL: {{^}}i16_eq:
-; VI: v_cmp_eq_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_cmp_eq_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-FAKE16: v_cmp_eq_u16_e32 vcc_lo, v{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-TRUE16: v_cmp_eq_u16_e32 vcc_lo, v{{[0-9]+}}.{{(l|h)}}, v{{[0-9]+}}.{{(l|h)}}
 define amdgpu_kernel void @i16_eq(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; VI-LABEL: i16_eq:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+;
VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: v_mov_b32_e32 v4, s5 +; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3 +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc +; VI-NEXT: flat_load_ushort v2, v[1:2] +; VI-NEXT: flat_load_ushort v3, v[3:4] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_cmp_eq_u16_e32 vcc, v2, v3 +; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm +; +; SI-LABEL: i16_eq: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; SI-NEXT: s_mov_b32 s11, 0xf000 +; SI-NEXT: s_mov_b32 s10, 0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0 +; SI-NEXT: s_mov_b64 s[6:7], s[10:11] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[8:9], s[2:3] +; SI-NEXT: buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64 +; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; SI-NEXT: s_mov_b64 s[2:3], s[10:11] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cmp_eq_u32_e32 vcc, v3, v4 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc +; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 +; SI-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: i16_eq: +; GFX11-FAKE16: ; %bb.0: ; %entry +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3] +; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, v2, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: i16_eq: +; GFX11-TRUE16: ; %bb.0: ; %entry +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, v0.l, v0.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %tid.ext = sext i32 %tid to i64 @@ -27,12 +104,89 @@ entry: ret void } -; GCN-LABEL: {{^}}i16_ne: -; VI: v_cmp_ne_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; SI: v_cmp_ne_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-FAKE16: v_cmp_ne_u16_e32 vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-TRUE16: v_cmp_ne_u16_e32 vcc_lo, v{{[0-9]+}}.{{(l|h)}}, v{{[0-9]+}}.{{(l|h)}} define 
amdgpu_kernel void @i16_ne(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 { +; VI-LABEL: i16_ne: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 +; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: v_mov_b32_e32 v4, s5 +; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3 +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc +; VI-NEXT: flat_load_ushort v2, v[1:2] +; VI-NEXT: flat_load_ushort v3, v[3:4] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_cmp_ne_u16_e32 vcc, v2, v3 +; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm +; +; SI-LABEL: i16_ne: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; SI-NEXT: s_mov_b32 s11, 0xf000 +; SI-NEXT: s_mov_b32 s10, 0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0 +; SI-NEXT: s_mov_b64 s[6:7], s[10:11] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[8:9], s[2:3] +; SI-NEXT: buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64 +; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; SI-NEXT: s_mov_b64 s[2:3], s[10:11] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cmp_ne_u32_e32 vcc, v3, v4 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc +; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 +; SI-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: i16_ne: +; GFX11-FAKE16: ; %bb.0: ; %entry +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3] +; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_ne_u16_e32 vcc_lo, v2, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: i16_ne: +; GFX11-TRUE16: ; %bb.0: ; %entry +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ne_u16_e32 vcc_lo, v0.l, v0.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %tid.ext = sext i32 %tid to i64 @@ 
-47,12 +201,89 @@ entry: ret void } -; GCN-LABEL: {{^}}i16_ugt: -; VI: v_cmp_gt_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; SI: v_cmp_gt_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-FAKE16: v_cmp_gt_u16_e32 vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-TRUE16: v_cmp_gt_u16_e32 vcc_lo, v{{[0-9]+}}.{{(l|h)}}, v{{[0-9]+}}.{{(l|h)}} define amdgpu_kernel void @i16_ugt(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 { +; VI-LABEL: i16_ugt: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 +; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: v_mov_b32_e32 v4, s5 +; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3 +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc +; VI-NEXT: flat_load_ushort v2, v[1:2] +; VI-NEXT: flat_load_ushort v3, v[3:4] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_cmp_gt_u16_e32 vcc, v2, v3 +; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm +; +; SI-LABEL: i16_ugt: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; SI-NEXT: s_mov_b32 s11, 0xf000 +; SI-NEXT: s_mov_b32 s10, 0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0 +; SI-NEXT: s_mov_b64 s[6:7], s[10:11] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[8:9], s[2:3] +; SI-NEXT: buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64 +; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; SI-NEXT: s_mov_b64 s[2:3], s[10:11] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cmp_gt_u32_e32 vcc, v3, v4 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc +; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 +; SI-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: i16_ugt: +; GFX11-FAKE16: ; %bb.0: ; %entry +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3] +; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_gt_u16_e32 vcc_lo, v2, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: i16_ugt: +; GFX11-TRUE16: ; %bb.0: ; %entry +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, 
s[4:5] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_gt_u16_e32 vcc_lo, v0.l, v0.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %tid.ext = sext i32 %tid to i64 @@ -67,12 +298,89 @@ entry: ret void } -; GCN-LABEL: {{^}}i16_uge: -; VI: v_cmp_ge_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; SI: v_cmp_ge_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-FAKE16: v_cmp_ge_u16_e32 vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-TRUE16: v_cmp_ge_u16_e32 vcc_lo, v{{[0-9]+}}.{{(l|h)}}, v{{[0-9]+}}.{{(l|h)}} define amdgpu_kernel void @i16_uge(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 { +; VI-LABEL: i16_uge: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 +; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: v_mov_b32_e32 v4, s5 +; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3 +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc +; VI-NEXT: flat_load_ushort v2, v[1:2] +; VI-NEXT: flat_load_ushort v3, v[3:4] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_cmp_ge_u16_e32 vcc, v2, v3 +; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm +; +; SI-LABEL: i16_uge: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; SI-NEXT: s_mov_b32 s11, 0xf000 +; SI-NEXT: s_mov_b32 s10, 0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0 +; SI-NEXT: s_mov_b64 s[6:7], s[10:11] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[8:9], s[2:3] +; SI-NEXT: buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64 +; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; SI-NEXT: s_mov_b64 s[2:3], s[10:11] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cmp_ge_u32_e32 vcc, v3, v4 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc +; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 +; SI-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: i16_uge: +; GFX11-FAKE16: ; %bb.0: ; %entry +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3] +; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_ge_u16_e32 vcc_lo, v2, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: i16_uge: +; GFX11-TRUE16: ; %bb.0: ; %entry +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0 +; 
GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ge_u16_e32 vcc_lo, v0.l, v0.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %tid.ext = sext i32 %tid to i64 @@ -87,12 +395,89 @@ entry: ret void } -; GCN-LABEL: {{^}}i16_ult: -; VI: v_cmp_lt_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; SI: v_cmp_lt_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-FAKE16: v_cmp_lt_u16_e32 vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-TRUE16: v_cmp_lt_u16_e32 vcc_lo, v{{[0-9]+}}.{{(l|h)}}, v{{[0-9]+}}.{{(l|h)}} define amdgpu_kernel void @i16_ult(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 { +; VI-LABEL: i16_ult: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 +; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: v_mov_b32_e32 v4, s5 +; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3 +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc +; VI-NEXT: flat_load_ushort v2, v[1:2] +; VI-NEXT: flat_load_ushort v3, v[3:4] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_cmp_lt_u16_e32 vcc, v2, v3 +; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm +; +; SI-LABEL: i16_ult: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; SI-NEXT: s_mov_b32 s11, 0xf000 +; SI-NEXT: s_mov_b32 s10, 0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0 +; SI-NEXT: s_mov_b64 s[6:7], s[10:11] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[8:9], s[2:3] +; SI-NEXT: buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64 +; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; SI-NEXT: s_mov_b64 s[2:3], s[10:11] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cmp_lt_u32_e32 vcc, v3, v4 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc +; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 +; SI-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: i16_ult: +; GFX11-FAKE16: ; %bb.0: ; %entry +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3] +; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_lt_u16_e32 vcc_lo, v2, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; 
GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: i16_ult: +; GFX11-TRUE16: ; %bb.0: ; %entry +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_lt_u16_e32 vcc_lo, v0.l, v0.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %tid.ext = sext i32 %tid to i64 @@ -107,12 +492,89 @@ entry: ret void } -; GCN-LABEL: {{^}}i16_ule: -; VI: v_cmp_le_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; SI: v_cmp_le_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-FAKE16: v_cmp_le_u16_e32 vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-TRUE16: v_cmp_le_u16_e32 vcc_lo, v{{[0-9]+}}.{{(l|h)}}, v{{[0-9]+}}.{{(l|h)}} define amdgpu_kernel void @i16_ule(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 { +; VI-LABEL: i16_ule: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 +; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: v_mov_b32_e32 v4, s5 +; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3 +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc +; VI-NEXT: flat_load_ushort v2, v[1:2] +; VI-NEXT: flat_load_ushort v3, v[3:4] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_cmp_le_u16_e32 vcc, v2, v3 +; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm +; +; SI-LABEL: i16_ule: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; SI-NEXT: s_mov_b32 s11, 0xf000 +; SI-NEXT: s_mov_b32 s10, 0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0 +; SI-NEXT: s_mov_b64 s[6:7], s[10:11] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[8:9], s[2:3] +; SI-NEXT: buffer_load_ushort v3, v[1:2], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_ushort v4, v[1:2], s[4:7], 0 addr64 +; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; SI-NEXT: s_mov_b64 s[2:3], s[10:11] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cmp_le_u32_e32 vcc, v3, v4 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc +; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 +; SI-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: i16_ule: +; GFX11-FAKE16: ; %bb.0: ; %entry +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; 
GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3] +; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_le_u16_e32 vcc_lo, v2, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: i16_ule: +; GFX11-TRUE16: ; %bb.0: ; %entry +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_le_u16_e32 vcc_lo, v0.l, v0.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %tid.ext = sext i32 %tid to i64 @@ -128,12 +590,89 @@ entry: } -; GCN-LABEL: {{^}}i16_sgt: -; VI: v_cmp_gt_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; SI: v_cmp_gt_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-FAKE16: v_cmp_gt_i16_e32 vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-TRUE16: v_cmp_gt_i16_e32 vcc_lo, v{{[0-9]+}}.{{(l|h)}}, v{{[0-9]+}}.{{(l|h)}} define amdgpu_kernel void @i16_sgt(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 { +; VI-LABEL: i16_sgt: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 +; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: v_mov_b32_e32 v4, s5 +; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3 +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc +; VI-NEXT: flat_load_ushort v2, v[1:2] +; VI-NEXT: flat_load_ushort v3, v[3:4] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_cmp_gt_i16_e32 vcc, v2, v3 +; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm +; +; SI-LABEL: i16_sgt: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; SI-NEXT: s_mov_b32 s11, 0xf000 +; SI-NEXT: s_mov_b32 s10, 0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0 +; SI-NEXT: s_mov_b64 s[6:7], s[10:11] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[8:9], s[2:3] +; SI-NEXT: buffer_load_sshort v3, v[1:2], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_sshort v4, v[1:2], s[4:7], 0 addr64 +; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; SI-NEXT: s_mov_b64 s[2:3], s[10:11] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cmp_gt_i32_e32 vcc, v3, v4 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc +; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 +; SI-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: i16_sgt: +; GFX11-FAKE16: ; %bb.0: ; %entry +; GFX11-FAKE16-NEXT: 
s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3] +; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_gt_i16_e32 vcc_lo, v2, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: i16_sgt: +; GFX11-TRUE16: ; %bb.0: ; %entry +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_gt_i16_e32 vcc_lo, v0.l, v0.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %tid.ext = sext i32 %tid to i64 @@ -148,12 +687,89 @@ entry: ret void } -; GCN-LABEL: {{^}}i16_sge: -; VI: v_cmp_ge_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; SI: v_cmp_ge_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-FAKE16: v_cmp_ge_i16_e32 vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-TRUE16: v_cmp_ge_i16_e32 vcc_lo, v{{[0-9]+}}.{{(l|h)}}, v{{[0-9]+}}.{{(l|h)}} define amdgpu_kernel void @i16_sge(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 { +; VI-LABEL: i16_sge: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 +; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: v_mov_b32_e32 v4, s5 +; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3 +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc +; VI-NEXT: flat_load_ushort v2, v[1:2] +; VI-NEXT: flat_load_ushort v3, v[3:4] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_cmp_ge_i16_e32 vcc, v2, v3 +; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm +; +; SI-LABEL: i16_sge: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; SI-NEXT: s_mov_b32 s11, 0xf000 +; SI-NEXT: s_mov_b32 s10, 0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0 +; SI-NEXT: s_mov_b64 s[6:7], s[10:11] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[8:9], s[2:3] +; SI-NEXT: buffer_load_sshort v3, v[1:2], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_sshort v4, v[1:2], s[4:7], 0 addr64 +; SI-NEXT: v_lshlrev_b32_e32 v1, 
2, v0 +; SI-NEXT: s_mov_b64 s[2:3], s[10:11] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cmp_ge_i32_e32 vcc, v3, v4 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc +; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 +; SI-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: i16_sge: +; GFX11-FAKE16: ; %bb.0: ; %entry +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3] +; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_ge_i16_e32 vcc_lo, v2, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: i16_sge: +; GFX11-TRUE16: ; %bb.0: ; %entry +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_ge_i16_e32 vcc_lo, v0.l, v0.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %tid.ext = sext i32 %tid to i64 @@ -168,12 +784,89 @@ entry: ret void } -; GCN-LABEL: {{^}}i16_slt: -; VI: v_cmp_lt_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; SI: v_cmp_lt_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-FAKE16: v_cmp_lt_i16_e32 vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-TRUE16: v_cmp_lt_i16_e32 vcc_lo, v{{[0-9]+}}.{{(l|h)}}, v{{[0-9]+}}.{{(l|h)}} define amdgpu_kernel void @i16_slt(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 { +; VI-LABEL: i16_slt: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 +; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: v_mov_b32_e32 v4, s5 +; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3 +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc +; VI-NEXT: flat_load_ushort v2, v[1:2] +; VI-NEXT: flat_load_ushort v3, v[3:4] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_cmp_lt_i16_e32 vcc, v2, v3 +; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm +; +; SI-LABEL: i16_slt: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; SI-NEXT: s_mov_b32 s11, 0xf000 +; SI-NEXT: s_mov_b32 s10, 0 +; 
SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0 +; SI-NEXT: s_mov_b64 s[6:7], s[10:11] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[8:9], s[2:3] +; SI-NEXT: buffer_load_sshort v3, v[1:2], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_sshort v4, v[1:2], s[4:7], 0 addr64 +; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; SI-NEXT: s_mov_b64 s[2:3], s[10:11] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cmp_lt_i32_e32 vcc, v3, v4 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc +; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 +; SI-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: i16_slt: +; GFX11-FAKE16: ; %bb.0: ; %entry +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3] +; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_lt_i16_e32 vcc_lo, v2, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: i16_slt: +; GFX11-TRUE16: ; %bb.0: ; %entry +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_lt_i16_e32 vcc_lo, v0.l, v0.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %tid.ext = sext i32 %tid to i64 @@ -188,12 +881,89 @@ entry: ret void } -; GCN-LABEL: {{^}}i16_sle: -; VI: v_cmp_le_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; SI: v_cmp_le_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-FAKE16: v_cmp_le_i16_e32 vcc_lo, v{{[0-9]+}}, v{{[0-9]+}} -; GFX11-TRUE16: v_cmp_le_i16_e32 vcc_lo, v{{[0-9]+}}.{{(l|h)}}, v{{[0-9]+}}.{{(l|h)}} define amdgpu_kernel void @i16_sle(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 { +; VI-LABEL: i16_sle: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34 +; VI-NEXT: v_lshlrev_b32_e32 v3, 1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: v_mov_b32_e32 v4, s5 +; VI-NEXT: v_add_u32_e32 v3, vcc, s4, v3 +; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc +; VI-NEXT: flat_load_ushort v2, v[1:2] +; VI-NEXT: flat_load_ushort v3, v[3:4] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: 
v_cmp_le_i16_e32 vcc, v2, v3 +; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm +; +; SI-LABEL: i16_sle: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd +; SI-NEXT: s_mov_b32 s11, 0xf000 +; SI-NEXT: s_mov_b32 s10, 0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0 +; SI-NEXT: s_mov_b64 s[6:7], s[10:11] +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[8:9], s[2:3] +; SI-NEXT: buffer_load_sshort v3, v[1:2], s[8:11], 0 addr64 +; SI-NEXT: buffer_load_sshort v4, v[1:2], s[4:7], 0 addr64 +; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; SI-NEXT: s_mov_b64 s[2:3], s[10:11] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cmp_le_i32_e32 vcc, v3, v4 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc +; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 +; SI-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: i16_sle: +; GFX11-FAKE16: ; %bb.0: ; %entry +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: s_clause 0x1 +; GFX11-FAKE16-NEXT: global_load_u16 v2, v1, s[2:3] +; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[4:5] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_le_i16_e32 vcc_lo, v2, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: i16_sle: +; GFX11-TRUE16: ; %bb.0: ; %entry +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: s_load_b64 s[4:5], s[4:5], 0x34 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 1, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: s_clause 0x1 +; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v2, s[2:3] +; GFX11-TRUE16-NEXT: global_load_d16_hi_b16 v0, v2, s[4:5] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_le_i16_e32 vcc_lo, v0.l, v0.h +; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %tid.ext = sext i32 %tid to i64 @@ -209,12 +979,78 @@ entry: } ; These should be commuted to reduce code size -; GCN-LABEL: {{^}}i16_eq_v_s: -; VI: v_cmp_eq_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} -; SI: v_cmp_eq_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} -; GFX11-FAKE16: v_cmp_eq_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}} -; GFX11-TRUE16: v_cmp_eq_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}.{{(l|h)}} define amdgpu_kernel void @i16_eq_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 { +; VI-LABEL: i16_eq_v_s: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dword s4, s[4:5], 0x34 +; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: 
flat_load_ushort v2, v[1:2] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt vmcnt(0) +; VI-NEXT: v_cmp_eq_u16_e32 vcc, s4, v2 +; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc +; VI-NEXT: flat_store_dword v[0:1], v2 +; VI-NEXT: s_endpgm +; +; SI-LABEL: i16_eq_v_s: +; SI: ; %bb.0: ; %entry +; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9 +; SI-NEXT: s_load_dword s8, s[4:5], 0xd +; SI-NEXT: s_mov_b32 s7, 0xf000 +; SI-NEXT: s_mov_b32 s6, 0 +; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; SI-NEXT: v_mov_b32_e32 v2, 0 +; SI-NEXT: s_waitcnt lgkmcnt(0) +; SI-NEXT: s_mov_b64 s[4:5], s[2:3] +; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64 +; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0 +; SI-NEXT: s_and_b32 s4, s8, 0xffff +; SI-NEXT: s_mov_b64 s[2:3], s[6:7] +; SI-NEXT: s_waitcnt vmcnt(0) +; SI-NEXT: v_cmp_eq_u32_e32 vcc, s4, v3 +; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc +; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64 +; SI-NEXT: s_endpgm +; +; GFX11-FAKE16-LABEL: i16_eq_v_s: +; GFX11-FAKE16: ; %bb.0: ; %entry +; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0 +; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34 +; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3] +; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-FAKE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, s4, v1 +; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo +; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1] +; GFX11-FAKE16-NEXT: s_endpgm +; +; GFX11-TRUE16-LABEL: i16_eq_v_s: +; GFX11-TRUE16: ; %bb.0: ; %entry +; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 +; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0 +; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34 +; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1 +; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1 +; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0) +; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3] +; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) +; GFX11-TRUE16-NEXT: v_cmp_eq_u16_e32 vcc_lo, s4, v0.l +; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo +; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1] +; GFX11-TRUE16-NEXT: s_endpgm entry: %tid = call i32 @llvm.amdgcn.workitem.id.x() %tid.ext = sext i32 %tid to i64 @@ -227,12 +1063,78 @@ entry: ret void } -; GCN-LABEL: {{^}}i16_ne_v_s: -; VI: v_cmp_ne_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} -; SI: v_cmp_ne_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}} -; GFX11-FAKE16: v_cmp_ne_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}} -; GFX11-TRUE16: v_cmp_ne_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}.{{(l|h)}} define amdgpu_kernel void @i16_ne_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 { +; VI-LABEL: i16_ne_v_s: +; VI: ; %bb.0: ; %entry +; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24 +; VI-NEXT: s_load_dword s4, s[4:5], 0x34 +; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0 +; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 +; VI-NEXT: s_waitcnt lgkmcnt(0) +; VI-NEXT: v_mov_b32_e32 v2, s3 +; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1 +; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc +; VI-NEXT: flat_load_ushort v2, v[1:2] +; VI-NEXT: v_mov_b32_e32 v1, s1 +; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0 +; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc +; VI-NEXT: s_waitcnt 
vmcnt(0)
+; VI-NEXT: v_cmp_ne_u16_e32 vcc, s4, v2
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; SI-LABEL: i16_ne_v_s:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dword s8, s[4:5], 0xd
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT: v_mov_b32_e32 v2, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_and_b32 s4, s8, 0xffff
+; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, s4, v3
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: i16_ne_v_s:
+; GFX11-FAKE16: ; %bb.0: ; %entry
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ne_u16_e32 vcc_lo, s4, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
+; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-TRUE16-LABEL: i16_ne_v_s:
+; GFX11-TRUE16: ; %bb.0: ; %entry
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ne_u16_e32 vcc_lo, s4, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
+; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
 entry:
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %tid.ext = sext i32 %tid to i64
@@ -245,12 +1147,78 @@ entry:
 ret void
 }

-; GCN-LABEL: {{^}}i16_ugt_v_s:
-; VI: v_cmp_lt_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_cmp_lt_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-FAKE16: v_cmp_lt_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-TRUE16: v_cmp_lt_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}.{{(l|h)}}
 define amdgpu_kernel void @i16_ugt_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
+; VI-LABEL: i16_ugt_v_s:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_ushort v2, v[1:2]
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cmp_lt_u16_e32 vcc, s4, v2
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; SI-LABEL: i16_ugt_v_s:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dword s8, s[4:5], 0xd
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT: v_mov_b32_e32 v2, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_and_b32 s4, s8, 0xffff
+; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_lt_u32_e32 vcc, s4, v3
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: i16_ugt_v_s:
+; GFX11-FAKE16: ; %bb.0: ; %entry
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_lt_u16_e32 vcc_lo, s4, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
+; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-TRUE16-LABEL: i16_ugt_v_s:
+; GFX11-TRUE16: ; %bb.0: ; %entry
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_lt_u16_e32 vcc_lo, s4, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
+; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
 entry:
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %tid.ext = sext i32 %tid to i64
@@ -263,12 +1231,78 @@ entry:
 ret void
 }

-; GCN-LABEL: {{^}}i16_uge_v_s:
-; VI: v_cmp_le_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_cmp_le_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-FAKE16: v_cmp_le_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-TRUE16: v_cmp_le_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}.{{(l|h)}}
 define amdgpu_kernel void @i16_uge_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
+; VI-LABEL: i16_uge_v_s:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_ushort v2, v[1:2]
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cmp_le_u16_e32 vcc, s4, v2
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; SI-LABEL: i16_uge_v_s:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dword s8, s[4:5], 0xd
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT: v_mov_b32_e32 v2, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_and_b32 s4, s8, 0xffff
+; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_le_u32_e32 vcc, s4, v3
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: i16_uge_v_s:
+; GFX11-FAKE16: ; %bb.0: ; %entry
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_le_u16_e32 vcc_lo, s4, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
+; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-TRUE16-LABEL: i16_uge_v_s:
+; GFX11-TRUE16: ; %bb.0: ; %entry
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_le_u16_e32 vcc_lo, s4, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
+; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
 entry:
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %tid.ext = sext i32 %tid to i64
@@ -281,12 +1315,78 @@ entry:
 ret void
 }

-; GCN-LABEL: {{^}}i16_ult_v_s:
-; VI: v_cmp_gt_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_cmp_gt_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-FAKE16: v_cmp_gt_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-TRUE16: v_cmp_gt_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}.{{(l|h)}}
 define amdgpu_kernel void @i16_ult_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
+; VI-LABEL: i16_ult_v_s:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_ushort v2, v[1:2]
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cmp_gt_u16_e32 vcc, s4, v2
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; SI-LABEL: i16_ult_v_s:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dword s8, s[4:5], 0xd
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT: v_mov_b32_e32 v2, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_and_b32 s4, s8, 0xffff
+; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_gt_u32_e32 vcc, s4, v3
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: i16_ult_v_s:
+; GFX11-FAKE16: ; %bb.0: ; %entry
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_gt_u16_e32 vcc_lo, s4, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
+; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-TRUE16-LABEL: i16_ult_v_s:
+; GFX11-TRUE16: ; %bb.0: ; %entry
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_gt_u16_e32 vcc_lo, s4, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
+; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
 entry:
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %tid.ext = sext i32 %tid to i64
@@ -299,12 +1399,78 @@ entry:
 ret void
 }

-; GCN-LABEL: {{^}}i16_ule_v_s:
-; VI: v_cmp_ge_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_cmp_ge_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-FAKE16: v_cmp_ge_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-TRUE16: v_cmp_ge_u16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}.{{(l|h)}}
 define amdgpu_kernel void @i16_ule_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
+; VI-LABEL: i16_ule_v_s:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_ushort v2, v[1:2]
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cmp_ge_u16_e32 vcc, s4, v2
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; SI-LABEL: i16_ule_v_s:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dword s8, s[4:5], 0xd
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT: v_mov_b32_e32 v2, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: buffer_load_ushort v3, v[1:2], s[4:7], 0 addr64
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_and_b32 s4, s8, 0xffff
+; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_ge_u32_e32 vcc, s4, v3
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: i16_ule_v_s:
+; GFX11-FAKE16: ; %bb.0: ; %entry
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ge_u16_e32 vcc_lo, s4, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
+; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-TRUE16-LABEL: i16_ule_v_s:
+; GFX11-TRUE16: ; %bb.0: ; %entry
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ge_u16_e32 vcc_lo, s4, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
+; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
 entry:
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %tid.ext = sext i32 %tid to i64
@@ -317,12 +1483,78 @@ entry:
 ret void
 }

-; GCN-LABEL: {{^}}i16_sgt_v_s:
-; VI: v_cmp_lt_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_cmp_lt_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-FAKE16: v_cmp_lt_i16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-TRUE16: v_cmp_lt_i16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}.{{(l|h)}}
 define amdgpu_kernel void @i16_sgt_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
+; VI-LABEL: i16_sgt_v_s:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_ushort v2, v[1:2]
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cmp_lt_i16_e32 vcc, s4, v2
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; SI-LABEL: i16_sgt_v_s:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dword s8, s[4:5], 0xd
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT: v_mov_b32_e32 v2, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: buffer_load_sshort v3, v[1:2], s[4:7], 0 addr64
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_sext_i32_i16 s4, s8
+; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_lt_i32_e32 vcc, s4, v3
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: i16_sgt_v_s:
+; GFX11-FAKE16: ; %bb.0: ; %entry
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_lt_i16_e32 vcc_lo, s4, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
+; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-TRUE16-LABEL: i16_sgt_v_s:
+; GFX11-TRUE16: ; %bb.0: ; %entry
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_lt_i16_e32 vcc_lo, s4, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
+; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
 entry:
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %tid.ext = sext i32 %tid to i64
@@ -335,12 +1567,78 @@ entry:
 ret void
 }

-; GCN-LABEL: {{^}}i16_sge_v_s:
-; VI: v_cmp_le_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_cmp_le_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-FAKE16: v_cmp_le_i16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-TRUE16: v_cmp_le_i16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}.{{(l|h)}}
 define amdgpu_kernel void @i16_sge_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
+; VI-LABEL: i16_sge_v_s:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_ushort v2, v[1:2]
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cmp_le_i16_e32 vcc, s4, v2
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; SI-LABEL: i16_sge_v_s:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dword s8, s[4:5], 0xd
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT: v_mov_b32_e32 v2, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: buffer_load_sshort v3, v[1:2], s[4:7], 0 addr64
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_sext_i32_i16 s4, s8
+; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_le_i32_e32 vcc, s4, v3
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: i16_sge_v_s:
+; GFX11-FAKE16: ; %bb.0: ; %entry
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_le_i16_e32 vcc_lo, s4, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
+; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-TRUE16-LABEL: i16_sge_v_s:
+; GFX11-TRUE16: ; %bb.0: ; %entry
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_le_i16_e32 vcc_lo, s4, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
+; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
 entry:
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %tid.ext = sext i32 %tid to i64
@@ -353,12 +1651,78 @@ entry:
 ret void
 }

-; GCN-LABEL: {{^}}i16_slt_v_s:
-; VI: v_cmp_gt_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_cmp_gt_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-FAKE16: v_cmp_gt_i16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-TRUE16: v_cmp_gt_i16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}.{{(l|h)}}
 define amdgpu_kernel void @i16_slt_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
+; VI-LABEL: i16_slt_v_s:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_ushort v2, v[1:2]
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cmp_gt_i16_e32 vcc, s4, v2
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; SI-LABEL: i16_slt_v_s:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dword s8, s[4:5], 0xd
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT: v_mov_b32_e32 v2, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: buffer_load_sshort v3, v[1:2], s[4:7], 0 addr64
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_sext_i32_i16 s4, s8
+; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_gt_i32_e32 vcc, s4, v3
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: i16_slt_v_s:
+; GFX11-FAKE16: ; %bb.0: ; %entry
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_gt_i16_e32 vcc_lo, s4, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
+; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-TRUE16-LABEL: i16_slt_v_s:
+; GFX11-TRUE16: ; %bb.0: ; %entry
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_gt_i16_e32 vcc_lo, s4, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
+; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
 entry:
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %tid.ext = sext i32 %tid to i64
@@ -371,12 +1735,78 @@ entry:
 ret void
 }

-; GCN-LABEL: {{^}}i16_sle_v_s:
-; VI: v_cmp_ge_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; SI: v_cmp_ge_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-FAKE16: v_cmp_ge_i16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}
-; GFX11-TRUE16: v_cmp_ge_i16_e32 vcc_lo, s{{[0-9]+}}, v{{[0-9]+}}.{{(l|h)}}
 define amdgpu_kernel void @i16_sle_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
+; VI-LABEL: i16_sle_v_s:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_ushort v2, v[1:2]
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_cmp_ge_i16_e32 vcc, s4, v2
+; VI-NEXT: v_cndmask_b32_e64 v2, 0, -1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
+;
+; SI-LABEL: i16_sle_v_s:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dword s8, s[4:5], 0xd
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT: v_mov_b32_e32 v2, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_mov_b64 s[4:5], s[2:3]
+; SI-NEXT: buffer_load_sshort v3, v[1:2], s[4:7], 0 addr64
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_sext_i32_i16 s4, s8
+; SI-NEXT: s_mov_b64 s[2:3], s[6:7]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_ge_i32_e32 vcc, s4, v3
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc
+; SI-NEXT: buffer_store_dword v0, v[1:2], s[0:3], 0 addr64
+; SI-NEXT: s_endpgm
+;
+; GFX11-FAKE16-LABEL: i16_sle_v_s:
+; GFX11-FAKE16: ; %bb.0: ; %entry
+; GFX11-FAKE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0x3ff, v0
+; GFX11-FAKE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX11-FAKE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-FAKE16-NEXT: global_load_u16 v1, v1, s[2:3]
+; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-FAKE16-NEXT: v_cmp_ge_i16_e32 vcc_lo, s4, v1
+; GFX11-FAKE16-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
+; GFX11-FAKE16-NEXT: global_store_b32 v0, v1, s[0:1]
+; GFX11-FAKE16-NEXT: s_endpgm
+;
+; GFX11-TRUE16-LABEL: i16_sle_v_s:
+; GFX11-TRUE16: ; %bb.0: ; %entry
+; GFX11-TRUE16-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
+; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0x3ff, v0
+; GFX11-TRUE16-NEXT: s_load_b32 s4, s[4:5], 0x34
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 1, v1
+; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 2, v1
+; GFX11-TRUE16-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-TRUE16-NEXT: global_load_d16_b16 v0, v0, s[2:3]
+; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
+; GFX11-TRUE16-NEXT: v_cmp_ge_i16_e32 vcc_lo, s4, v0.l
+; GFX11-TRUE16-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
+; GFX11-TRUE16-NEXT: global_store_b32 v1, v0, s[0:1]
+; GFX11-TRUE16-NEXT: s_endpgm
 entry:
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %tid.ext = sext i32 %tid to i64
diff --git a/llvm/test/CodeGen/AMDGPU/valu-i1.ll b/llvm/test/CodeGen/AMDGPU/valu-i1.ll
index 35cd2663f523..c500565d36b0 100644
--- a/llvm/test/CodeGen/AMDGPU/valu-i1.ll
+++ b/llvm/test/CodeGen/AMDGPU/valu-i1.ll
@@ -1,30 +1,105 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=amdgcn -verify-machineinstrs -enable-misched -asm-verbose -disable-block-placement -simplifycfg-require-and-preserve-domtree=1 < %s | FileCheck -check-prefix=SI %s

 declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone

-; SI-LABEL: {{^}}test_if:
 ; Make sure the i1 values created by the cfg structurizer pass are
 ; moved using VALU instructions

 ; waitcnt should be inserted after exec modification
-; SI: v_cmp_lt_i32_e32 vcc, 1,
-; SI-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, 0
-; SI-NEXT: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, 0
-; SI-NEXT: s_and_saveexec_b64 [[SAVE1:s\[[0-9]+:[0-9]+\]]], vcc
-; SI-NEXT: s_xor_b64 [[SAVE2:s\[[0-9]+:[0-9]+\]]], exec, [[SAVE1]]
-; SI-NEXT: s_cbranch_execz [[FLOW_BB:.LBB[0-9]+_[0-9]+]]
-
-; SI-NEXT: ; %bb.{{[0-9]+}}: ; %LeafBlock3
-; SI: s_mov_b64 s[{{[0-9]:[0-9]}}], -1
-; SI: s_and_saveexec_b64
-; SI-NEXT: s_cbranch_execnz
-
 ; v_mov should be after exec modification
-; SI: [[FLOW_BB]]:
-; SI-NEXT: s_andn2_saveexec_b64 [[SAVE2]], [[SAVE2]]
-;
 define amdgpu_kernel void @test_if(i32 %b, ptr addrspace(1) %src, ptr addrspace(1) %dst) #1 {
+; SI-LABEL: test_if:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_load_dword s8, s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
+; SI-NEXT: v_cmp_lt_i32_e32 vcc, 1, v0
+; SI-NEXT: s_mov_b64 s[10:11], 0
+; SI-NEXT: s_mov_b64 s[2:3], 0
+; SI-NEXT: s_and_saveexec_b64 s[4:5], vcc
+; SI-NEXT: s_xor_b64 s[4:5], exec, s[4:5]
+; SI-NEXT: s_cbranch_execz .LBB0_3
+; SI-NEXT: ; %bb.1: ; %LeafBlock3
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, 2, v0
+; SI-NEXT: s_mov_b64 s[2:3], -1
+; SI-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SI-NEXT: s_cbranch_execnz .LBB0_9
+; SI-NEXT: .LBB0_2: ; %Flow7
+; SI-NEXT: s_or_b64 exec, exec, s[6:7]
+; SI-NEXT: s_and_b64 s[2:3], s[2:3], exec
+; SI-NEXT: .LBB0_3: ; %Flow6
+; SI-NEXT: s_andn2_saveexec_b64 s[4:5], s[4:5]
+; SI-NEXT: s_cbranch_execz .LBB0_5
+; SI-NEXT: ; %bb.4: ; %LeafBlock
+; SI-NEXT: s_mov_b64 s[10:11], exec
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 1, v0
+; SI-NEXT: s_andn2_b64 s[2:3], s[2:3], exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
+; SI-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
+; SI-NEXT: .LBB0_5: ; %Flow8
+; SI-NEXT: s_or_b64 exec, exec, s[4:5]
+; SI-NEXT: s_and_saveexec_b64 s[4:5], s[2:3]
+; SI-NEXT: s_xor_b64 s[2:3], exec, s[4:5]
+; SI-NEXT: s_cbranch_execnz .LBB0_10
+; SI-NEXT: .LBB0_6: ; %Flow9
+; SI-NEXT: s_or_b64 exec, exec, s[2:3]
+; SI-NEXT: s_and_saveexec_b64 s[2:3], s[10:11]
+; SI-NEXT: s_cbranch_execz .LBB0_8
+; SI-NEXT: ; %bb.7: ; %case1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_ashr_i32 s9, s8, 31
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_lshl_b64 s[4:5], s[8:9], 2
+; SI-NEXT: v_mov_b32_e32 v2, 13
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: .LBB0_8: ; %end
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB0_9: ; %case2
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_ashr_i32 s9, s8, 31
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_lshl_b64 s[12:13], s[8:9], 2
+; SI-NEXT: v_mov_b32_e32 v3, 17
+; SI-NEXT: v_mov_b32_e32 v1, s12
+; SI-NEXT: v_mov_b32_e32 v2, s13
+; SI-NEXT: buffer_store_dword v3, v[1:2], s[0:3], 0 addr64
+; SI-NEXT: s_xor_b64 s[2:3], exec, -1
+; SI-NEXT: s_branch .LBB0_2
+; SI-NEXT: .LBB0_10: ; %default
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_ashr_i32 s9, s8, 31
+; SI-NEXT: s_lshl_b64 s[4:5], s[8:9], 2
+; SI-NEXT: s_add_u32 s4, s0, s4
+; SI-NEXT: s_addc_u32 s5, s1, s5
+; SI-NEXT: s_and_saveexec_b64 s[6:7], vcc
+; SI-NEXT: s_xor_b64 s[12:13], exec, s[6:7]
+; SI-NEXT: s_cbranch_execnz .LBB0_14
+; SI-NEXT: .LBB0_11: ; %Flow
+; SI-NEXT: s_andn2_saveexec_b64 s[12:13], s[12:13]
+; SI-NEXT: s_cbranch_execz .LBB0_13
+; SI-NEXT: ; %bb.12: ; %if
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, 19
+; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: .LBB0_13: ; %Flow5
+; SI-NEXT: s_or_b64 exec, exec, s[12:13]
+; SI-NEXT: s_andn2_b64 s[10:11], s[10:11], exec
+; SI-NEXT: s_branch .LBB0_6
+; SI-NEXT: .LBB0_14: ; %else
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: v_mov_b32_e32 v0, 21
+; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; SI-NEXT: s_branch .LBB0_11
 entry:
 %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
 switch i32 %tid, label %default [
@@ -59,17 +134,23 @@ end:
 ret void
 }

-; SI-LABEL: {{^}}simple_test_v_if:
-; SI: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
-; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
-; SI-NEXT: s_cbranch_execz [[EXIT:.LBB[0-9]+_[0-9]+]]
-
-; SI-NEXT: ; %bb.{{[0-9]+}}:
-; SI: buffer_store_dword
-
-; SI-NEXT: {{^}}[[EXIT]]:
-; SI: s_endpgm
 define amdgpu_kernel void @simple_test_v_if(ptr addrspace(1) %dst, ptr addrspace(1) %src) #1 {
+; SI-LABEL: simple_test_v_if:
+; SI: ; %bb.0:
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; SI-NEXT: s_cbranch_execz .LBB1_2
+; SI-NEXT: ; %bb.1: ; %then
+; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: v_mov_b32_e32 v1, 0
+; SI-NEXT: v_mov_b32_e32 v2, 0x3e7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: .LBB1_2: ; %exit
+; SI-NEXT: s_endpgm
 %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
 %is.0 = icmp ne i32 %tid, 0
 br i1 %is.0, label %then, label %exit
@@ -84,18 +165,23 @@ exit:
 }

 ; FIXME: It would be better to endpgm in the then block.
-
-; SI-LABEL: {{^}}simple_test_v_if_ret_else_ret:
-; SI: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
-; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
-; SI-NEXT: s_cbranch_execz [[EXIT:.LBB[0-9]+_[0-9]+]]
-
-; SI-NEXT: ; %bb.{{[0-9]+}}:
-; SI: buffer_store_dword
-
-; SI-NEXT: {{^}}[[EXIT]]:
-; SI: s_endpgm
 define amdgpu_kernel void @simple_test_v_if_ret_else_ret(ptr addrspace(1) %dst, ptr addrspace(1) %src) #1 {
+; SI-LABEL: simple_test_v_if_ret_else_ret:
+; SI: ; %bb.0:
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; SI-NEXT: s_cbranch_execz .LBB2_2
+; SI-NEXT: ; %bb.1: ; %then
+; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: v_mov_b32_e32 v1, 0
+; SI-NEXT: v_mov_b32_e32 v2, 0x3e7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: .LBB2_2: ; %UnifiedReturnBlock
+; SI-NEXT: s_endpgm
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %is.0 = icmp ne i32 %tid, 0
 br i1 %is.0, label %then, label %exit
@@ -112,27 +198,33 @@ exit:
 ; Final block has more than a ret to execute. This was miscompiled
 ; before function exit blocks were unified since the endpgm would
 ; terminate the then wavefront before reaching the store.
-
-; SI-LABEL: {{^}}simple_test_v_if_ret_else_code_ret:
-; SI: v_cmp_eq_u32_e32 vcc, 0, v{{[0-9]+}}
-; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
-; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]]
-; SI: s_cbranch_execnz [[EXIT:.LBB[0-9]+_[0-9]+]]
-
-; SI-NEXT: {{^.LBB[0-9]+_[0-9]+}}: ; %Flow
-; SI-NEXT: s_andn2_saveexec_b64 [[BR_SREG]], [[BR_SREG]]
-; SI-NEXT: s_cbranch_execz [[UNIFIED_RETURN:.LBB[0-9]+_[0-9]+]]
-
-; SI-NEXT: ; %bb.{{[0-9]+}}: ; %then
-; SI: s_waitcnt
-; SI-NEXT: buffer_store_dword
-
-; SI-NEXT: {{^}}[[UNIFIED_RETURN]]: ; %UnifiedReturnBlock
-; SI: s_endpgm
-
-; SI-NEXT: {{^}}[[EXIT]]:
-; SI: ds_write_b32
 define amdgpu_kernel void @simple_test_v_if_ret_else_code_ret(ptr addrspace(1) %dst, ptr addrspace(1) %src) #1 {
+; SI-LABEL: simple_test_v_if_ret_else_code_ret:
+; SI: ; %bb.0:
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; SI-NEXT: s_xor_b64 s[0:1], exec, s[0:1]
+; SI-NEXT: s_cbranch_execnz .LBB3_4
+; SI-NEXT: .LBB3_1: ; %Flow
+; SI-NEXT: s_andn2_saveexec_b64 s[0:1], s[0:1]
+; SI-NEXT: s_cbranch_execz .LBB3_3
+; SI-NEXT: ; %bb.2: ; %then
+; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: v_mov_b32_e32 v1, 0
+; SI-NEXT: v_mov_b32_e32 v2, 0x3e7
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: .LBB3_3: ; %UnifiedReturnBlock
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB3_4: ; %exit
+; SI-NEXT: v_mov_b32_e32 v0, 7
+; SI-NEXT: s_mov_b32 m0, -1
+; SI-NEXT: ds_write_b32 v0, v0
+; SI-NEXT: ; implicit-def: $vgpr0
+; SI-NEXT: s_branch .LBB3_1
 %tid = call i32 @llvm.amdgcn.workitem.id.x()
 %is.0 = icmp ne i32 %tid, 0
 br i1 %is.0, label %then, label %exit
@@ -147,21 +239,38 @@ exit:
 ret void
 }

-; SI-LABEL: {{^}}simple_test_v_loop:
-; SI: v_cmp_ne_u32_e32 vcc, 0, v{{[0-9]+}}
-; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc
-; SI-NEXT: s_cbranch_execz [[LABEL_EXIT:.LBB[0-9]+_[0-9]+]]
-
-; SI: s_mov_b64 {{s\[[0-9]+:[0-9]+\]}}, 0{{$}}
-
-; SI: [[LABEL_LOOP:.LBB[0-9]+_[0-9]+]]:
-; SI: buffer_load_dword
-; SI-DAG: buffer_store_dword
-; SI-DAG: s_cmpk_lg_i32 s{{[0-9]+}}, 0x100
-; SI: s_cbranch_scc1 [[LABEL_LOOP]]
-; SI: [[LABEL_EXIT]]:
-; SI: s_endpgm
 define amdgpu_kernel void @simple_test_v_loop(ptr addrspace(1) %dst, ptr addrspace(1) %src) #1 {
+; SI-LABEL: simple_test_v_loop:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; SI-NEXT: s_cbranch_execz .LBB4_3
+; SI-NEXT: ; %bb.1: ; %loop.preheader
+; SI-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: s_mov_b64 s[0:1], 0
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s9
+; SI-NEXT: v_add_i32_e32 v0, vcc, s8, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s4, s10
+; SI-NEXT: s_mov_b32 s5, s11
+; SI-NEXT: s_mov_b32 s7, s3
+; SI-NEXT: .LBB4_2: ; %loop
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v2, off, s[4:7], 0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v2, v[0:1], s[0:3], 0 addr64
+; SI-NEXT: s_add_u32 s0, s0, 4
+; SI-NEXT: s_addc_u32 s1, s1, 0
+; SI-NEXT: s_cmpk_lg_i32 s0, 0x100
+; SI-NEXT: s_cbranch_scc1 .LBB4_2
+; SI-NEXT: .LBB4_3: ; %exit
+; SI-NEXT: s_endpgm
 entry:
 %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
 %is.0 = icmp ne i32 %tid, 0
@@ -182,45 +291,81 @@ exit:
 ret void
 }

-; SI-LABEL: {{^}}multi_vcond_loop:
-
 ; Load loop limit from buffer
 ; Branch to exit if uniformly not taken
-; SI: ; %bb.0:
-; SI: buffer_load_dword [[VBOUND:v[0-9]+]]
-; SI: v_cmp_lt_i32_e32 vcc
-; SI: s_and_saveexec_b64 [[OUTER_CMP_SREG:s\[[0-9]+:[0-9]+\]]], vcc
-; SI-NEXT: s_cbranch_execz [[LABEL_EXIT:.LBB[0-9]+_[0-9]+]]
-
 ; Initialize inner condition to false
-; SI: ; %bb.{{[0-9]+}}: ; %bb10.preheader
-; SI: s_mov_b64 [[COND_STATE:s\[[0-9]+:[0-9]+\]]], 0{{$}}
-
 ; Clear exec bits for workitems that load -1s
-; SI: .L[[LABEL_LOOP:BB[0-9]+_[0-9]+]]:
-; SI: buffer_load_dword [[B:v[0-9]+]]
-; SI: buffer_load_dword [[A:v[0-9]+]]
-; SI-DAG: v_cmp_ne_u32_e64 [[NEG1_CHECK_0:s\[[0-9]+:[0-9]+\]]], -1, [[A]]
-; SI-DAG: v_cmp_ne_u32_e32 [[NEG1_CHECK_1:vcc]], -1, [[B]]
-; SI: s_and_b64 [[ORNEG1:s\[[0-9]+:[0-9]+\]]], [[NEG1_CHECK_1]], [[NEG1_CHECK_0]]
-; SI: s_and_saveexec_b64 [[ORNEG2:s\[[0-9]+:[0-9]+\]]], [[ORNEG1]]
-; SI: s_cbranch_execz [[LABEL_FLOW:.LBB[0-9]+_[0-9]+]]
-
-; SI: ; %bb.{{[0-9]+}}: ; %bb20
-; SI: buffer_store_dword
-
-; SI: [[LABEL_FLOW]]:
-; SI-NEXT: ; in Loop: Header=[[LABEL_LOOP]]
-; SI-NEXT: s_or_b64 exec, exec, [[ORNEG2]]
-; SI-NEXT: s_and_b64 [[TMP1:s\[[0-9]+:[0-9]+\]]],
-; SI-NEXT: s_or_b64 [[COND_STATE]], [[TMP1]], [[COND_STATE]]
-; SI-NEXT: s_andn2_b64 exec, exec, [[COND_STATE]]
-; SI-NEXT: s_cbranch_execnz .L[[LABEL_LOOP]]
-
-; SI: [[LABEL_EXIT]]:
-; SI-NOT: [[COND_STATE]]
-; SI: s_endpgm
 define amdgpu_kernel void @multi_vcond_loop(ptr addrspace(1) noalias nocapture %arg, ptr addrspace(1) noalias nocapture readonly %arg1, ptr addrspace(1) noalias nocapture readonly %arg2, ptr addrspace(1) noalias nocapture readonly %arg3) #1 {
+; SI-LABEL: multi_vcond_loop:
+; SI: ; %bb.0: ; %bb
+; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0xf
+; SI-NEXT: s_mov_b32 s10, 0
+; SI-NEXT: v_mov_b32_e32 v7, 0
+; SI-NEXT: s_mov_b32 s11, 0xf000
+; SI-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_load_dword v0, v[6:7], s[8:11], 0 addr64
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_lt_i32_e32 vcc, 0, v0
+; SI-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; SI-NEXT: s_cbranch_execz .LBB5_5
+; SI-NEXT: ; %bb.1: ; %bb10.preheader
+; SI-NEXT: s_load_dwordx4 s[12:15], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
+; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SI-NEXT: s_mov_b64 s[2:3], 0
+; SI-NEXT: s_mov_b32 s8, s10
+; SI-NEXT: s_mov_b32 s9, s10
+; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, s13
+; SI-NEXT: v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_mov_b32_e32 v5, s1
+; SI-NEXT: v_add_i32_e32 v4, vcc, s0, v6
+; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT: v_mov_b32_e32 v7, s15
+; SI-NEXT: v_add_i32_e32 v6, vcc, s14, v6
+; SI-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; SI-NEXT: s_mov_b64 s[6:7], 0
+; SI-NEXT: .LBB5_2: ; %bb10
+; SI-NEXT: ; =>This Inner Loop Header: Depth=1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v8, v[6:7], s[8:11], 0 addr64
+; SI-NEXT: buffer_load_dword v9, v[4:5], s[8:11], 0 addr64
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, -1, v8
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cmp_ne_u32_e64 s[0:1], -1, v9
+; SI-NEXT: s_and_b64 s[12:13], vcc, s[0:1]
+; SI-NEXT: s_or_b64 s[4:5], s[4:5], exec
+; SI-NEXT: s_and_saveexec_b64 s[0:1], s[12:13]
+; SI-NEXT: s_cbranch_execz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %bb20
+; SI-NEXT: ; in Loop: Header=BB5_2 Depth=1
+; SI-NEXT: v_add_i32_e32 v8, vcc, v9, v8
+; SI-NEXT: s_add_u32 s6, s6, 1
+; SI-NEXT: v_add_i32_e32 v4, vcc, 4, v4
+; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT: v_add_i32_e32 v6, vcc, 4, v6
+; SI-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; SI-NEXT: buffer_store_dword v8, v[2:3], s[8:11], 0 addr64
+; SI-NEXT: s_addc_u32 s7, s7, 0
+; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_cmp_ge_i64_e32 vcc, s[6:7], v[0:1]
+; SI-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
+; SI-NEXT: s_and_b64 s[12:13], vcc, exec
+; SI-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
+; SI-NEXT: .LBB5_4: ; %Flow
+; SI-NEXT: ; in Loop: Header=BB5_2 Depth=1
+; SI-NEXT: s_or_b64 exec, exec, s[0:1]
+; SI-NEXT: s_and_b64 s[0:1], exec, s[4:5]
+; SI-NEXT: s_or_b64 s[2:3], s[0:1], s[2:3]
+; SI-NEXT: s_andn2_b64 exec, exec, s[2:3]
+; SI-NEXT: s_cbranch_execnz .LBB5_2
+; SI-NEXT: .LBB5_5: ; %bb26
+; SI-NEXT: s_endpgm
 bb:
 %tmp = tail call i32 @llvm.amdgcn.workitem.id.x() #0
 %tmp4 = sext i32 %tmp to i64
diff --git a/llvm/test/CodeGen/AMDGPU/vcmp-saveexec-to-vcmpx.ll b/llvm/test/CodeGen/AMDGPU/vcmp-saveexec-to-vcmpx.ll
index 2c66d38a1be6..33ca7180325a 100644
--- a/llvm/test/CodeGen/AMDGPU/vcmp-saveexec-to-vcmpx.ll
+++ b/llvm/test/CodeGen/AMDGPU/vcmp-saveexec-to-vcmpx.ll
@@ -1,12 +1,29 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1010 %s
 ; RUN: llc -mtriple=amdgcn -mcpu=gfx1030 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX1030 %s

-; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_lt:
-; GFX1010: v_cmp_lt_i32_e32 vcc_lo, 15, v{{.*}}
-; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
-; GFX1030: s_mov_b32 s{{.*}}, exec_lo
-; GFX1030-NEXT: v_cmpx_lt_i32_e32 15, v{{.*}}
 define i32 @test_insert_vcmpx_pattern_lt(i32 %x) {
+; GFX1010-LABEL: test_insert_vcmpx_pattern_lt:
+; GFX1010: ; %bb.0: ; %entry
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: v_cmp_lt_i32_e32 vcc_lo, 15, v0
+; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1010-NEXT: ; %bb.1: ; %if
+; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1010-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: test_insert_vcmpx_pattern_lt:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: s_mov_b32 s4, exec_lo
+; GFX1030-NEXT: v_cmpx_lt_i32_e32 15, v0
+; GFX1030-NEXT: ; %bb.1: ; %if
+; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
 entry:
 %bc = icmp slt i32 %x, 16
 br i1 %bc, label %endif, label %if
@@ -19,12 +36,28 @@ endif:
 ret i32 %x
 }

-; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_gt:
-; GFX1010: v_cmp_gt_i32_e32 vcc_lo, 17, v{{.*}}
-; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
-; GFX1030: s_mov_b32 s{{.*}}, exec_lo
-; GFX1030-NEXT: v_cmpx_gt_i32_e32 17, v{{.*}}
 define i32 @test_insert_vcmpx_pattern_gt(i32 %x) {
+; GFX1010-LABEL: test_insert_vcmpx_pattern_gt:
+; GFX1010: ; %bb.0: ; %entry
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: v_cmp_gt_i32_e32 vcc_lo, 17, v0
+; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1010-NEXT: ; %bb.1: ; %if
+; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1010-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: test_insert_vcmpx_pattern_gt:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: s_mov_b32 s4, exec_lo
+; GFX1030-NEXT: v_cmpx_gt_i32_e32 17, v0
+; GFX1030-NEXT: ; %bb.1: ; %if
+; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
 entry:
 %bc = icmp sgt i32 %x, 16
 br i1 %bc, label %endif, label %if
@@ -37,12 +70,28 @@ endif:
 ret i32 %x
 }

-; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_eq:
-; GFX1010: v_cmp_ne_u32_e32 vcc_lo, 16, v{{.*}}
-; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
-; GFX1030: s_mov_b32 s{{.*}}, exec_lo
-; GFX1030-NEXT: v_cmpx_ne_u32_e32 16, v{{.*}}
 define i32 @test_insert_vcmpx_pattern_eq(i32 %x) {
+; GFX1010-LABEL: test_insert_vcmpx_pattern_eq:
+; GFX1010: ; %bb.0: ; %entry
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: v_cmp_ne_u32_e32 vcc_lo, 16, v0
+; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1010-NEXT: ; %bb.1: ; %if
+; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1010-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: test_insert_vcmpx_pattern_eq:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: s_mov_b32 s4, exec_lo
+; GFX1030-NEXT: v_cmpx_ne_u32_e32 16, v0
+; GFX1030-NEXT: ; %bb.1: ; %if
+; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
 entry:
 %bc = icmp eq i32 %x, 16
 br i1 %bc, label %endif, label %if
@@ -55,12 +104,28 @@ endif:
 ret i32 %x
 }

-; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_ne:
-; GFX1010: v_cmp_eq_u32_e32 vcc_lo, 16, v{{.*}}
-; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
-; GFX1030: s_mov_b32 s{{.*}}, exec_lo
-; GFX1030-NEXT: v_cmpx_eq_u32_e32 16, v{{.*}}
 define i32 @test_insert_vcmpx_pattern_ne(i32 %x) {
+; GFX1010-LABEL: test_insert_vcmpx_pattern_ne:
+; GFX1010: ; %bb.0: ; %entry
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: v_cmp_eq_u32_e32 vcc_lo, 16, v0
+; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1010-NEXT: ; %bb.1: ; %if
+; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1010-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: test_insert_vcmpx_pattern_ne:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: s_mov_b32 s4, exec_lo
+; GFX1030-NEXT: v_cmpx_eq_u32_e32 16, v0
+; GFX1030-NEXT: ; %bb.1: ; %if
+; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
 entry:
 %bc = icmp ne i32 %x, 16
 br i1 %bc, label %endif, label %if
@@ -73,12 +138,28 @@ endif:
 ret i32 %x
 }

-; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_le:
-; GFX1010: v_cmp_lt_i32_e32 vcc_lo, 16, v{{.*}}
-; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
-; GFX1030: s_mov_b32 s{{.*}}, exec_lo
-; GFX1030-NEXT: v_cmpx_lt_i32_e32 16, v{{.*}}
 define i32 @test_insert_vcmpx_pattern_le(i32 %x) {
+; GFX1010-LABEL: test_insert_vcmpx_pattern_le:
+; GFX1010: ; %bb.0: ; %entry
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: v_cmp_lt_i32_e32 vcc_lo, 16, v0
+; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1010-NEXT: ; %bb.1: ; %if
+; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1010-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: test_insert_vcmpx_pattern_le:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: s_mov_b32 s4, exec_lo
+; GFX1030-NEXT: v_cmpx_lt_i32_e32 16, v0
+; GFX1030-NEXT: ; %bb.1: ; %if
+; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
 entry:
 %bc = icmp sle i32 %x, 16
 br i1 %bc, label %endif, label %if
@@ -91,12 +172,28 @@ endif:
 ret i32 %x
 }

-; GCN-LABEL: {{^}}test_insert_vcmpx_pattern_ge:
-; GFX1010: v_cmp_gt_i32_e32 vcc_lo, 16, v{{.*}}
-; GFX1010-NEXT: s_and_saveexec_b32 s{{.*}}, vcc_lo
-; GFX1030: s_mov_b32 s{{.*}}, exec_lo
-; GFX1030-NEXT: v_cmpx_gt_i32_e32 16, v{{.*}}
 define i32 @test_insert_vcmpx_pattern_ge(i32 %x) {
+; GFX1010-LABEL: test_insert_vcmpx_pattern_ge:
+; GFX1010: ; %bb.0: ; %entry
+; GFX1010-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1010-NEXT: v_cmp_gt_i32_e32 vcc_lo, 16, v0
+; GFX1010-NEXT: s_and_saveexec_b32 s4, vcc_lo
+; GFX1010-NEXT: ; %bb.1: ; %if
+; GFX1010-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1010-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1010-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1010-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX1030-LABEL: test_insert_vcmpx_pattern_ge:
+; GFX1030: ; %bb.0: ; %entry
+; GFX1030-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX1030-NEXT: s_mov_b32 s4, exec_lo
+; GFX1030-NEXT: v_cmpx_gt_i32_e32 16, v0
+; GFX1030-NEXT: ; %bb.1: ; %if
+; GFX1030-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; GFX1030-NEXT: ; %bb.2: ; %UnifiedReturnBlock
+; GFX1030-NEXT: s_or_b32 exec_lo, exec_lo, s4
+; GFX1030-NEXT: s_setpc_b64 s[30:31]
 entry:
 %bc = icmp sge i32 %x, 16
 br i1 %bc, label %endif, label %if
@@ -113,13 +210,32 @@ declare amdgpu_gfx void @check_live_outs_helper(i64) #0

 ; In cases where the output operand cannot be safely removed,
 ; don't apply the v_cmpx transformation.
-
-; GCN-LABEL: {{^}}check_live_outs:
-; GFX1010: v_cmp_eq_u32_e64 s{{.*}}, v{{.*}}, v{{.*}}
-; GFX1010: s_and_saveexec_b32 s{{.*}}, s{{.*}}
-; GFX1030: v_cmp_eq_u32_e64 s{{.*}}, v{{.*}}, v{{.*}}
-; GFX1030: s_and_saveexec_b32 s{{.*}}, s{{.*}}
 define amdgpu_cs void @check_live_outs(i32 %a, i32 %b) {
+; GCN-LABEL: check_live_outs:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GCN-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GCN-NEXT: s_mov_b32 s10, -1
+; GCN-NEXT: s_mov_b32 s11, 0x31c16000
+; GCN-NEXT: s_add_u32 s8, s8, s0
+; GCN-NEXT: v_cmp_eq_u32_e64 s0, v0, v1
+; GCN-NEXT: s_addc_u32 s9, s9, 0
+; GCN-NEXT: s_mov_b32 s32, 0
+; GCN-NEXT: s_and_saveexec_b32 s1, s0
+; GCN-NEXT: s_cbranch_execz .LBB6_2
+; GCN-NEXT: ; %bb.1: ; %l1
+; GCN-NEXT: s_getpc_b64 s[2:3]
+; GCN-NEXT: s_add_u32 s2, s2, check_live_outs_helper@gotpcrel32@lo+4
+; GCN-NEXT: s_addc_u32 s3, s3, check_live_outs_helper@gotpcrel32@hi+12
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
+; GCN-NEXT: v_mov_b32_e32 v1, 0
+; GCN-NEXT: s_mov_b64 s[0:1], s[8:9]
+; GCN-NEXT: s_mov_b64 s[2:3], s[10:11]
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_swappc_b64 s[30:31], s[4:5]
+; GCN-NEXT: .LBB6_2: ; %l2
+; GCN-NEXT: s_endpgm
 %cond = icmp eq i32 %a, %b
 %result = call i64 @llvm.amdgcn.icmp.i32(i32 %a, i32 %b, i32 32)
 br i1 %cond, label %l1, label %l2
@@ -132,14 +248,27 @@ l2:

 ; Omit the transformation if the s_and_saveexec instruction overwrites
 ; any of the v_cmp source operands.
-
-; GCN-LABEL: check_saveexec_overwrites_vcmp_source:
-; GCN: .LBB7_2: ; %then
-; GFX1010: v_cmp_eq_u32_e64 s[[C:[0-9]+]], s[[A:[0-9]+]], s[[B:[0-9]+]]
-; GFX1010-NEXT: s_cmp_ge_i32 s[[C]], s[[B]]
-; GFX1030: v_cmp_eq_u32_e64 s[[C:[0-9]+]], s[[A:[0-9]+]], s[[B:[0-9]+]]
-; GFX1030-NEXT: s_cmp_ge_i32 s[[C]], s[[B]]
 define i32 @check_saveexec_overwrites_vcmp_source(i32 inreg %a, i32 inreg %b) {
+; GCN-LABEL: check_saveexec_overwrites_vcmp_source:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_cmp_lt_i32 s16, 0
+; GCN-NEXT: s_cbranch_scc1 .LBB7_2
+; GCN-NEXT: ; %bb.1: ; %if
+; GCN-NEXT: s_lshl_b32 s4, s16, 2
+; GCN-NEXT: s_or_b32 s4, s4, s17
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: s_setpc_b64 s[30:31]
+; GCN-NEXT: .LBB7_2: ; %then
+; GCN-NEXT: v_cmp_eq_u32_e64 s4, s16, s17
+; GCN-NEXT: s_cmp_ge_i32 s4, s17
+; GCN-NEXT: s_cbranch_scc1 .LBB7_4
+; GCN-NEXT: ; %bb.3: ; %after
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: s_setpc_b64 s[30:31]
+; GCN-NEXT: .LBB7_4: ; %end
+; GCN-NEXT: v_mov_b32_e32 v0, s16
+; GCN-NEXT: s_setpc_b64 s[30:31]
 entry:
 %0 = icmp sge i32 %a, 0
 br i1 %0, label %if, label %then
diff --git a/llvm/test/CodeGen/AMDGPU/widen-vselect-and-mask.ll b/llvm/test/CodeGen/AMDGPU/widen-vselect-and-mask.ll
index 26be45e45697..ce01a9d66955 100644
--- a/llvm/test/CodeGen/AMDGPU/widen-vselect-and-mask.ll
+++ b/llvm/test/CodeGen/AMDGPU/widen-vselect-and-mask.ll
@@ -1,15 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s

 ; Check that DAGTypeLegalizer::WidenVSELECTAndMask doesn't try to
 ; create vselects with i64 condition masks.
; FIXME: Should be able to avoid intermediate vselect -; GCN-LABEL: {{^}}widen_vselect_and_mask_v4f64: -; GCN: v_cmp_u_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -; GCN: v_cndmask_b32_e64 v[[VSEL:[0-9]+]], 0, -1, [[CMP]] -; GCN: v_mov_b32_e32 v[[VSEL_EXT:[0-9]+]], v[[VSEL]] -; GCN: v_cmp_lt_i64_e32 vcc, -1, v[[[VSEL]]:[[VSEL_EXT]]] define amdgpu_kernel void @widen_vselect_and_mask_v4f64(<4 x double> %arg) #0 { +; GCN-LABEL: widen_vselect_and_mask_v4f64: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; GCN-NEXT: v_mov_b32_e32 v0, 0 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b64 s[4:5], 16 +; GCN-NEXT: s_mov_b32 s7, 0xf000 +; GCN-NEXT: s_mov_b32 s6, -1 +; GCN-NEXT: v_mov_b32_e32 v1, v0 +; GCN-NEXT: v_mov_b32_e32 v2, v0 +; GCN-NEXT: v_mov_b32_e32 v3, v0 +; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 +; GCN-NEXT: v_cmp_u_f64_e64 s[2:3], s[0:1], s[0:1] +; GCN-NEXT: s_waitcnt expcnt(0) +; GCN-NEXT: v_cndmask_b32_e64 v1, 0, -1, s[2:3] +; GCN-NEXT: v_cmp_neq_f64_e64 s[0:1], s[0:1], 0 +; GCN-NEXT: v_mov_b32_e32 v2, v1 +; GCN-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[1:2] +; GCN-NEXT: s_and_b64 s[0:1], vcc, s[0:1] +; GCN-NEXT: s_and_b64 s[0:1], s[0:1], exec +; GCN-NEXT: s_cselect_b32 s0, 0x3ff00000, 0 +; GCN-NEXT: s_mov_b64 s[4:5], 0 +; GCN-NEXT: v_mov_b32_e32 v2, v0 +; GCN-NEXT: v_mov_b32_e32 v1, s0 +; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = extractelement <4 x double> %arg, i64 0 %tmp1 = fcmp uno double %tmp, 0.000000e+00 @@ -26,12 +48,34 @@ bb: ret void } -; GCN-LABEL: {{^}}widen_vselect_and_mask_v4i64: -; GCN: v_cmp_eq_u64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -; GCN: v_cndmask_b32_e64 v[[VSEL:[0-9]+]], 0, -1, [[CMP]] -; GCN: v_mov_b32_e32 v[[VSEL_EXT:[0-9]+]], v[[VSEL]] -; GCN: v_cmp_lt_i64_e32 vcc, -1, v[[[VSEL]]:[[VSEL_EXT]]] define amdgpu_kernel void @widen_vselect_and_mask_v4i64(<4 x i64> %arg) #0 { +; GCN-LABEL: widen_vselect_and_mask_v4i64: +; GCN: ; %bb.0: ; %bb +; GCN-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9 +; GCN-NEXT: s_waitcnt lgkmcnt(0) +; GCN-NEXT: s_mov_b64 s[4:5], 0 +; GCN-NEXT: v_mov_b32_e32 v0, 0 +; GCN-NEXT: s_mov_b64 s[8:9], 16 +; GCN-NEXT: s_mov_b32 s11, 0xf000 +; GCN-NEXT: s_mov_b32 s10, -1 +; GCN-NEXT: v_mov_b32_e32 v1, v0 +; GCN-NEXT: v_mov_b32_e32 v2, v0 +; GCN-NEXT: v_mov_b32_e32 v3, v0 +; GCN-NEXT: v_cmp_eq_u64_e64 s[2:3], s[0:1], 0 +; GCN-NEXT: v_cndmask_b32_e64 v4, 0, -1, s[2:3] +; GCN-NEXT: v_cmp_ne_u64_e64 s[0:1], s[0:1], 0 +; GCN-NEXT: v_mov_b32_e32 v5, v4 +; GCN-NEXT: v_cmp_lt_i64_e32 vcc, -1, v[4:5] +; GCN-NEXT: s_and_b64 s[0:1], vcc, s[0:1] +; GCN-NEXT: v_cndmask_b32_e64 v4, 0, 1, s[0:1] +; GCN-NEXT: buffer_store_dwordx4 v[0:3], off, s[8:11], 0 +; GCN-NEXT: v_mov_b32_e32 v5, v0 +; GCN-NEXT: v_mov_b32_e32 v6, v0 +; GCN-NEXT: v_mov_b32_e32 v7, v0 +; GCN-NEXT: s_mov_b32 s6, s10 +; GCN-NEXT: s_mov_b32 s7, s11 +; GCN-NEXT: buffer_store_dwordx4 v[4:7], off, s[4:7], 0 +; GCN-NEXT: s_endpgm bb: %tmp = extractelement <4 x i64> %arg, i64 0 %tmp1 = icmp eq i64 %tmp, 0