From 121cd7c6f0270540ce976b98061ba765edc1675d Mon Sep 17 00:00:00 2001
From: Shoreshen <372660931@qq.com>
Date: Thu, 17 Apr 2025 17:03:32 +0800
Subject: [PATCH] Re-apply 130577 narrow math for and operand (#133896)

Re-apply https://github.com/llvm/llvm-project/pull/130577, which was
reverted in https://github.com/llvm/llvm-project/pull/133880.

The previous application failed under AddressSanitizer because
`tryNarrowMathIfNoOverflow` was called after `I.eraseFromParent();` in
`AMDGPUCodeGenPrepareImpl::visitBinaryOperator`, creating a
use-after-free. To fix this, `tryNarrowMathIfNoOverflow` is now called
earlier, and `visitBinaryOperator` returns immediately when
`tryNarrowMathIfNoOverflow` succeeds.
---
 .../Target/AMDGPU/AMDGPUCodeGenPrepare.cpp    |  77 ++++++
 .../AMDGPU/amdgpu-codegenprepare-mul24.ll     |   5 +-
 .../atomic_optimizations_global_pointer.ll    |  52 ++--
 .../CodeGen/AMDGPU/memcpy-crash-issue63986.ll |  10 +-
 .../CodeGen/AMDGPU/narrow_math_for_and.ll     | 225 ++++++++++++++++++
 llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll  |  25 +-
 6 files changed, 346 insertions(+), 48 deletions(-)
 create mode 100644 llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll

diff --git a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
index df92847c1ba7..a37128b0d745 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUCodeGenPrepare.cpp
@@ -1559,6 +1559,80 @@ void AMDGPUCodeGenPrepareImpl::expandDivRem64(BinaryOperator &I) const {
   llvm_unreachable("not a division");
 }
 
+/*
+This can cause an inconsistency for non-byte loads, for example:
+```
+  %load = load i1, ptr addrspace(4) %arg, align 4
+  %zext = zext i1 %load to i64
+  %add = add i64 %zext, 1
+```
+Instead of creating `s_and_b32 s0, s0, 1`,
+it will create `s_and_b32 s0, s0, 0xff`.
+We accept this change since the non-byte load assumes the upper bits
+within the byte are all 0.
+*/
+static bool tryNarrowMathIfNoOverflow(Instruction *I,
+                                      const SITargetLowering *TLI,
+                                      const TargetTransformInfo &TTI,
+                                      const DataLayout &DL) {
+  unsigned Opc = I->getOpcode();
+  Type *OldType = I->getType();
+
+  if (Opc != Instruction::Add && Opc != Instruction::Mul)
+    return false;
+
+  unsigned OrigBit = OldType->getScalarSizeInBits();
+
+  if (Opc != Instruction::Add && Opc != Instruction::Mul)
+    llvm_unreachable("Unexpected opcode, only valid for Instruction::Add and "
+                     "Instruction::Mul.");
+
+  unsigned MaxBitsNeeded = computeKnownBits(I, DL).countMaxActiveBits();
+
+  MaxBitsNeeded = std::max<unsigned>(bit_ceil(MaxBitsNeeded), 8);
+  Type *NewType = DL.getSmallestLegalIntType(I->getContext(), MaxBitsNeeded);
+  if (!NewType)
+    return false;
+  unsigned NewBit = NewType->getIntegerBitWidth();
+  if (NewBit >= OrigBit)
+    return false;
+  NewType = I->getType()->getWithNewBitWidth(NewBit);
+
+  // Old cost
+  InstructionCost OldCost =
+      TTI.getArithmeticInstrCost(Opc, OldType, TTI::TCK_RecipThroughput);
+  // New cost of new op
+  InstructionCost NewCost =
+      TTI.getArithmeticInstrCost(Opc, NewType, TTI::TCK_RecipThroughput);
+  // New cost of narrowing 2 operands (use trunc)
+  int NumOfNonConstOps = 2;
+  if (isa<Constant>(I->getOperand(0)) || isa<Constant>(I->getOperand(1))) {
+    // Cannot be both constant, should be propagated
+    NumOfNonConstOps = 1;
+  }
+  NewCost += NumOfNonConstOps * TTI.getCastInstrCost(Instruction::Trunc,
+                                                     NewType, OldType,
+                                                     TTI.getCastContextHint(I),
+                                                     TTI::TCK_RecipThroughput);
+  // New cost of zext narrowed result to original type
+  NewCost +=
+      TTI.getCastInstrCost(Instruction::ZExt, OldType, NewType,
+                           TTI.getCastContextHint(I), TTI::TCK_RecipThroughput);
+  if (NewCost >= OldCost)
+    return false;
+
+  IRBuilder<> Builder(I);
+  Value *Trunc0 = Builder.CreateTrunc(I->getOperand(0), NewType);
+  Value *Trunc1 = Builder.CreateTrunc(I->getOperand(1), NewType);
+  Value *Arith =
+      Builder.CreateBinOp((Instruction::BinaryOps)Opc, Trunc0, Trunc1);
+
+  Value *Zext = Builder.CreateZExt(Arith, OldType);
+  I->replaceAllUsesWith(Zext);
+  I->eraseFromParent();
+  return true;
+}
+
 bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
   if (foldBinOpIntoSelect(I))
     return true;
@@ -1569,6 +1643,9 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
   if (UseMul24Intrin && replaceMulWithMul24(I))
     return true;
+  if (tryNarrowMathIfNoOverflow(&I, ST.getTargetLowering(),
+                                TM.getTargetTransformInfo(F), DL))
+    return true;
   bool Changed = false;
   Instruction::BinaryOps Opc = I.getOpcode();
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-mul24.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-mul24.ll
index 296b817bc8f7..d7c35a8b007c 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-mul24.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-codegenprepare-mul24.ll
@@ -414,7 +414,10 @@ define i64 @umul24_i64_2(i64 %lhs, i64 %rhs) {
 ; DISABLED-LABEL: @umul24_i64_2(
 ; DISABLED-NEXT:    [[LHS24:%.*]] = and i64 [[LHS:%.*]], 65535
 ; DISABLED-NEXT:    [[RHS24:%.*]] = and i64 [[RHS:%.*]], 65535
-; DISABLED-NEXT:    [[MUL:%.*]] = mul i64 [[LHS24]], [[RHS24]]
+; DISABLED-NEXT:    [[TMP1:%.*]] = trunc i64 [[LHS24]] to i32
+; DISABLED-NEXT:    [[TMP2:%.*]] = trunc i64 [[RHS24]] to i32
+; DISABLED-NEXT:    [[TMP3:%.*]] = mul i32 [[TMP1]], [[TMP2]]
+; DISABLED-NEXT:    [[MUL:%.*]] = zext i32 [[TMP3]] to i64
 ; DISABLED-NEXT:    ret i64 [[MUL]]
 ;
   %lhs24 = and i64 %lhs, 65535
diff --git a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll
b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll index 62083b3e67ab..e2dfcf55b785 100644 --- a/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll +++ b/llvm/test/CodeGen/AMDGPU/atomic_optimizations_global_pointer.ll @@ -1823,22 +1823,22 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX1264: ; %bb.0: ; %entry ; GFX1264-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1264-NEXT: s_mov_b64 s[6:7], exec -; GFX1264-NEXT: s_mov_b32 s9, 0 -; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0 ; GFX1264-NEXT: s_mov_b64 s[4:5], exec +; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0 ; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1264-NEXT: v_mbcnt_hi_u32_b32 v2, s7, v0 ; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2 ; GFX1264-NEXT: s_cbranch_execz .LBB3_2 ; GFX1264-NEXT: ; %bb.1: -; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[6:7] +; GFX1264-NEXT: s_bcnt1_i32_b64 s6, s[6:7] +; GFX1264-NEXT: v_mov_b32_e32 v1, 0 +; GFX1264-NEXT: s_wait_alu 0xfffe +; GFX1264-NEXT: s_mul_i32 s6, s6, 5 ; GFX1264-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1264-NEXT: s_mul_u64 s[6:7], s[8:9], 5 -; GFX1264-NEXT: s_mov_b32 s10, -1 ; GFX1264-NEXT: s_wait_alu 0xfffe ; GFX1264-NEXT: v_mov_b32_e32 v0, s6 -; GFX1264-NEXT: v_mov_b32_e32 v1, s7 +; GFX1264-NEXT: s_mov_b32 s10, -1 ; GFX1264-NEXT: s_wait_kmcnt 0x0 ; GFX1264-NEXT: s_mov_b32 s8, s2 ; GFX1264-NEXT: s_mov_b32 s9, s3 @@ -1860,20 +1860,19 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX1232-LABEL: add_i64_constant: ; GFX1232: ; %bb.0: ; %entry ; GFX1232-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1232-NEXT: s_mov_b32 s7, exec_lo -; GFX1232-NEXT: s_mov_b32 s5, 0 -; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0 ; GFX1232-NEXT: s_mov_b32 s6, exec_lo +; GFX1232-NEXT: s_mov_b32 s4, exec_lo +; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0 ; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2 ; GFX1232-NEXT: s_cbranch_execz .LBB3_2 ; GFX1232-NEXT: ; %bb.1: -; GFX1232-NEXT: s_bcnt1_i32_b32 s4, s7 +; GFX1232-NEXT: s_bcnt1_i32_b32 s5, s6 ; GFX1232-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1232-NEXT: s_mul_u64 s[4:5], s[4:5], 5 +; GFX1232-NEXT: s_mul_i32 s5, s5, 5 ; GFX1232-NEXT: s_mov_b32 s10, -1 -; GFX1232-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GFX1232-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, 0 ; GFX1232-NEXT: s_wait_kmcnt 0x0 ; GFX1232-NEXT: s_mov_b32 s8, s2 ; GFX1232-NEXT: s_mov_b32 s9, s3 @@ -1881,8 +1880,7 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX1232-NEXT: s_wait_loadcnt 0x0 ; GFX1232-NEXT: global_inv scope:SCOPE_DEV ; GFX1232-NEXT: .LBB3_2: -; GFX1232-NEXT: s_wait_alu 0xfffe -; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s6 +; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4 ; GFX1232-NEXT: s_wait_kmcnt 0x0 ; GFX1232-NEXT: v_readfirstlane_b32 s3, v1 ; GFX1232-NEXT: v_readfirstlane_b32 s2, v0 @@ -5372,22 +5370,22 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX1264: ; %bb.0: ; %entry ; GFX1264-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 ; GFX1264-NEXT: s_mov_b64 s[6:7], exec -; GFX1264-NEXT: s_mov_b32 s9, 0 -; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0 ; GFX1264-NEXT: s_mov_b64 s[4:5], exec +; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0 ; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) ; GFX1264-NEXT: 
v_mbcnt_hi_u32_b32 v2, s7, v0 ; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2 ; GFX1264-NEXT: s_cbranch_execz .LBB9_2 ; GFX1264-NEXT: ; %bb.1: -; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[6:7] +; GFX1264-NEXT: s_bcnt1_i32_b64 s6, s[6:7] +; GFX1264-NEXT: v_mov_b32_e32 v1, 0 +; GFX1264-NEXT: s_wait_alu 0xfffe +; GFX1264-NEXT: s_mul_i32 s6, s6, 5 ; GFX1264-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1264-NEXT: s_mul_u64 s[6:7], s[8:9], 5 -; GFX1264-NEXT: s_mov_b32 s10, -1 ; GFX1264-NEXT: s_wait_alu 0xfffe ; GFX1264-NEXT: v_mov_b32_e32 v0, s6 -; GFX1264-NEXT: v_mov_b32_e32 v1, s7 +; GFX1264-NEXT: s_mov_b32 s10, -1 ; GFX1264-NEXT: s_wait_kmcnt 0x0 ; GFX1264-NEXT: s_mov_b32 s8, s2 ; GFX1264-NEXT: s_mov_b32 s9, s3 @@ -5412,20 +5410,19 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX1232-LABEL: sub_i64_constant: ; GFX1232: ; %bb.0: ; %entry ; GFX1232-NEXT: s_load_b128 s[0:3], s[4:5], 0x24 -; GFX1232-NEXT: s_mov_b32 s7, exec_lo -; GFX1232-NEXT: s_mov_b32 s5, 0 -; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0 ; GFX1232-NEXT: s_mov_b32 s6, exec_lo +; GFX1232-NEXT: s_mov_b32 s4, exec_lo +; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0 ; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1 ; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1) ; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2 ; GFX1232-NEXT: s_cbranch_execz .LBB9_2 ; GFX1232-NEXT: ; %bb.1: -; GFX1232-NEXT: s_bcnt1_i32_b32 s4, s7 +; GFX1232-NEXT: s_bcnt1_i32_b32 s5, s6 ; GFX1232-NEXT: s_mov_b32 s11, 0x31016000 -; GFX1232-NEXT: s_mul_u64 s[4:5], s[4:5], 5 +; GFX1232-NEXT: s_mul_i32 s5, s5, 5 ; GFX1232-NEXT: s_mov_b32 s10, -1 -; GFX1232-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5 +; GFX1232-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, 0 ; GFX1232-NEXT: s_wait_kmcnt 0x0 ; GFX1232-NEXT: s_mov_b32 s8, s2 ; GFX1232-NEXT: s_mov_b32 s9, s3 @@ -5433,8 +5430,7 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace ; GFX1232-NEXT: s_wait_loadcnt 0x0 ; GFX1232-NEXT: global_inv scope:SCOPE_DEV ; GFX1232-NEXT: .LBB9_2: -; GFX1232-NEXT: s_wait_alu 0xfffe -; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s6 +; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4 ; GFX1232-NEXT: s_wait_kmcnt 0x0 ; GFX1232-NEXT: v_readfirstlane_b32 s2, v0 ; GFX1232-NEXT: v_mul_u32_u24_e32 v0, 5, v2 diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll index 29d9164f95d9..c92c672dda2a 100644 --- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll +++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll @@ -170,13 +170,13 @@ define void @issue63986_reduced_expanded(i64 %idxprom) { ; CHECK-NEXT: s_cbranch_execnz .LBB1_8 ; CHECK-NEXT: .LBB1_5: ; %loop-memcpy-residual.preheader ; CHECK-NEXT: v_mov_b32_e32 v0, s4 -; CHECK-NEXT: s_mov_b64 s[6:7], 0 +; CHECK-NEXT: s_mov_b64 s[8:9], 0 +; CHECK-NEXT: s_mov_b32 s7, 0 ; CHECK-NEXT: v_mov_b32_e32 v1, s5 ; CHECK-NEXT: .LBB1_6: ; %loop-memcpy-residual -; CHECK-NEXT: s_add_u32 s4, s6, 1 -; CHECK-NEXT: s_addc_u32 s5, s7, 0 -; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[0:1] -; CHECK-NEXT: s_mov_b64 s[6:7], 1 +; CHECK-NEXT: s_add_i32 s6, s8, 1 +; CHECK-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1] +; CHECK-NEXT: s_mov_b64 s[8:9], 1 ; CHECK-NEXT: s_cbranch_vccnz .LBB1_6 ; CHECK-NEXT: ; %bb.7: ; %Flow ; CHECK-NEXT: v_mov_b32_e32 v0, 0 diff --git a/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll b/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll new file mode 100644 index 000000000000..151456e82ae5 --- /dev/null +++ 
b/llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll @@ -0,0 +1,225 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5 + +; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck %s + +define i64 @narrow_add(i64 %a, i64 %b) { +; CHECK-LABEL: narrow_add: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0 +; CHECK-NEXT: v_and_b32_e32 v1, 0x7fffffff, v2 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) +; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_add_nc_u32 v0, v0, v1 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and i64 %a, 2147483647 + %zext1 = and i64 %b, 2147483647 + %add = add i64 %zext0, %zext1 + ret i64 %add +} + +define i64 @narrow_add_1(i64 %a, i64 %b) { +; CHECK-LABEL: narrow_add_1: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_bfi_b32 v0, 0x7fffffff, v0, v2 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and i64 %a, 2147483647 + %zext1 = and i64 %b, 2147483648 + %add = add i64 %zext0, %zext1 + ret i64 %add +} + +define <2 x i64> @narrow_add_vec(<2 x i64> %a, <2 x i64> %b) #0 { +; CHECK-LABEL: narrow_add_vec: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v1, 30, v2 +; CHECK-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0 +; CHECK-NEXT: v_and_b32_e32 v2, 0x7fffffff, v4 +; CHECK-NEXT: v_and_b32_e32 v3, 0x7ffffffe, v6 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; CHECK-NEXT: v_add_nc_u32_e32 v0, v0, v2 +; CHECK-NEXT: v_dual_mov_b32 v3, 0 :: v_dual_add_nc_u32 v2, v1, v3 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and <2 x i64> %a, + %zext1 = and <2 x i64> %b, + %add = add <2 x i64> %zext0, %zext1 + ret <2 x i64> %add +} + +define <2 x i32> @narrow_add_vec_1(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-LABEL: narrow_add_vec_1: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v1, 0x3fff, v1 +; CHECK-NEXT: v_and_b32_e32 v0, 0x4000, v0 +; CHECK-NEXT: v_and_b32_e32 v2, 0x4000, v2 +; CHECK-NEXT: v_and_b32_e32 v3, 0x4001, v3 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; CHECK-NEXT: v_add_nc_u32_e32 v0, v0, v2 +; CHECK-NEXT: v_add_nc_u32_e32 v1, v1, v3 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and <2 x i32> %a, + %zext1 = and <2 x i32> %b, + %add = add <2 x i32> %zext0, %zext1 + ret <2 x i32> %add +} + +define i64 @narrow_mul(i64 %a, i64 %b) { +; CHECK-LABEL: narrow_mul: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v1, 2, v2 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) +; CHECK-NEXT: v_mul_lo_u32 v0, v0, v1 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and i64 %a, 2147483647 + %zext1 = and i64 %b, 2 + %mul = mul i64 %zext0, %zext1 + ret i64 %mul +} + +define i64 @narrow_mul_1(i64 %a, i64 %b) { +; CHECK-LABEL: narrow_mul_1: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v1, 0xf73594, v0 +; CHECK-NEXT: v_and_b32_e32 v2, 0x100, v2 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) +; CHECK-NEXT: v_mul_u32_u24_e32 v0, v1, v2 +; CHECK-NEXT: v_mul_hi_u32_u24_e32 v1, v1, v2 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and i64 %a, 16201108 + %zext1 = and i64 %b, 256 + %mul = mul i64 %zext0, %zext1 + ret i64 %mul +} + +define <2 x i64> 
@narrow_mul_vec(<2 x i64> %a, <2 x i64> %b) #0 { +; CHECK-LABEL: narrow_mul_vec: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v0, 0x2d48aff, v0 +; CHECK-NEXT: v_and_b32_e32 v1, 0x50, v4 +; CHECK-NEXT: v_and_b32_e32 v2, 50, v2 +; CHECK-NEXT: v_and_b32_e32 v3, 20, v6 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; CHECK-NEXT: v_mul_lo_u32 v0, v0, v1 +; CHECK-NEXT: v_mov_b32_e32 v1, 0 +; CHECK-NEXT: v_mul_u32_u24_e32 v2, v2, v3 +; CHECK-NEXT: v_mov_b32_e32 v3, 0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and <2 x i64> %a, + %zext1 = and <2 x i64> %b, + %mul = mul <2 x i64> %zext0, %zext1 + ret <2 x i64> %mul +} + +define <2 x i32> @narrow_add_mul_1(<2 x i32> %a, <2 x i32> %b) #0 { +; CHECK-LABEL: narrow_add_mul_1: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v1, 0x4000, v1 +; CHECK-NEXT: v_and_b32_e32 v0, 0x4000, v0 +; CHECK-NEXT: v_and_b32_e32 v2, 3, v2 +; CHECK-NEXT: v_and_b32_e32 v3, 2, v3 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2) +; CHECK-NEXT: v_mul_u32_u24_e32 v0, v0, v2 +; CHECK-NEXT: v_mul_u32_u24_e32 v1, v1, v3 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and <2 x i32> %a, + %zext1 = and <2 x i32> %b, + %mul = mul <2 x i32> %zext0, %zext1 + ret <2 x i32> %mul +} + +define i64 @no_narrow_add(i64 %a, i64 %b) { +; CHECK-LABEL: no_narrow_add: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v0, 0x80000000, v0 +; CHECK-NEXT: v_and_b32_e32 v1, 0x80000000, v2 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and i64 %a, 2147483648 + %zext1 = and i64 %b, 2147483648 + %add = add i64 %zext0, %zext1 + ret i64 %add +} + +define i64 @no_narrow_add_1(i64 %a, i64 %b) { +; CHECK-LABEL: no_narrow_add_1: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v1, 1, v2 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1) +; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and i64 %a, 4294967295 + %zext1 = and i64 %b, 1 + %add = add i64 %zext0, %zext1 + ret i64 %add +} + +define <2 x i64> @no_narrow_add_vec(<2 x i64> %a, <2 x i64> %b) #0 { +; CHECK-LABEL: no_narrow_add_vec: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v0, 0x80000000, v0 +; CHECK-NEXT: v_and_b32_e32 v1, 0x80000000, v4 +; CHECK-NEXT: v_and_b32_e32 v2, 30, v2 +; CHECK-NEXT: v_and_b32_e32 v3, 0x7ffffffe, v6 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1 +; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1) +; CHECK-NEXT: v_add_co_u32 v2, s0, v2, v3 +; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and <2 x i64> %a, + %zext1 = and <2 x i64> %b, + %add = add <2 x i64> %zext0, %zext1 + ret <2 x i64> %add +} + +define i64 @no_narrow_mul(i64 %a, i64 %b) { +; CHECK-LABEL: no_narrow_mul: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; 
CHECK-NEXT: v_and_b32_e32 v0, 0x80000000, v0 +; CHECK-NEXT: v_and_b32_e32 v1, 2, v2 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) +; CHECK-NEXT: v_mul_hi_u32 v1, v0, v1 +; CHECK-NEXT: v_mov_b32_e32 v0, 0 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and i64 %a, 2147483648 + %zext1 = and i64 %b, 2 + %mul = mul i64 %zext0, %zext1 + ret i64 %mul +} + +define <2 x i64> @no_narrow_mul_vec(<2 x i64> %a, <2 x i64> %b) #0 { +; CHECK-LABEL: no_narrow_mul_vec: +; CHECK: ; %bb.0: +; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) +; CHECK-NEXT: v_and_b32_e32 v1, 0x8000, v0 +; CHECK-NEXT: v_and_b32_e32 v3, 0x20000, v4 +; CHECK-NEXT: v_and_b32_e32 v4, 50, v2 +; CHECK-NEXT: v_and_b32_e32 v5, 20, v6 +; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3) +; CHECK-NEXT: v_mul_u32_u24_e32 v0, v1, v3 +; CHECK-NEXT: v_mul_hi_u32_u24_e32 v1, v1, v3 +; CHECK-NEXT: v_mul_u32_u24_e32 v2, v4, v5 +; CHECK-NEXT: v_mul_hi_u32_u24_e32 v3, v4, v5 +; CHECK-NEXT: s_setpc_b64 s[30:31] + %zext0 = and <2 x i64> %a, + %zext1 = and <2 x i64> %b, + %mul = mul <2 x i64> %zext0, %zext1 + ret <2 x i64> %mul +} diff --git a/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll b/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll index 4290590e9971..bac70b69650c 100644 --- a/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll +++ b/llvm/test/CodeGen/AMDGPU/widen-smrd-loads.ll @@ -508,17 +508,16 @@ define amdgpu_kernel void @widen_i1_zext_to_i64_constant_load(ptr addrspace(4) % ; SI-LABEL: widen_i1_zext_to_i64_constant_load: ; SI: ; %bb.0: ; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9 +; SI-NEXT: v_mov_b32_e32 v1, 0 ; SI-NEXT: s_mov_b32 s3, 0xf000 ; SI-NEXT: s_waitcnt lgkmcnt(0) ; SI-NEXT: s_load_dword s2, s[0:1], 0x0 ; SI-NEXT: s_mov_b64 s[0:1], 0 ; SI-NEXT: s_waitcnt lgkmcnt(0) -; SI-NEXT: s_and_b32 s2, s2, 1 -; SI-NEXT: s_add_u32 s4, s2, 0x3e7 -; SI-NEXT: s_addc_u32 s5, 0, 0 -; SI-NEXT: v_mov_b32_e32 v0, s4 +; SI-NEXT: s_and_b32 s2, s2, 0xff +; SI-NEXT: s_addk_i32 s2, 0x3e7 +; SI-NEXT: v_mov_b32_e32 v0, s2 ; SI-NEXT: s_mov_b32 s2, -1 -; SI-NEXT: v_mov_b32_e32 v1, s5 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 ; SI-NEXT: s_endpgm ; @@ -527,13 +526,12 @@ define amdgpu_kernel void @widen_i1_zext_to_i64_constant_load(ptr addrspace(4) % ; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24 ; VI-NEXT: v_mov_b32_e32 v0, 0 ; VI-NEXT: v_mov_b32_e32 v1, 0 +; VI-NEXT: v_mov_b32_e32 v3, 0 ; VI-NEXT: s_waitcnt lgkmcnt(0) ; VI-NEXT: s_load_dword s0, s[0:1], 0x0 ; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_and_b32 s0, s0, 1 -; VI-NEXT: s_add_u32 s0, s0, 0x3e7 -; VI-NEXT: s_addc_u32 s1, 0, 0 -; VI-NEXT: v_mov_b32_e32 v3, s1 +; VI-NEXT: s_and_b32 s0, s0, 0xff +; VI-NEXT: s_addk_i32 s0, 0x3e7 ; VI-NEXT: v_mov_b32_e32 v2, s0 ; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3] ; VI-NEXT: s_endpgm @@ -541,14 +539,13 @@ define amdgpu_kernel void @widen_i1_zext_to_i64_constant_load(ptr addrspace(4) % ; GFX11-LABEL: widen_i1_zext_to_i64_constant_load: ; GFX11: ; %bb.0: ; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x24 +; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, 0 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) ; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0 ; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: s_and_b32 s0, s0, 1 -; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1) -; GFX11-NEXT: s_add_u32 s0, s0, 0x3e7 -; GFX11-NEXT: s_addc_u32 s1, 0, 0 -; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1 +; GFX11-NEXT: s_and_b32 s0, s0, 0xff +; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | 
instskip(NEXT) | instid1(SALU_CYCLE_1) +; GFX11-NEXT: s_addk_i32 s0, 0x3e7 ; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s0 ; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off ; GFX11-NEXT: s_endpgm
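
A minimal IR-level sketch of the rewrite performed by the new
`tryNarrowMathIfNoOverflow`, using the `narrow_add` case from
llvm/test/CodeGen/AMDGPU/narrow_math_for_and.ll above. Both operands are
masked to 31 bits, so `computeKnownBits` proves the 64-bit add needs at most
32 bits, and the cost model favors the 32-bit add on the gfx1100 target
tested above. The value names in the "after" form (`%t0`, `%t1`, `%narrow`)
are illustrative only; the pass emits unnamed trunc/add/zext temporaries via
IRBuilder and erases the original add.

```
; Before AMDGPUCodeGenPrepare:
define i64 @narrow_add(i64 %a, i64 %b) {
  %zext0 = and i64 %a, 2147483647
  %zext1 = and i64 %b, 2147483647
  %add = add i64 %zext0, %zext1
  ret i64 %add
}

; After the pass (illustrative names):
define i64 @narrow_add(i64 %a, i64 %b) {
  %zext0 = and i64 %a, 2147483647
  %zext1 = and i64 %b, 2147483647
  %t0 = trunc i64 %zext0 to i32
  %t1 = trunc i64 %zext1 to i32
  %narrow = add i32 %t0, %t1
  %add = zext i32 %narrow to i64
  ret i64 %add
}
```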