Revert "[AMDGPU][CodeGenPrepare] Narrow 64 bit math to 32 bit if profitable" (#133880)

Reverts llvm/llvm-project#130577
Author: Shoreshen
Date: 2025-04-01 17:37:02 +08:00
Committed by: GitHub
parent 66fca0674d
commit 7f14b2a9eb
5 changed files with 34 additions and 347 deletions


@@ -1561,87 +1561,6 @@ void AMDGPUCodeGenPrepareImpl::expandDivRem64(BinaryOperator &I) const {
  llvm_unreachable("not a division");
}

Type *findSmallestLegalBits(Instruction *I, int OrigBit, int MaxBitsNeeded,
                            const TargetLowering *TLI, const DataLayout &DL) {
  if (MaxBitsNeeded >= OrigBit)
    return nullptr;

  Type *NewType = I->getType()->getWithNewBitWidth(MaxBitsNeeded);
  while (OrigBit > MaxBitsNeeded) {
    if (TLI->isOperationLegalOrCustom(
            TLI->InstructionOpcodeToISD(I->getOpcode()),
            TLI->getValueType(DL, NewType, true)))
      return NewType;

    MaxBitsNeeded *= 2;
    NewType = I->getType()->getWithNewBitWidth(MaxBitsNeeded);
  }
  return nullptr;
}

static bool tryNarrowMathIfNoOverflow(Instruction *I, const TargetLowering *TLI,
                                      const TargetTransformInfo &TTI,
                                      const DataLayout &DL) {
  unsigned Opc = I->getOpcode();
  Type *OldType = I->getType();
  if (Opc != Instruction::Add && Opc != Instruction::Mul)
    return false;

  unsigned OrigBit = OldType->getScalarSizeInBits();
  unsigned MaxBitsNeeded = OrigBit;

  switch (Opc) {
  case Instruction::Add:
    MaxBitsNeeded = KnownBits::add(computeKnownBits(I->getOperand(0), DL),
                                   computeKnownBits(I->getOperand(1), DL))
                        .countMaxActiveBits();
    break;
  case Instruction::Mul:
    MaxBitsNeeded = KnownBits::mul(computeKnownBits(I->getOperand(0), DL),
                                   computeKnownBits(I->getOperand(1), DL))
                        .countMaxActiveBits();
    break;
  default:
    llvm_unreachable("Unexpected opcode, only valid for Instruction::Add and "
                     "Instruction::Mul.");
  }

  MaxBitsNeeded = std::max<unsigned>(bit_ceil(MaxBitsNeeded), 8);
  Type *NewType = findSmallestLegalBits(I, OrigBit, MaxBitsNeeded, TLI, DL);
  if (!NewType)
    return false;

  // Old cost
  InstructionCost OldCost =
      TTI.getArithmeticInstrCost(Opc, OldType, TTI::TCK_RecipThroughput);
  // New cost of new op
  InstructionCost NewCost =
      TTI.getArithmeticInstrCost(Opc, NewType, TTI::TCK_RecipThroughput);
  // New cost of narrowing 2 operands (use trunc)
  NewCost += 2 * TTI.getCastInstrCost(Instruction::Trunc, NewType, OldType,
                                      TTI.getCastContextHint(I),
                                      TTI::TCK_RecipThroughput);
  // New cost of zext narrowed result to original type
  NewCost +=
      TTI.getCastInstrCost(Instruction::ZExt, OldType, NewType,
                           TTI.getCastContextHint(I), TTI::TCK_RecipThroughput);
  if (NewCost >= OldCost)
    return false;

  IRBuilder<> Builder(I);
  Value *Trunc0 = Builder.CreateTrunc(I->getOperand(0), NewType);
  Value *Trunc1 = Builder.CreateTrunc(I->getOperand(1), NewType);
  Value *Arith =
      Builder.CreateBinOp((Instruction::BinaryOps)Opc, Trunc0, Trunc1);
  Value *Zext = Builder.CreateZExt(Arith, OldType);
  I->replaceAllUsesWith(Zext);
  I->eraseFromParent();
  return true;
}
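
A minimal LLVM IR sketch of the rewrite the reverted helper performed (the value names are illustrative, not from the patch): when known bits prove the result fits in 32 bits and the cost model favors it, both operands are truncated, the operation is performed in i32, and the result is zero-extended back to i64.

; Before narrowing: both operands provably fit in 31 bits.
  %x   = and i64 %a, 2147483647
  %y   = and i64 %b, 2147483647
  %add = add i64 %x, %y

; After narrowing (the trunc / binop / zext sequence built by the
; IRBuilder calls above):
  %t0       = trunc i64 %x to i32
  %t1       = trunc i64 %y to i32
  %n        = add i32 %t0, %t1
  %add.wide = zext i32 %n to i64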

bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
  if (foldBinOpIntoSelect(I))
    return true;
@@ -1726,9 +1645,6 @@ bool AMDGPUCodeGenPrepareImpl::visitBinaryOperator(BinaryOperator &I) {
    }
  }
  Changed = tryNarrowMathIfNoOverflow(&I, ST.getTargetLowering(),
                                      TM.getTargetTransformInfo(F), DL);
  return Changed;
}


@@ -414,10 +414,7 @@ define i64 @umul24_i64_2(i64 %lhs, i64 %rhs) {
; DISABLED-LABEL: @umul24_i64_2(
; DISABLED-NEXT: [[LHS24:%.*]] = and i64 [[LHS:%.*]], 65535
; DISABLED-NEXT: [[RHS24:%.*]] = and i64 [[RHS:%.*]], 65535
; DISABLED-NEXT: [[TMP1:%.*]] = trunc i64 [[LHS24]] to i32
; DISABLED-NEXT: [[TMP2:%.*]] = trunc i64 [[RHS24]] to i32
; DISABLED-NEXT: [[TMP3:%.*]] = mul i32 [[TMP1]], [[TMP2]]
; DISABLED-NEXT: [[MUL:%.*]] = zext i32 [[TMP3]] to i64
; DISABLED-NEXT: [[MUL:%.*]] = mul i64 [[LHS24]], [[RHS24]]
; DISABLED-NEXT: ret i64 [[MUL]]
;
%lhs24 = and i64 %lhs, 65535


@@ -1823,22 +1823,22 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1264: ; %bb.0: ; %entry
; GFX1264-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1264-NEXT: s_mov_b64 s[6:7], exec
; GFX1264-NEXT: s_mov_b64 s[4:5], exec
; GFX1264-NEXT: s_mov_b32 s9, 0
; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX1264-NEXT: s_mov_b64 s[4:5], exec
; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-NEXT: v_mbcnt_hi_u32_b32 v2, s7, v0
; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1264-NEXT: s_cbranch_execz .LBB3_2
; GFX1264-NEXT: ; %bb.1:
; GFX1264-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX1264-NEXT: v_mov_b32_e32 v1, 0
; GFX1264-NEXT: s_wait_alu 0xfffe
; GFX1264-NEXT: s_mul_i32 s6, s6, 5
; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
; GFX1264-NEXT: s_mul_u64 s[6:7], s[8:9], 5
; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_wait_alu 0xfffe
; GFX1264-NEXT: v_mov_b32_e32 v0, s6
; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: v_mov_b32_e32 v1, s7
; GFX1264-NEXT: s_wait_kmcnt 0x0
; GFX1264-NEXT: s_mov_b32 s8, s2
; GFX1264-NEXT: s_mov_b32 s9, s3
@@ -1860,19 +1860,20 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-LABEL: add_i64_constant:
; GFX1232: ; %bb.0: ; %entry
; GFX1232-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1232-NEXT: s_mov_b32 s7, exec_lo
; GFX1232-NEXT: s_mov_b32 s5, 0
; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0
; GFX1232-NEXT: s_mov_b32 s6, exec_lo
; GFX1232-NEXT: s_mov_b32 s4, exec_lo
; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1232-NEXT: s_cbranch_execz .LBB3_2
; GFX1232-NEXT: ; %bb.1:
; GFX1232-NEXT: s_bcnt1_i32_b32 s5, s6
; GFX1232-NEXT: s_bcnt1_i32_b32 s4, s7
; GFX1232-NEXT: s_mov_b32 s11, 0x31016000
; GFX1232-NEXT: s_mul_i32 s5, s5, 5
; GFX1232-NEXT: s_mul_u64 s[4:5], s[4:5], 5
; GFX1232-NEXT: s_mov_b32 s10, -1
; GFX1232-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, 0
; GFX1232-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: s_mov_b32 s8, s2
; GFX1232-NEXT: s_mov_b32 s9, s3
@@ -1880,7 +1881,8 @@ define amdgpu_kernel void @add_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-NEXT: s_wait_loadcnt 0x0
; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB3_2:
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1232-NEXT: s_wait_alu 0xfffe
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: v_readfirstlane_b32 s3, v1
; GFX1232-NEXT: v_readfirstlane_b32 s2, v0
@@ -5370,22 +5372,22 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1264: ; %bb.0: ; %entry
; GFX1264-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1264-NEXT: s_mov_b64 s[6:7], exec
; GFX1264-NEXT: s_mov_b64 s[4:5], exec
; GFX1264-NEXT: s_mov_b32 s9, 0
; GFX1264-NEXT: v_mbcnt_lo_u32_b32 v0, s6, 0
; GFX1264-NEXT: s_mov_b64 s[4:5], exec
; GFX1264-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1264-NEXT: v_mbcnt_hi_u32_b32 v2, s7, v0
; GFX1264-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1264-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1264-NEXT: s_cbranch_execz .LBB9_2
; GFX1264-NEXT: ; %bb.1:
; GFX1264-NEXT: s_bcnt1_i32_b64 s6, s[6:7]
; GFX1264-NEXT: v_mov_b32_e32 v1, 0
; GFX1264-NEXT: s_wait_alu 0xfffe
; GFX1264-NEXT: s_mul_i32 s6, s6, 5
; GFX1264-NEXT: s_bcnt1_i32_b64 s8, s[6:7]
; GFX1264-NEXT: s_mov_b32 s11, 0x31016000
; GFX1264-NEXT: s_mul_u64 s[6:7], s[8:9], 5
; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: s_wait_alu 0xfffe
; GFX1264-NEXT: v_mov_b32_e32 v0, s6
; GFX1264-NEXT: s_mov_b32 s10, -1
; GFX1264-NEXT: v_mov_b32_e32 v1, s7
; GFX1264-NEXT: s_wait_kmcnt 0x0
; GFX1264-NEXT: s_mov_b32 s8, s2
; GFX1264-NEXT: s_mov_b32 s9, s3
@@ -5410,19 +5412,20 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-LABEL: sub_i64_constant:
; GFX1232: ; %bb.0: ; %entry
; GFX1232-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
; GFX1232-NEXT: s_mov_b32 s7, exec_lo
; GFX1232-NEXT: s_mov_b32 s5, 0
; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s7, 0
; GFX1232-NEXT: s_mov_b32 s6, exec_lo
; GFX1232-NEXT: s_mov_b32 s4, exec_lo
; GFX1232-NEXT: v_mbcnt_lo_u32_b32 v2, s6, 0
; GFX1232-NEXT: ; implicit-def: $vgpr0_vgpr1
; GFX1232-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1232-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1232-NEXT: s_cbranch_execz .LBB9_2
; GFX1232-NEXT: ; %bb.1:
; GFX1232-NEXT: s_bcnt1_i32_b32 s5, s6
; GFX1232-NEXT: s_bcnt1_i32_b32 s4, s7
; GFX1232-NEXT: s_mov_b32 s11, 0x31016000
; GFX1232-NEXT: s_mul_i32 s5, s5, 5
; GFX1232-NEXT: s_mul_u64 s[4:5], s[4:5], 5
; GFX1232-NEXT: s_mov_b32 s10, -1
; GFX1232-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, 0
; GFX1232-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: s_mov_b32 s8, s2
; GFX1232-NEXT: s_mov_b32 s9, s3
@@ -5430,7 +5433,8 @@ define amdgpu_kernel void @sub_i64_constant(ptr addrspace(1) %out, ptr addrspace
; GFX1232-NEXT: s_wait_loadcnt 0x0
; GFX1232-NEXT: global_inv scope:SCOPE_DEV
; GFX1232-NEXT: .LBB9_2:
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s4
; GFX1232-NEXT: s_wait_alu 0xfffe
; GFX1232-NEXT: s_or_b32 exec_lo, exec_lo, s6
; GFX1232-NEXT: s_wait_kmcnt 0x0
; GFX1232-NEXT: v_readfirstlane_b32 s2, v0
; GFX1232-NEXT: v_mul_u32_u24_e32 v0, 5, v2


@@ -1,231 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck %s
define i64 @narrow_add(i64 %a, i64 %b) {
; CHECK-LABEL: narrow_add:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; CHECK-NEXT: v_and_b32_e32 v1, 0x7fffffff, v2
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
; CHECK-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_add_nc_u32 v0, v0, v1
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and i64 %a, 2147483647
%zext1 = and i64 %b, 2147483647
%add = add i64 %zext0, %zext1
ret i64 %add
}
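
Why this case narrowed, as a hedged sketch of the KnownBits reasoning the reverted pass applied:

; max(%zext0) + max(%zext1) = (2^31 - 1) + (2^31 - 1) = 2^32 - 2 < 2^32,
; so countMaxActiveBits() is 32 and bit_ceil(32) is 32: the add fits in
; i32, matching the 32-bit v_dual_add_nc_u32 with a zeroed high half above.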
define i64 @narrow_add_1(i64 %a, i64 %b) {
; CHECK-LABEL: narrow_add_1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_bfi_b32 v0, 0x7fffffff, v0, v2
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and i64 %a, 2147483647
%zext1 = and i64 %b, 2147483648
%add = add i64 %zext0, %zext1
ret i64 %add
}
define <2 x i64> @narrow_add_vec(<2 x i64> %a, <2 x i64> %b) #0 {
; CHECK-LABEL: narrow_add_vec:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; CHECK-NEXT: v_and_b32_e32 v1, 0x7fffffff, v4
; CHECK-NEXT: v_and_b32_e32 v2, 30, v2
; CHECK-NEXT: v_and_b32_e32 v3, 0x7ffffffe, v6
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; CHECK-NEXT: v_add_co_u32 v2, s0, v2, v3
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and <2 x i64> %a, <i64 2147483647, i64 30>
%zext1 = and <2 x i64> %b, <i64 2147483647, i64 2147483646>
%add = add <2 x i64> %zext0, %zext1
ret <2 x i64> %add
}
define <2 x i32> @narrow_add_vec_1(<2 x i32> %a, <2 x i32> %b) #0 {
; CHECK-LABEL: narrow_add_vec_1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v1, 0x3fff, v1
; CHECK-NEXT: v_and_b32_e32 v0, 0x4000, v0
; CHECK-NEXT: v_and_b32_e32 v3, 0x4001, v3
; CHECK-NEXT: v_and_b32_e32 v2, 0x4000, v2
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_2)
; CHECK-NEXT: v_perm_b32 v0, v1, v0, 0x5040100
; CHECK-NEXT: v_perm_b32 v1, v3, v2, 0x5040100
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; CHECK-NEXT: v_pk_add_u16 v1, v0, v1
; CHECK-NEXT: v_and_b32_e32 v0, 0xc000, v1
; CHECK-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and <2 x i32> %a, <i32 16384, i32 16383>
%zext1 = and <2 x i32> %b, <i32 16384, i32 16385>
%add = add <2 x i32> %zext0, %zext1
ret <2 x i32> %add
}
define i64 @narrow_mul(i64 %a, i64 %b) {
; CHECK-LABEL: narrow_mul:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v1, 2, v2
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
; CHECK-NEXT: v_mul_lo_u32 v0, v0, v1
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and i64 %a, 2147483647
%zext1 = and i64 %b, 2
%mul = mul i64 %zext0, %zext1
ret i64 %mul
}
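
The same overflow reasoning for the multiply, again a sketch inferred from the masks:

; max(%zext0) * max(%zext1) = (2^31 - 1) * 2 = 2^32 - 2 < 2^32,
; so the product fits in 32 bits: a single v_mul_lo_u32 with the high
; half set to zero, as the checks above expect.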
define i64 @narrow_mul_1(i64 %a, i64 %b) {
; CHECK-LABEL: narrow_mul_1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v1, 0xf73594, v0
; CHECK-NEXT: v_and_b32_e32 v2, 0x100, v2
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
; CHECK-NEXT: v_mul_u32_u24_e32 v0, v1, v2
; CHECK-NEXT: v_mul_hi_u32_u24_e32 v1, v1, v2
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and i64 %a, 16201108
%zext1 = and i64 %b, 256
%mul = mul i64 %zext0, %zext1
ret i64 %mul
}
define <2 x i64> @narrow_mul_vec(<2 x i64> %a, <2 x i64> %b) #0 {
; CHECK-LABEL: narrow_mul_vec:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v0, 0x2d48aff, v0
; CHECK-NEXT: v_and_b32_e32 v1, 0x50, v4
; CHECK-NEXT: v_and_b32_e32 v3, 50, v2
; CHECK-NEXT: v_and_b32_e32 v4, 20, v6
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; CHECK-NEXT: v_mul_lo_u32 v0, v0, v1
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_mul_u32_u24_e32 v2, v3, v4
; CHECK-NEXT: v_mul_hi_u32_u24_e32 v3, v3, v4
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and <2 x i64> %a, <i64 47483647, i64 50>
%zext1 = and <2 x i64> %b, <i64 80, i64 20>
%mul = mul <2 x i64> %zext0, %zext1
ret <2 x i64> %mul
}
define <2 x i32> @narrow_add_mul_1(<2 x i32> %a, <2 x i32> %b) #0 {
; CHECK-LABEL: narrow_add_mul_1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v1, 0x4000, v1
; CHECK-NEXT: v_and_b32_e32 v0, 0x4000, v0
; CHECK-NEXT: v_and_b32_e32 v2, 3, v2
; CHECK-NEXT: v_and_b32_e32 v3, 2, v3
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; CHECK-NEXT: v_mul_u32_u24_e32 v0, v0, v2
; CHECK-NEXT: v_mul_u32_u24_e32 v1, v1, v3
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and <2 x i32> %a, <i32 16384, i32 16384>
%zext1 = and <2 x i32> %b, <i32 3, i32 2>
%mul = mul <2 x i32> %zext0, %zext1
ret <2 x i32> %mul
}
define i64 @no_narrow_add(i64 %a, i64 %b) {
; CHECK-LABEL: no_narrow_add:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v0, 0x80000000, v0
; CHECK-NEXT: v_and_b32_e32 v1, 0x80000000, v2
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and i64 %a, 2147483648
%zext1 = and i64 %b, 2147483648
%add = add i64 %zext0, %zext1
ret i64 %add
}
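
The contrasting case, where narrowing is blocked; a sketch of why:

; max(%zext0) + max(%zext1) = 2^31 + 2^31 = 2^32, which needs 33 bits;
; bit_ceil(33) is 64, so no narrower legal type exists and the full 64-bit
; v_add_co_u32 / v_add_co_ci_u32 pair above is kept.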
define i64 @no_narrow_add_1(i64 %a, i64 %b) {
; CHECK-LABEL: no_narrow_add_1:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v1, 1, v2
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and i64 %a, 4294967295
%zext1 = and i64 %b, 1
%add = add i64 %zext0, %zext1
ret i64 %add
}
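
Likewise here, sketched from the masks:

; max(%zext0) + max(%zext1) = (2^32 - 1) + 1 = 2^32 needs 33 bits, so the
; add stays 64-bit even though each operand fits in 32 bits on its own.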
define <2 x i64> @no_narrow_add_vec(<2 x i64> %a, <2 x i64> %b) #0 {
; CHECK-LABEL: no_narrow_add_vec:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v0, 0x80000000, v0
; CHECK-NEXT: v_and_b32_e32 v1, 0x80000000, v4
; CHECK-NEXT: v_and_b32_e32 v2, 30, v2
; CHECK-NEXT: v_and_b32_e32 v3, 0x7ffffffe, v6
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; CHECK-NEXT: v_add_co_u32 v0, s0, v0, v1
; CHECK-NEXT: v_add_co_ci_u32_e64 v1, null, 0, 0, s0
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
; CHECK-NEXT: v_add_co_u32 v2, s0, v2, v3
; CHECK-NEXT: v_add_co_ci_u32_e64 v3, null, 0, 0, s0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and <2 x i64> %a, <i64 2147483648, i64 30>
%zext1 = and <2 x i64> %b, <i64 2147483648, i64 2147483646>
%add = add <2 x i64> %zext0, %zext1
ret <2 x i64> %add
}
define i64 @no_narrow_mul(i64 %a, i64 %b) {
; CHECK-LABEL: no_narrow_mul:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v0, 0x80000000, v0
; CHECK-NEXT: v_and_b32_e32 v1, 2, v2
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_1)
; CHECK-NEXT: v_mul_hi_u32 v1, v0, v1
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and i64 %a, 2147483648
%zext1 = and i64 %b, 2
%mul = mul i64 %zext0, %zext1
ret i64 %mul
}
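
And for the multiply that must stay wide, a sketch of the reasoning:

; max(%zext0) * max(%zext1) = 2^31 * 2 = 2^32 needs 33 bits, so the mul is
; not narrowed; the low 32 bits of that product are known zero, which is
; why the checks above compute only v_mul_hi_u32 and zero the low half.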
define <2 x i64> @no_narrow_mul_vec(<2 x i64> %a, <2 x i64> %b) #0 {
; CHECK-LABEL: no_narrow_mul_vec:
; CHECK: ; %bb.0:
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_and_b32_e32 v1, 0x8000, v0
; CHECK-NEXT: v_and_b32_e32 v3, 0x20000, v4
; CHECK-NEXT: v_and_b32_e32 v4, 50, v2
; CHECK-NEXT: v_and_b32_e32 v5, 20, v6
; CHECK-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_3)
; CHECK-NEXT: v_mul_u32_u24_e32 v0, v1, v3
; CHECK-NEXT: v_mul_hi_u32_u24_e32 v1, v1, v3
; CHECK-NEXT: v_mul_u32_u24_e32 v2, v4, v5
; CHECK-NEXT: v_mul_hi_u32_u24_e32 v3, v4, v5
; CHECK-NEXT: s_setpc_b64 s[30:31]
%zext0 = and <2 x i64> %a, <i64 32768, i64 50>
%zext1 = and <2 x i64> %b, <i64 131072, i64 20>
%mul = mul <2 x i64> %zext0, %zext1
ret <2 x i64> %mul
}


@@ -508,16 +508,17 @@ define amdgpu_kernel void @widen_i1_zext_to_i64_constant_load(ptr addrspace(4) %
; SI-LABEL: widen_i1_zext_to_i64_constant_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s2, s[0:1], 0x0
; SI-NEXT: s_mov_b64 s[0:1], 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s2, s2, 0xff
; SI-NEXT: s_addk_i32 s2, 0x3e7
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: s_and_b32 s2, s2, 1
; SI-NEXT: s_add_u32 s4, s2, 0x3e7
; SI-NEXT: s_addc_u32 s5, 0, 0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;