Files
clang-p2996/llvm/test/CodeGen/AMDGPU/smfmac_no_agprs.ll
alex-t 2973febe10 [AMDGPU] Force the third source operand of the MAI instructions to VGPR if no AGPRs are used. (#69720)
eaf85b9c28 "[AMDGPU] Select VGPR versions of MFMA if possible" prevents
the compiler from reserving AGPRs if a kernel has no inline asm
explicitly using AGPRs, no calls, and runs at least 2 waves with not
more than 256 VGPRs. This, in turn, makes it impossible to allocate AGPRs
when necessary. As a result, register allocation fails if an MAI
instruction has at least one AGPR operand.
This change checks whether AGPRs are available and forces the operands to
VGPRs if they are not.

---------

Co-authored-by: Alexander Timofeev <alexander.timofeev@amd.com>
2023-10-23 19:41:07 +02:00

54 lines
2.6 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx940 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GFX940 %s
; Regression test for AMDGPU MAI operand legalization (PR #69720): when a
; kernel reserves no AGPRs, the third (accumulator) source operand of an
; smfmac/MAI instruction must be forced to VGPRs, otherwise register
; allocation fails. The CHECK block below pins that all-VGPR selection
; (v_smfmac_i32_16x16x64_i8 with v[...] operands only, no a[...] registers).
; NOTE(review): the GFX940 CHECK lines are autogenerated by
; utils/update_llc_test_checks.py — regenerate rather than hand-edit them.
define protected amdgpu_kernel void @test(ptr addrspace(1) %in, ptr addrspace(1) %out) {
; GFX940-LABEL: test:
; GFX940: ; %bb.0: ; %entry
; GFX940-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x0
; GFX940-NEXT: v_mov_b32_e32 v0, 0
; GFX940-NEXT: v_mov_b32_e32 v2, v0
; GFX940-NEXT: v_mov_b32_e32 v3, v0
; GFX940-NEXT: v_mov_b32_e32 v1, v0
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x0
; GFX940-NEXT: v_mov_b64_e32 v[10:11], v[2:3]
; GFX940-NEXT: v_mov_b64_e32 v[8:9], v[0:1]
; GFX940-NEXT: s_waitcnt lgkmcnt(0)
; GFX940-NEXT: v_mov_b32_e32 v12, s4
; GFX940-NEXT: v_mov_b32_e32 v13, s5
; GFX940-NEXT: v_mov_b32_e32 v4, s6
; GFX940-NEXT: v_mov_b32_e32 v5, s7
; GFX940-NEXT: v_mov_b32_e32 v6, s7
; GFX940-NEXT: v_mov_b32_e32 v7, s7
; GFX940-NEXT: s_nop 1
; GFX940-NEXT: v_smfmac_i32_16x16x64_i8 v[8:11], v[12:13], v[4:7], v13
; GFX940-NEXT: s_nop 6
; GFX940-NEXT: global_store_dword v0, v11, s[2:3] offset:12 sc0 sc1
; GFX940-NEXT: s_endpgm
entry:
  ; Load four consecutive i32s from %in; they feed the smfmac source vectors.
  %arrayidx = getelementptr inbounds i32, ptr addrspace(1) %in, i64 0
  %arrayidx1 = getelementptr inbounds i32, ptr addrspace(1) %in, i64 1
  %arrayidx2 = getelementptr inbounds i32, ptr addrspace(1) %in, i64 2
  %arrayidx3 = getelementptr inbounds i32, ptr addrspace(1) %in, i64 3
  %0 = load i32, ptr addrspace(1) %arrayidx
  %1 = load i32, ptr addrspace(1) %arrayidx1
  %2 = load i32, ptr addrspace(1) %arrayidx2
  %3 = load i32, ptr addrspace(1) %arrayidx3
  ; Assemble the <2 x i32> A operand and the <4 x i32> B operand from the
  ; loaded values (the last element is replicated to fill the vector).
  %src1.0 = insertelement <2 x i32> undef, i32 %0, i64 0
  %src1 = insertelement <2 x i32> %src1.0, i32 %1, i64 1
  %src2.0 = insertelement <4 x i32> undef, i32 %2, i64 0
  %src2.1 = insertelement <4 x i32> %src2.0, i32 %3, i64 1
  %src2.2 = insertelement <4 x i32> %src2.1, i32 %3, i64 2
  %src2 = insertelement <4 x i32> %src2.2, i32 %3, i64 3
  ; The smfmac intrinsic: zeroinitializer accumulator, %1 as the index
  ; operand, both cbsz/abid immediates zero. This is the instruction whose
  ; operands must be legalized to VGPRs when no AGPRs are reserved.
  %4 = tail call <4 x i32> @llvm.amdgcn.smfmac.i32.16x16x64.i8(<2 x i32> %src1, <4 x i32> %src2, <4 x i32> zeroinitializer, i32 %1, i32 0, i32 0)
  %vecext = extractelement <4 x i32> %4, i64 0
  %vecext.1 = extractelement <4 x i32> %4, i64 1
  %vecext.2 = extractelement <4 x i32> %4, i64 2
  %vecext.3 = extractelement <4 x i32> %4, i64 3
  ; Only element 3 of the result is stored (at %out + 12 bytes); the other
  ; extracts are dead and folded away, as the CHECK block shows.
  %arrayidx4 = getelementptr inbounds i32, ptr addrspace(1) %out, i64 3
  store i32 %vecext.3, ptr addrspace(1) %arrayidx4
  ret void
}
declare <4 x i32> @llvm.amdgcn.smfmac.i32.16x16x64.i8(<2 x i32>, <4 x i32>, <4 x i32>, i32, i32 immarg, i32 immarg)