[X86] Fix tile register spill issue.
The tile register spill needs 2 instructions:

  %46:gr64_nosp = MOV64ri 64
  TILESTORED %stack.2, 1, killed %46:gr64_nosp, 0, $noreg, %43:tile

The first instruction loads the stride into a GPR, and the second stores the tile register to the stack slot. The spill-merge optimization runs after register allocation, but spilling a tile register requires creating a new virtual register for the stride, so the tile spill instruction cannot be hoisted in postOptimization() of register allocation. We cannot hoist the TILESTORED alone, and we cannot hoist the two instructions together because the MOV64ri would clobber a GPR. This patch disables spill merging for any spill that needs 2 instructions.

Differential Revision: https://reviews.llvm.org/D93898
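For readers skimming the hunks below: the core of the change is to measure how many instructions the target emitted for a spill and to record only single-instruction spills as mergeable. The following toy program is a hedged sketch of that pattern in plain C++ (it is not LLVM code; the storeRegToStackSlot stand-in and the instruction strings are illustrative), mirroring how MachineInstrSpan and std::distance are used in the patch:

#include <cassert>
#include <iostream>
#include <iterator>
#include <list>
#include <string>
#include <vector>

using Block = std::list<std::string>;

// Stand-in for TargetInstrInfo::storeRegToStackSlot: an AMX tile spill
// expands to two instructions (stride load + tile store), while an
// ordinary GPR spill is a single store.
static void storeRegToStackSlot(Block &MBB, Block::iterator InsertPt,
                                bool IsTileReg) {
  if (IsTileReg)
    MBB.insert(InsertPt, "MOV64ri $stride, 64");
  MBB.insert(InsertPt, IsTileReg ? "TILESTORED %stack.2, $stride, %tile"
                                 : "MOV64mr %stack.2, %gpr");
}

int main() {
  Block MBB = {"<def of %tile>", "<use of %tile>"};
  auto InsertPt = std::next(MBB.begin()); // spill goes between def and use
  std::vector<std::string> MergeableSpills;

  // Equivalent of the MachineInstrSpan idea: remember the instruction before
  // the insertion point so the start of the inserted range can be recovered.
  auto BeforeSpan = std::prev(InsertPt);
  storeRegToStackSlot(MBB, InsertPt, /*IsTileReg=*/true);
  auto SpanBegin = std::next(BeforeSpan);

  // The guard added by the patch: only a spill that needed a single
  // instruction is recorded as a candidate for later merging/hoisting.
  if (std::distance(SpanBegin, InsertPt) <= 1)
    MergeableSpills.push_back(*SpanBegin);

  std::cout << "spill expanded to " << std::distance(SpanBegin, InsertPt)
            << " instruction(s), mergeable spills recorded: "
            << MergeableSpills.size() << "\n";
  assert(MergeableSpills.empty() && "tile spill must not be mergeable");
  return 0;
}

With a tile register the stand-in emits two instructions, so nothing is recorded, which is the behavior the patch enforces for AMX spills.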
@@ -269,6 +269,14 @@ static Register isFullCopyOf(const MachineInstr &MI, Register Reg) {
   return Register();
 }
 
+static void getVDefInterval(const MachineInstr &MI, LiveIntervals &LIS) {
+  for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
+    const MachineOperand &MO = MI.getOperand(I);
+    if (MO.isReg() && MO.isDef() && Register::isVirtualRegister(MO.getReg()))
+      LIS.getInterval(MO.getReg());
+  }
+}
+
 /// isSnippet - Identify if a live interval is a snippet that should be spilled.
 /// It is assumed that SnipLI is a virtual register with the same original as
 /// Edit->getReg().
@@ -410,14 +418,21 @@ bool InlineSpiller::hoistSpillInsideBB(LiveInterval &SpillLI,
     MII = DefMI;
     ++MII;
   }
+  MachineInstrSpan MIS(MII, MBB);
   // Insert spill without kill flag immediately after def.
   TII.storeRegToStackSlot(*MBB, MII, SrcReg, false, StackSlot,
                           MRI.getRegClass(SrcReg), &TRI);
+  LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MII);
+  for (const MachineInstr &MI : make_range(MIS.begin(), MII))
+    getVDefInterval(MI, LIS);
   --MII; // Point to store instruction.
-  LIS.InsertMachineInstrInMaps(*MII);
   LLVM_DEBUG(dbgs() << "\thoisted: " << SrcVNI->def << '\t' << *MII);
 
-  HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
+  // If there is only 1 store instruction is required for spill, add it
+  // to mergeable list. In X86 AMX, 2 intructions are required to store.
+  // We disable the merge for this case.
+  if (std::distance(MIS.begin(), MII) <= 1)
+    HSpiller.addToMergeableSpills(*MII, StackSlot, Original);
   ++NumSpills;
   return true;
 }
@@ -918,7 +933,11 @@ foldMemoryOperand(ArrayRef<std::pair<MachineInstr *, unsigned>> Ops,
     ++NumFolded;
   else if (Ops.front().second == 0) {
     ++NumSpills;
-    HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
+    // If there is only 1 store instruction is required for spill, add it
+    // to mergeable list. In X86 AMX, 2 intructions are required to store.
+    // We disable the merge for this case.
+    if (std::distance(MIS.begin(), MIS.end()) <= 1)
+      HSpiller.addToMergeableSpills(*FoldMI, StackSlot, Original);
   } else
     ++NumReloads;
   return true;
@@ -965,6 +984,7 @@ void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
   MachineInstrSpan MIS(MI, &MBB);
   MachineBasicBlock::iterator SpillBefore = std::next(MI);
   bool IsRealSpill = isRealSpill(*MI);
+
   if (IsRealSpill)
     TII.storeRegToStackSlot(MBB, SpillBefore, NewVReg, isKill, StackSlot,
                             MRI.getRegClass(NewVReg), &TRI);
@@ -978,11 +998,16 @@ void InlineSpiller::insertSpill(Register NewVReg, bool isKill,
 
   MachineBasicBlock::iterator Spill = std::next(MI);
   LIS.InsertMachineInstrRangeInMaps(Spill, MIS.end());
+  for (const MachineInstr &MI : make_range(Spill, MIS.end()))
+    getVDefInterval(MI, LIS);
 
   LLVM_DEBUG(
       dumpMachineInstrRangeWithSlotIndex(Spill, MIS.end(), LIS, "spill"));
   ++NumSpills;
-  if (IsRealSpill)
+  // If there is only 1 store instruction is required for spill, add it
+  // to mergeable list. In X86 AMX, 2 intructions are required to store.
+  // We disable the merge for this case.
+  if (IsRealSpill && std::distance(Spill, MIS.end()) <= 1)
     HSpiller.addToMergeableSpills(*Spill, StackSlot, Original);
 }
 
@@ -1529,9 +1554,12 @@ void HoistSpillHelper::hoistAllSpills() {
     MachineBasicBlock *BB = Insert.first;
     Register LiveReg = Insert.second;
     MachineBasicBlock::iterator MI = IPA.getLastInsertPointIter(OrigLI, *BB);
+    MachineInstrSpan MIS(MI, BB);
     TII.storeRegToStackSlot(*BB, MI, LiveReg, false, Slot,
                             MRI.getRegClass(LiveReg), &TRI);
-    LIS.InsertMachineInstrRangeInMaps(std::prev(MI), MI);
+    LIS.InsertMachineInstrRangeInMaps(MIS.begin(), MI);
+    for (const MachineInstr &MI : make_range(MIS.begin(), MI))
+      getVDefInterval(MI, LIS);
     ++NumSpills;
   }
 
llvm/test/CodeGen/X86/AMX/amx-spill-merge.ll (new file, 133 lines)
@@ -0,0 +1,133 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+amx-int8 -mattr=+avx512f -verify-machineinstrs | FileCheck %s

@buf = dso_local global [3072 x i8] zeroinitializer, align 16

define dso_local void @test_api(i16 signext %0, i16 signext %1) local_unnamed_addr {
; CHECK-LABEL: test_api:
; CHECK: # %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: pushq %r15
; CHECK-NEXT: .cfi_def_cfa_offset 24
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 40
; CHECK-NEXT: subq $4056, %rsp # imm = 0xFD8
; CHECK-NEXT: .cfi_def_cfa_offset 4096
; CHECK-NEXT: .cfi_offset %rbx, -40
; CHECK-NEXT: .cfi_offset %r14, -32
; CHECK-NEXT: .cfi_offset %r15, -24
; CHECK-NEXT: .cfi_offset %rbp, -16
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: movl %edi, %ebp
; CHECK-NEXT: vpxord %zmm0, %zmm0, %zmm0
; CHECK-NEXT: vmovdqu64 %zmm0, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movb $1, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movb %bpl, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movw %bx, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movb %bpl, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movb $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movw %bx, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movb %bpl, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movw $8, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movb %bpl, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movw %bx, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movb %bpl, {{[0-9]+}}(%rsp)
; CHECK-NEXT: movw %bx, {{[0-9]+}}(%rsp)
; CHECK-NEXT: ldtilecfg {{[0-9]+}}(%rsp)
; CHECK-NEXT: movl $32, %r14d
; CHECK-NEXT: movl $buf+2048, %r15d
; CHECK-NEXT: tileloadd (%r15,%r14), %tmm5
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: sttilecfg {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Folded Spill
; CHECK-NEXT: movl $buf, %eax
; CHECK-NEXT: movw $8, %cx
; CHECK-NEXT: jne .LBB0_2
; CHECK-NEXT: # %bb.1: # %if.true
; CHECK-NEXT: tileloadd (%rax,%r14), %tmm0
; CHECK-NEXT: movl $buf+1024, %eax
; CHECK-NEXT: tileloadd (%rax,%r14), %tmm1
; CHECK-NEXT: movabsq $64, %rax
; CHECK-NEXT: tilestored %tmm5, 2048(%rsp,%rax) # 1024-byte Folded Spill
; CHECK-NEXT: tdpbssd %tmm1, %tmm0, %tmm5
; CHECK-NEXT: movabsq $64, %rax
; CHECK-NEXT: tilestored %tmm5, 1024(%rsp,%rax) # 1024-byte Folded Spill
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq foo
; CHECK-NEXT: ldtilecfg {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Folded Reload
; CHECK-NEXT: movabsq $64, %rax
; CHECK-NEXT: tileloadd 1024(%rsp,%rax), %tmm6 # 1024-byte Folded Reload
; CHECK-NEXT: jmp .LBB0_3
; CHECK-NEXT: .LBB0_2: # %if.false
; CHECK-NEXT: tileloadd (%rax,%r14), %tmm2
; CHECK-NEXT: movl $buf+1024, %eax
; CHECK-NEXT: tileloadd (%rax,%r14), %tmm3
; CHECK-NEXT: movabsq $64, %rax
; CHECK-NEXT: tilestored %tmm5, 2048(%rsp,%rax) # 1024-byte Folded Spill
; CHECK-NEXT: tdpbssd %tmm3, %tmm2, %tmm5
; CHECK-NEXT: movabsq $64, %rax
; CHECK-NEXT: tilestored %tmm5, 1024(%rsp,%rax) # 1024-byte Folded Spill
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: callq foo
; CHECK-NEXT: ldtilecfg {{[-0-9]+}}(%r{{[sb]}}p) # 64-byte Folded Reload
; CHECK-NEXT: movabsq $64, %rax
; CHECK-NEXT: tileloadd 1024(%rsp,%rax), %tmm6 # 1024-byte Folded Reload
; CHECK-NEXT: tilestored %tmm6, (%r15,%r14)
; CHECK-NEXT: .LBB0_3: # %exit
; CHECK-NEXT: movl $buf, %eax
; CHECK-NEXT: movl $32, %ecx
; CHECK-NEXT: movw $8, %dx
; CHECK-NEXT: tileloadd (%rax,%rcx), %tmm4
; CHECK-NEXT: movabsq $64, %rax
; CHECK-NEXT: tileloadd 2048(%rsp,%rax), %tmm5 # 1024-byte Folded Reload
; CHECK-NEXT: tdpbssd %tmm4, %tmm6, %tmm5
; CHECK-NEXT: movl $buf+2048, %eax
; CHECK-NEXT: tilestored %tmm5, (%rax,%rcx)
; CHECK-NEXT: addq $4056, %rsp # imm = 0xFD8
; CHECK-NEXT: .cfi_def_cfa_offset 40
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: popq %r14
; CHECK-NEXT: .cfi_def_cfa_offset 24
; CHECK-NEXT: popq %r15
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 8
; CHECK-NEXT: tilerelease
; CHECK-NEXT: retq
  %c = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %0, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 2048), i64 32)
  br i1 undef, label %if.true, label %if.false
if.true:
  %a1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %0, i16 8, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 0), i64 32)
  %b1 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 1024), i64 32)
  %d1 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %0, i16 %1, i16 8, x86_amx %c, x86_amx %a1, x86_amx %b1)
  tail call void (...) @foo()
  br label %exit
if.false:
  %a2 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %0, i16 8, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 0), i64 32)
  %b2 = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 8, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 1024), i64 32)
  %d2 = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %0, i16 %1, i16 8, x86_amx %c, x86_amx %a2, x86_amx %b2)
  tail call void (...) @foo()
  tail call void @llvm.x86.tilestored64.internal(i16 %0, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 2048), i64 32, x86_amx %d2)
  br label %exit
exit:
  %d = phi x86_amx [ %d1, %if.true ], [ %d2, %if.false ]
  %a = tail call x86_amx @llvm.x86.tileloadd64.internal(i16 %0, i16 8, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 0), i64 32)
  %res = tail call x86_amx @llvm.x86.tdpbssd.internal(i16 %0, i16 %1, i16 8, x86_amx %c, x86_amx %d, x86_amx %a)
  tail call void @llvm.x86.tilestored64.internal(i16 %0, i16 %1, i8* getelementptr inbounds ([3072 x i8], [3072 x i8]* @buf, i64 0, i64 2048), i64 32, x86_amx %res)
  ret void
}

declare dso_local void @foo(...) local_unnamed_addr

declare x86_amx @llvm.x86.tileloadd64.internal(i16, i16, i8*, i64)
declare x86_amx @llvm.x86.tdpbssd.internal(i16, i16, i16, x86_amx, x86_amx, x86_amx)
declare void @llvm.x86.tilestored64.internal(i16, i16, i8*, i64, x86_amx)