clang-p2996/llvm/test/CodeGen/X86/pr18344.ll
Matthias Braun 189900eb14 X86: Stop assigning register costs for longer encodings.
This stops reporting CostPerUse 1 for `R8`-`R15` and `XMM8`-`XMM31`.
This was previously done because instructions using these registers need
a REX prefix, resulting in longer encodings. I found that this regresses
the quality of the register allocation, as the costs impose an ordering
on eviction candidates. I also feel that there is a bit of an impedance
mismatch: the actual cost occurs when encoding instructions that use
those registers, but the order of VReg assignments is not primarily
determined by their number of Defs+Uses.
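
For illustration (hand-assembled encodings, not part of this change):
reaching R8-R15 adds a one-byte REX prefix to an otherwise identical
instruction, e.g.

    addl %ecx, %eax      # 01 c8      (2 bytes)
    addl %r8d, %eax      # 44 01 c0   (3 bytes, REX.R prefix)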

I did extensive measurements with the llvm-test-suite with SPEC2006 and
SPEC2017 included; internal services showed similar patterns. Generally
there are a lot of improvements but also a lot of regressions. On
average, however, the allocation quality seems to improve at the cost of
a small code size regression.

Results for measuring static and dynamic instruction counts:

Dynamic Counts (scaled by execution frequency) / Optimization Remarks:
    Spills+FoldedSpills   -5.6%
    Reloads+FoldedReloads -4.2%
    Copies                -0.1%

Static / LLVM Statistics:
    regalloc.NumSpills    mean -1.6%, geomean -2.8%
    regalloc.NumReloads   mean -1.7%, geomean -3.1%
    size..text            mean +0.4%, geomean +0.4%

Static / LLVM Statistics:
    regalloc.NumSpills    mean -2.2%, geomean -3.1%
    regalloc.NumReloads   mean -2.6%, geomean -3.9%
    size..text            mean +0.6%, geomean +0.6%

Static / LLVM Statistics:
    regalloc.NumSpills   mean -3.0%
    regalloc.NumReloads  mean -3.3%
    size..text           mean +0.3%, geomean +0.3%
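
As a rough sketch, the static counters above can be collected for a
single module with llc's statistics output (assumes an assertions-enabled
or LLVM_FORCE_ENABLE_STATS build; the exact command is illustrative and
not part of this change):

    llc -O3 -stats -stats-json -o /dev/null input.ll 2> stats.json
    # stats.json then contains counters such as "regalloc.NumSpills" and
    # "regalloc.NumReloads"; size..text is the size of the .text section
    # of the generated binaries as reported by the test-suite harness.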

Differential Revision: https://reviews.llvm.org/D133902
2022-09-30 16:01:33 -07:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X64
%v4_varying_complex = type { <4 x float>, <4 x float> }
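
; Four i32 indices are loaded as a vector, scaled by 16 (shl 4), and each
; lane is sign-extended to i64 and used to load a float from %re; the four
; loaded floats are then stored at fixed byte offsets into %destination.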
define void @FFT(ptr noalias nocapture %destination, ptr noalias %re, ptr noalias nocapture %ptr_cast_for_load) nounwind {
; X86-LABEL: FFT:
; X86: # %bb.0: # %begin
; X86-NEXT: pushl %ebx
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movdqu (%edx), %xmm0
; X86-NEXT: pslld $4, %xmm0
; X86-NEXT: movd %xmm0, %edx
; X86-NEXT: pextrd $1, %xmm0, %esi
; X86-NEXT: pextrd $2, %xmm0, %edi
; X86-NEXT: pextrd $3, %xmm0, %ebx
; X86-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X86-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; X86-NEXT: movss %xmm0, 128(%eax)
; X86-NEXT: movss %xmm1, 164(%eax)
; X86-NEXT: movss %xmm2, 200(%eax)
; X86-NEXT: movss %xmm3, 236(%eax)
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebx
; X86-NEXT: retl
;
; X64-LABEL: FFT:
; X64: # %bb.0: # %begin
; X64-NEXT: movdqu (%rdx), %xmm0
; X64-NEXT: pslld $4, %xmm0
; X64-NEXT: movd %xmm0, %eax
; X64-NEXT: cltq
; X64-NEXT: pextrd $1, %xmm0, %ecx
; X64-NEXT: movslq %ecx, %rcx
; X64-NEXT: pextrd $2, %xmm0, %edx
; X64-NEXT: movslq %edx, %rdx
; X64-NEXT: pextrd $3, %xmm0, %r8d
; X64-NEXT: movslq %r8d, %r8
; X64-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X64-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; X64-NEXT: movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; X64-NEXT: movss %xmm0, 128(%rdi)
; X64-NEXT: movss %xmm1, 164(%rdi)
; X64-NEXT: movss %xmm2, 200(%rdi)
; X64-NEXT: movss %xmm3, 236(%rdi)
; X64-NEXT: retq
begin:
  %ptr_masked_load79 = load <4 x i32>, ptr %ptr_cast_for_load, align 4
  %mul__bitReversedProgramIndex_load = shl <4 x i32> %ptr_masked_load79, <i32 4, i32 4, i32 4, i32 4>
  %offset32_1 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 0
  %ptroffset_1 = sext i32 %offset32_1 to i64
  %offset32_2 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 1
  %ptroffset_2 = sext i32 %offset32_2 to i64
  %offset32_3 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 2
  %ptroffset_3 = sext i32 %offset32_3 to i64
  %offset32_4 = extractelement <4 x i32> %mul__bitReversedProgramIndex_load, i32 3
  %ptroffset_4 = sext i32 %offset32_4 to i64
  %ptrcast_1 = getelementptr float, ptr %re, i64 %ptroffset_1
  %val_1 = load float, ptr %ptrcast_1, align 4
  %ptrcast_2 = getelementptr float, ptr %re, i64 %ptroffset_2
  %val_2 = load float, ptr %ptrcast_2, align 4
  %ptrcast_3 = getelementptr float, ptr %re, i64 %ptroffset_3
  %val_3 = load float, ptr %ptrcast_3, align 4
  %ptrcast_4 = getelementptr float, ptr %re, i64 %ptroffset_4
  %val_4 = load float, ptr %ptrcast_4, align 4
  %ptrcast1_1 = getelementptr inbounds %v4_varying_complex, ptr %destination, i64 4, i32 0, i64 0
  store float %val_1, ptr %ptrcast1_1, align 4
  %finalptr_2 = getelementptr i8, ptr %destination, i64 164
  store float %val_2, ptr %finalptr_2, align 4
  %finalptr_3 = getelementptr i8, ptr %destination, i64 200
  store float %val_3, ptr %finalptr_3, align 4
  %finalptr_4 = getelementptr i8, ptr %destination, i64 236
  store float %val_4, ptr %finalptr_4, align 4
  ret void
}