clang-p2996/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memmove.ll
Commit bbbe8ecc17 by Amara Emerson: [GlobalISel][Localizer] Allow localization of a small number of repeated phi uses. (#77566)
We previously had a heuristic that bailed out if a value V was used multiple times
in a single PHI, to avoid potentially rematerializing it into many predecessors.
These PHI uses counted as only a single use in the shouldLocalize() hook, because
the hook treated the PHI as one using instruction and did not factor in that it
may have many incoming edges.
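
To make the counting concrete, here is a minimal C++ sketch of per-edge counting, the quantity this patch cares about. It is an illustration under assumptions, not the actual Localizer code, and countPhiEdgeUses is a hypothetical helper name:
```cpp
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"

using namespace llvm;

// Hypothetical helper (not the actual LLVM implementation): count how many of
// a PHI's incoming edges carry the same register. After the def at operand 0,
// a PHI's operands come in (value, predecessor-block) pairs, so the value
// operands sit at the odd indices.
static unsigned countPhiEdgeUses(const MachineInstr &Phi, Register Reg) {
  unsigned Count = 0;
  for (unsigned I = 1, E = Phi.getNumOperands(); I < E; I += 2)
    if (Phi.getOperand(I).isReg() && Phi.getOperand(I).getReg() == Reg)
      ++Count;
  return Count;
}
```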

It turns out this heuristic is slightly too pessimistic: allowing a small number
of these repeated uses to be localized can improve code size by shortening live
ranges, especially when those ranges span a call.

This change results in some code-size improvements on CTMark at -Os:
```
Program                                       size.__text
                                              before         after           diff
kimwitu++/kc                                  451676.00      451860.00       0.0%
mafft/pairlocalalign                          241460.00      241540.00       0.0%
tramp3d-v4/tramp3d-v4                         389216.00      389208.00      -0.0%
7zip/7zip-benchmark                           587528.00      587464.00      -0.0%
Bullet/bullet                                 457424.00      457348.00      -0.0%
consumer-typeset/consumer-typeset             405472.00      405376.00      -0.0%
SPASS/SPASS                                   410288.00      410120.00      -0.0%
lencod/lencod                                 426396.00      426108.00      -0.1%
ClamAV/clamscan                               380108.00      379756.00      -0.1%
sqlite3/sqlite3                               283664.00      283372.00      -0.1%
                           Geomean difference                               -0.0%
```
I experimented with different variations and thresholds. Using 3 instead
of 2 resulted in a further 0.1% improvement on ClamAV, but regressed
sqlite3 by the same percentage.
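
Building on the sketch above, this is roughly how such a cap could gate localization; the threshold value comes from the discussion here, while mayLocalizeRepeatedPhiUses is a hypothetical name, not the pass's API:
```cpp
// Threshold as chosen in this patch: allow up to 2 repeated uses of a value
// in one PHI. Per the measurements above, raising it to 3 traded a ~0.1% win
// on ClamAV for an equal regression on sqlite3.
constexpr unsigned MaxRepeatedPhiUses = 2;

static bool mayLocalizeRepeatedPhiUses(const MachineInstr &Phi, Register Reg) {
  return countPhiEdgeUses(Phi, Reg) <= MaxRepeatedPhiUses;
}
```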
Committed: 2024-01-11 18:57:37 +08:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-- -verify-machineinstrs -mem-intrinsic-expand-size=3 %s -o - | FileCheck -check-prefix=LOOP %s
; RUN: llc -global-isel -mtriple=amdgcn-- -verify-machineinstrs -mem-intrinsic-expand-size=5 %s -o - | FileCheck -check-prefix=UNROLL %s
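; The memmove below copies 4 bytes: with -mem-intrinsic-expand-size=3 it is
; lowered to a byte-copy loop (LOOP checks), while with
; -mem-intrinsic-expand-size=5 it is fully unrolled into scalar loads and
; stores (UNROLL checks).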
declare void @llvm.memmove.p1.p1.i32(ptr addrspace(1), ptr addrspace(1), i32, i1)
define amdgpu_cs void @memmove_p1i8(ptr addrspace(1) %dst, ptr addrspace(1) %src) {
; LOOP-LABEL: memmove_p1i8:
; LOOP: ; %bb.0:
; LOOP-NEXT: v_cmp_ge_u64_e32 vcc, v[2:3], v[0:1]
; LOOP-NEXT: s_and_saveexec_b64 s[0:1], vcc
; LOOP-NEXT: s_xor_b64 s[4:5], exec, s[0:1]
; LOOP-NEXT: s_cbranch_execz .LBB0_3
; LOOP-NEXT: ; %bb.1: ; %copy_forward
; LOOP-NEXT: s_mov_b64 s[6:7], 0
; LOOP-NEXT: s_mov_b32 s2, 0
; LOOP-NEXT: s_mov_b32 s3, 0xf000
; LOOP-NEXT: s_mov_b64 s[0:1], 0
; LOOP-NEXT: v_mov_b32_e32 v4, s6
; LOOP-NEXT: v_mov_b32_e32 v5, s7
; LOOP-NEXT: .LBB0_2: ; %copy_forward_loop
; LOOP-NEXT: ; =>This Inner Loop Header: Depth=1
; LOOP-NEXT: v_add_i32_e32 v6, vcc, v2, v4
; LOOP-NEXT: v_addc_u32_e32 v7, vcc, v3, v5, vcc
; LOOP-NEXT: s_waitcnt expcnt(0)
; LOOP-NEXT: buffer_load_ubyte v8, v[6:7], s[0:3], 0 addr64
; LOOP-NEXT: v_add_i32_e32 v6, vcc, v0, v4
; LOOP-NEXT: v_addc_u32_e32 v7, vcc, v1, v5, vcc
; LOOP-NEXT: v_add_i32_e32 v4, vcc, 1, v4
; LOOP-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; LOOP-NEXT: v_cmp_ne_u32_e32 vcc, 4, v4
; LOOP-NEXT: s_waitcnt vmcnt(0)
; LOOP-NEXT: buffer_store_byte v8, v[6:7], s[0:3], 0 addr64
; LOOP-NEXT: s_cbranch_vccnz .LBB0_2
; LOOP-NEXT: .LBB0_3: ; %Flow17
; LOOP-NEXT: s_andn2_saveexec_b64 s[0:1], s[4:5]
; LOOP-NEXT: s_cbranch_execz .LBB0_6
; LOOP-NEXT: ; %bb.4: ; %copy_backwards
; LOOP-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; LOOP-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; LOOP-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; LOOP-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; LOOP-NEXT: s_mov_b32 s0, -4
; LOOP-NEXT: s_mov_b32 s6, 0
; LOOP-NEXT: s_mov_b32 s7, 0xf000
; LOOP-NEXT: s_mov_b64 s[4:5], 0
; LOOP-NEXT: v_mov_b32_e32 v4, s0
; LOOP-NEXT: .LBB0_5: ; %copy_backwards_loop
; LOOP-NEXT: ; =>This Inner Loop Header: Depth=1
; LOOP-NEXT: s_waitcnt expcnt(0)
; LOOP-NEXT: buffer_load_ubyte v5, v[2:3], s[4:7], 0 addr64
; LOOP-NEXT: v_add_i32_e32 v4, vcc, 1, v4
; LOOP-NEXT: s_and_b64 vcc, vcc, exec
; LOOP-NEXT: s_waitcnt vmcnt(0)
; LOOP-NEXT: buffer_store_byte v5, v[0:1], s[4:7], 0 addr64
; LOOP-NEXT: v_add_i32_e64 v0, s[0:1], -1, v0
; LOOP-NEXT: v_addc_u32_e64 v1, s[0:1], -1, v1, s[0:1]
; LOOP-NEXT: v_add_i32_e64 v2, s[0:1], -1, v2
; LOOP-NEXT: v_addc_u32_e64 v3, s[0:1], -1, v3, s[0:1]
; LOOP-NEXT: s_cbranch_vccz .LBB0_5
; LOOP-NEXT: .LBB0_6: ; %memmove_done
; LOOP-NEXT: s_endpgm
;
; UNROLL-LABEL: memmove_p1i8:
; UNROLL: ; %bb.0:
; UNROLL-NEXT: s_mov_b32 s2, 0
; UNROLL-NEXT: s_mov_b32 s3, 0xf000
; UNROLL-NEXT: s_mov_b64 s[0:1], 0
; UNROLL-NEXT: buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64
; UNROLL-NEXT: buffer_load_ubyte v5, v[2:3], s[0:3], 0 addr64 offset:1
; UNROLL-NEXT: buffer_load_ubyte v6, v[2:3], s[0:3], 0 addr64 offset:2
; UNROLL-NEXT: buffer_load_ubyte v2, v[2:3], s[0:3], 0 addr64 offset:3
; UNROLL-NEXT: s_waitcnt vmcnt(3)
; UNROLL-NEXT: buffer_store_byte v4, v[0:1], s[0:3], 0 addr64
; UNROLL-NEXT: s_waitcnt vmcnt(3)
; UNROLL-NEXT: buffer_store_byte v5, v[0:1], s[0:3], 0 addr64 offset:1
; UNROLL-NEXT: s_waitcnt vmcnt(3)
; UNROLL-NEXT: buffer_store_byte v6, v[0:1], s[0:3], 0 addr64 offset:2
; UNROLL-NEXT: s_waitcnt vmcnt(3)
; UNROLL-NEXT: buffer_store_byte v2, v[0:1], s[0:3], 0 addr64 offset:3
; UNROLL-NEXT: s_endpgm
call void @llvm.memmove.p1.p1.i32(ptr addrspace(1) %dst, ptr addrspace(1) %src, i32 4, i1 false)
ret void
}