Using a BufferSize of one for memory ProcResources results in better ILP, since it more accurately models the dependencies between memory ops and their consumers on an in-order processor. After this change, the scheduler treats the data edges from loads as blocking, so that stalls are guaranteed when waiting for data to be retrieved from memory. Since we don't actually track waitcnt here, this should do a better job of modeling their behavior. Practically, this means that the scheduler will trigger the 'STALL' heuristic more often. This type of change needs to be evaluated experimentally; preliminary results are positive.

Fixes: SWDEV-282962
Reviewed By: rampitec
Differential Revision: https://reviews.llvm.org/D114777
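In TableGen terms, the gist of such a change is giving a memory ProcResource a single-entry buffer so the machine scheduler models it in order. The sketch below is a minimal illustration, not the actual diff; the resource name HWVMEM, the SchedWrite WriteVMEM, and the latency value are assumptions for the example.

// Minimal sketch (assumed names and values, not the actual change):
// a memory ProcResource with BufferSize = 1, so the scheduler treats
// the data edges from loads to their consumers as blocking.
def HWVMEM : ProcResource<1> {
  let BufferSize = 1; // one op in flight; consumers stall until data returns
}

def WriteVMEM : SchedWrite;
def : WriteRes<WriteVMEM, [HWVMEM]> {
  let Latency = 80; // illustrative VMEM latency, not a measured value
}

With a single-entry buffer the scheduler can no longer hide a load's latency behind an idealized out-of-order window, which is why the 'STALL' heuristic fires more often.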
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-- -mcpu=tahiti < %s | FileCheck -check-prefix=SI %s
; RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -check-prefix=VI %s

; Make sure we don't try to form FMAX_LEGACY nodes with f64

define amdgpu_kernel void @test_fmax_legacy_uge_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
; SI-LABEL: test_fmax_legacy_uge_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_nlt_f64_e32 vcc, v[0:1], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_fmax_legacy_uge_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_nlt_f64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1

  %a = load double, double addrspace(1)* %gep.0, align 8
  %b = load double, double addrspace(1)* %gep.1, align 8

  %cmp = fcmp uge double %a, %b
  %val = select i1 %cmp, double %a, double %b
  store double %val, double addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @test_fmax_legacy_oge_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
; SI-LABEL: test_fmax_legacy_oge_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ge_f64_e32 vcc, v[0:1], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_fmax_legacy_oge_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_ge_f64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1

  %a = load double, double addrspace(1)* %gep.0, align 8
  %b = load double, double addrspace(1)* %gep.1, align 8

  %cmp = fcmp oge double %a, %b
  %val = select i1 %cmp, double %a, double %b
  store double %val, double addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @test_fmax_legacy_ugt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
; SI-LABEL: test_fmax_legacy_ugt_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_nle_f64_e32 vcc, v[0:1], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_fmax_legacy_ugt_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_nle_f64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1

  %a = load double, double addrspace(1)* %gep.0, align 8
  %b = load double, double addrspace(1)* %gep.1, align 8

  %cmp = fcmp ugt double %a, %b
  %val = select i1 %cmp, double %a, double %b
  store double %val, double addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @test_fmax_legacy_ogt_f64(double addrspace(1)* %out, double addrspace(1)* %in) #0 {
; SI-LABEL: test_fmax_legacy_ogt_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 s[8:9], s[2:3]
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_gt_f64_e32 vcc, v[0:1], v[2:3]
; SI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; SI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: test_fmax_legacy_ogt_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cmp_gt_f64_e32 vcc, v[0:1], v[2:3]
; VI-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
; VI-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x() #1
  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1

  %a = load double, double addrspace(1)* %gep.0, align 8
  %b = load double, double addrspace(1)* %gep.1, align 8

  %cmp = fcmp ogt double %a, %b
  %val = select i1 %cmp, double %a, double %b
  store double %val, double addrspace(1)* %out, align 8
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }