Using a BufferSize of one for memory ProcResources will result in better ILP since it more accurately models the dependencies between memory ops and their consumers on an in-order processor. After this change, the scheduler will treat the data edges from loads as blocking, so that stalls are guaranteed when waiting for data to be retrieved from memory. Since we don't actually track waitcnt here, this should do a better job of modeling their behavior. Practically, this means that the scheduler will trigger the 'STALL' heuristic more often. This type of change needs to be evaluated experimentally. Preliminary results are positive.

Fixes: SWDEV-282962

Reviewed By: rampitec

Differential Revision: https://reviews.llvm.org/D114777
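For context, a minimal TableGen sketch of what marking a memory resource as unbuffered looks like in a scheduling model. The resource names follow the style of AMDGPU's SISchedule.td (e.g. HWLGKM, HWVMEM), but these definitions are illustrative, not the verbatim patch:

  // Illustrative sketch: BufferSize = 1 makes a ProcResource unbuffered,
  // so the machine scheduler models a consumer of an outstanding load as
  // stalled until the data returns, rather than assuming buffered
  // out-of-order issue. This is what surfaces the 'STALL' heuristic.
  def HWLGKM : ProcResource<1> { let BufferSize = 1; } // scalar/LDS memory
  def HWVMEM : ProcResource<1> { let BufferSize = 1; } // vector memory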
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s

; Use a 64-bit value with lo bits that can be represented as an inline constant
define amdgpu_kernel void @i64_imm_inline_lo(i64 addrspace(1) *%out) {
; SI-LABEL: i64_imm_inline_lo:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 5
; SI-NEXT: v_mov_b32_e32 v1, 0x12345678
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: i64_imm_inline_lo:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 5
; VI-NEXT: v_mov_b32_e32 v1, 0x12345678
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
entry:
store i64 1311768464867721221, i64 addrspace(1) *%out ; 0x1234567800000005
ret void
}

; Use a 64-bit value with hi bits that can be represented as an inline constant
define amdgpu_kernel void @i64_imm_inline_hi(i64 addrspace(1) *%out) {
; SI-LABEL: i64_imm_inline_hi:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x12345678
; SI-NEXT: v_mov_b32_e32 v1, 5
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: i64_imm_inline_hi:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0x12345678
; VI-NEXT: v_mov_b32_e32 v1, 5
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
entry:
store i64 21780256376, i64 addrspace(1) *%out ; 0x0000000512345678
ret void
}

define amdgpu_kernel void @store_imm_neg_0.0_i64(i64 addrspace(1) *%out) {
; SI-LABEL: store_imm_neg_0.0_i64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_bfrev_b32_e32 v1, 1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_imm_neg_0.0_i64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_bfrev_b32_e32 v1, 1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
store i64 -9223372036854775808, i64 addrspace(1) *%out
ret void
}

define amdgpu_kernel void @store_inline_imm_neg_0.0_i32(i32 addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_neg_0.0_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_bfrev_b32_e32 v0, 1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_neg_0.0_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_bfrev_b32_e32 v0, 1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store i32 -2147483648, i32 addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_0.0_f32(float addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_0.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_0.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float 0.0, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_imm_neg_0.0_f32(float addrspace(1)* %out) {
; SI-LABEL: store_imm_neg_0.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_bfrev_b32_e32 v0, 1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_imm_neg_0.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_bfrev_b32_e32 v0, 1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float -0.0, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_0.5_f32(float addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_0.5_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0.5
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_0.5_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0.5
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float 0.5, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_m_0.5_f32(float addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_m_0.5_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, -0.5
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_m_0.5_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, -0.5
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float -0.5, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_1.0_f32(float addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_1.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 1.0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_1.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 1.0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float 1.0, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_m_1.0_f32(float addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_m_1.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, -1.0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_m_1.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, -1.0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float -1.0, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_2.0_f32(float addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_2.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 2.0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_2.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 2.0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float 2.0, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_m_2.0_f32(float addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_m_2.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, -2.0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_m_2.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, -2.0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float -2.0, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_4.0_f32(float addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_4.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 4.0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_4.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 4.0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float 4.0, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_m_4.0_f32(float addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_m_4.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, -4.0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_m_4.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, -4.0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float -4.0, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_inv_2pi_f32(float addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_inv_2pi_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x3e22f983
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_inv_2pi_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0.15915494
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float 0x3FC45F3060000000, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_m_inv_2pi_f32(float addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_m_inv_2pi_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0xbe22f983
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_m_inv_2pi_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0xbe22f983
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float 0xBFC45F3060000000, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_literal_imm_f32(float addrspace(1)* %out) {
; SI-LABEL: store_literal_imm_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x45800000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_literal_imm_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0x45800000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
store float 4096.0, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_0.0_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_0.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, 0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_0.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, 0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, 0.0
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_0.5_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_0.5_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, 0.5
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_0.5_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, 0.5
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, 0.5
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_0.5_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_neg_0.5_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, -0.5
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_0.5_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, -0.5
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, -0.5
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_1.0_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_1.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, 1.0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_1.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, 1.0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, 1.0
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_1.0_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_neg_1.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, -1.0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_1.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, -1.0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, -1.0
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_2.0_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_2.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, 2.0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_2.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, 2.0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, 2.0
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_2.0_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_neg_2.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, -2.0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_2.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, -2.0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, -2.0
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_4.0_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_4.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, 4.0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_4.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, 4.0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, 4.0
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_4.0_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_neg_4.0_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, -4.0
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_4.0_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, -4.0
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, -4.0
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @commute_add_inline_imm_0.5_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
; SI-LABEL: commute_add_inline_imm_0.5_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s10, s6
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s2
; SI-NEXT: s_mov_b32 s9, s3
; SI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_f32_e32 v0, 0.5, v0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: commute_add_inline_imm_0.5_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s10, s6
; VI-NEXT: s_mov_b32 s11, s7
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s2
; VI-NEXT: s_mov_b32 s9, s3
; VI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; VI-NEXT: s_mov_b32 s4, s0
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_f32_e32 v0, 0.5, v0
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
%x = load float, float addrspace(1)* %in
%y = fadd float %x, 0.5
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @commute_add_literal_f32(float addrspace(1)* %out, float addrspace(1)* %in) {
; SI-LABEL: commute_add_literal_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s10, s6
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b32 s8, s2
; SI-NEXT: s_mov_b32 s9, s3
; SI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; SI-NEXT: s_mov_b32 s4, s0
; SI-NEXT: s_mov_b32 s5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_f32_e32 v0, 0x44800000, v0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: commute_add_literal_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s10, s6
; VI-NEXT: s_mov_b32 s11, s7
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_mov_b32 s8, s2
; VI-NEXT: s_mov_b32 s9, s3
; VI-NEXT: buffer_load_dword v0, off, s[8:11], 0
; VI-NEXT: s_mov_b32 s4, s0
; VI-NEXT: s_mov_b32 s5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_f32_e32 v0, 0x44800000, v0
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT: s_endpgm
%x = load float, float addrspace(1)* %in
%y = fadd float %x, 1024.0
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_1_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_1_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, 1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_1_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, 1
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, 0x36a0000000000000
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_2_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_2_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, 2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_2_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, 2
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, 0x36b0000000000000
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_16_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_16_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, 16
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_16_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, 16
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, 0x36e0000000000000
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_1_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_neg_1_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_i32 s4, s4, -1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_1_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_i32 s4, s4, -1
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%xbc = bitcast float %x to i32
%y = add i32 %xbc, -1
%ybc = bitcast i32 %y to float
store float %ybc, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_2_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_neg_2_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_i32 s4, s4, -2
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_2_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_i32 s4, s4, -2
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%xbc = bitcast float %x to i32
%y = add i32 %xbc, -2
%ybc = bitcast i32 %y to float
store float %ybc, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_16_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_neg_16_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_i32 s4, s4, -16
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_16_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_i32 s4, s4, -16
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%xbc = bitcast float %x to i32
%y = add i32 %xbc, -16
%ybc = bitcast i32 %y to float
store float %ybc, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_63_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_63_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, 63
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_63_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, 63
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, 0x36ff800000000000
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_64_f32(float addrspace(1)* %out, float %x) {
; SI-LABEL: add_inline_imm_64_f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s4, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f32_e64 v0, s4, 64
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_64_f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s4, s[0:1], 0x2c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f32_e64 v0, s4, 64
; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd float %x, 0x3700000000000000
store float %y, float addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_0.0_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_0.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], 0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_0.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], 0
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0.0
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_0.5_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_0.5_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], 0.5
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_0.5_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], 0.5
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0.5
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_0.5_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_neg_0.5_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], -0.5
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_0.5_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], -0.5
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, -0.5
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_1.0_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_1.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], 1.0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_1.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], 1.0
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 1.0
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_1.0_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_neg_1.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], -1.0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_1.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], -1.0
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, -1.0
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_2.0_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_2.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], 2.0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_2.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], 2.0
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 2.0
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_2.0_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_neg_2.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], -2.0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_2.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], -2.0
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, -2.0
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_4.0_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_4.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], 4.0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_4.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], 4.0
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 4.0
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_4.0_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_neg_4.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], -4.0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_4.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], -4.0
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, -4.0
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_inv_2pi_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_inv_2pi_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: v_mov_b32_e32 v0, 0x6dc9c882
; SI-NEXT: v_mov_b32_e32 v1, 0x3fc45f30
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_inv_2pi_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], 0.15915494309189532
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0x3fc45f306dc9c882
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_m_inv_2pi_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_m_inv_2pi_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: v_mov_b32_e32 v0, 0x6dc9c882
; SI-NEXT: v_mov_b32_e32 v1, 0xbfc45f30
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_m_inv_2pi_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: v_mov_b32_e32 v0, 0x6dc9c882
; VI-NEXT: v_mov_b32_e32 v1, 0xbfc45f30
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0xbfc45f306dc9c882
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_1_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_1_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], 1
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_1_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], 1
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0x0000000000000001
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_2_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_2_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], 2
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_2_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], 2
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0x0000000000000002
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_16_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_16_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], 16
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_16_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], 16
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0x0000000000000010
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_1_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_neg_1_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: v_mov_b32_e32 v0, -1
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v1, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_1_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: v_mov_b32_e32 v0, -1
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0xffffffffffffffff
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_2_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_neg_2_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, -2
; SI-NEXT: v_mov_b32_e32 v1, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_2_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, -2
; VI-NEXT: v_mov_b32_e32 v1, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0xfffffffffffffffe
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_neg_16_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_neg_16_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, -16
; SI-NEXT: v_mov_b32_e32 v1, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_neg_16_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, -16
; VI-NEXT: v_mov_b32_e32 v1, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0xfffffffffffffff0
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_63_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_63_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], 63
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_63_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], 63
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0x000000000000003F
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @add_inline_imm_64_f64(double addrspace(1)* %out, [8 x i32], double %x) {
; SI-LABEL: add_inline_imm_64_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_add_f64 v[0:1], s[2:3], 64
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: add_inline_imm_64_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f64 v[0:1], s[2:3], 64
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
%y = fadd double %x, 0x0000000000000040
store double %y, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_0.0_f64(double addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_0.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v1, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_0.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v1, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
store double 0.0, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_literal_imm_neg_0.0_f64(double addrspace(1)* %out) {
; SI-LABEL: store_literal_imm_neg_0.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_bfrev_b32_e32 v1, 1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_literal_imm_neg_0.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_bfrev_b32_e32 v1, 1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
store double -0.0, double addrspace(1)* %out
ret void
}

define amdgpu_kernel void @store_inline_imm_0.5_f64(double addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_0.5_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_mov_b32_e32 v1, 0x3fe00000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_0.5_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0x3fe00000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
  store double 0.5, double addrspace(1)* %out
  ret void
}

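; Likewise, 0xbfe00000 (the high half of -0.5) requires a literal.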
define amdgpu_kernel void @store_inline_imm_m_0.5_f64(double addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_m_0.5_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_mov_b32_e32 v1, 0xbfe00000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_m_0.5_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0xbfe00000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
  store double -0.5, double addrspace(1)* %out
  ret void
}

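; The high half of the f64 1.0 (0x3ff00000) differs from the f32 1.0 pattern (0x3f800000), so it also takes a literal.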
define amdgpu_kernel void @store_inline_imm_1.0_f64(double addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_1.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_mov_b32_e32 v1, 0x3ff00000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_1.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0x3ff00000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
  store double 1.0, double addrspace(1)* %out
  ret void
}

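; Same for -1.0: 0xbff00000 is not an f32 inline constant.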
define amdgpu_kernel void @store_inline_imm_m_1.0_f64(double addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_m_1.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_mov_b32_e32 v1, 0xbff00000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_m_1.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0xbff00000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
  store double -1.0, double addrspace(1)* %out
  ret void
}

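; The high half of the f64 2.0 (0x40000000) is exactly the f32 2.0 pattern, so the move uses the inline constant 2.0.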
define amdgpu_kernel void @store_inline_imm_2.0_f64(double addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_2.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_mov_b32_e32 v1, 2.0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_2.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 2.0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
  store double 2.0, double addrspace(1)* %out
  ret void
}

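; -2.0 works the same way: 0xc0000000 is the f32 -2.0 inline constant.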
define amdgpu_kernel void @store_inline_imm_m_2.0_f64(double addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_m_2.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_mov_b32_e32 v1, -2.0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_m_2.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, -2.0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
  store double -2.0, double addrspace(1)* %out
  ret void
}

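; The high half of the f64 4.0 (0x40100000) is not the f32 4.0 pattern (0x40800000), so a literal is required.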
define amdgpu_kernel void @store_inline_imm_4.0_f64(double addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_4.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_mov_b32_e32 v1, 0x40100000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_4.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0x40100000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
  store double 4.0, double addrspace(1)* %out
  ret void
}

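; Likewise for -4.0: 0xc0100000 needs a literal.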
define amdgpu_kernel void @store_inline_imm_m_4.0_f64(double addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_m_4.0_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_mov_b32_e32 v1, 0xc0100000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_m_4.0_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0xc0100000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
  store double -4.0, double addrspace(1)* %out
  ret void
}

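; 1/(2*pi) is stored through two 32-bit moves, so both halves take literals.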
define amdgpu_kernel void @store_inv_2pi_f64(double addrspace(1)* %out) {
; SI-LABEL: store_inv_2pi_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x6dc9c882
; SI-NEXT: v_mov_b32_e32 v1, 0x3fc45f30
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inv_2pi_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0x6dc9c882
; VI-NEXT: v_mov_b32_e32 v1, 0x3fc45f30
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
  store double 0x3fc45f306dc9c882, double addrspace(1)* %out
  ret void
}

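; Same for -1/(2*pi); only the sign bit of the high half changes.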
define amdgpu_kernel void @store_inline_imm_m_inv_2pi_f64(double addrspace(1)* %out) {
; SI-LABEL: store_inline_imm_m_inv_2pi_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0x6dc9c882
; SI-NEXT: v_mov_b32_e32 v1, 0xbfc45f30
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_inline_imm_m_inv_2pi_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0x6dc9c882
; VI-NEXT: v_mov_b32_e32 v1, 0xbfc45f30
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
  store double 0xbfc45f306dc9c882, double addrspace(1)* %out
  ret void
}

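; 4096.0 is not an inline constant; its high half 0x40b00000 is emitted as a literal.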
define amdgpu_kernel void @store_literal_imm_f64(double addrspace(1)* %out) {
; SI-LABEL: store_literal_imm_f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: v_mov_b32_e32 v0, 0
; SI-NEXT: v_mov_b32_e32 v1, 0x40b00000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: store_literal_imm_f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; VI-NEXT: s_mov_b32 s3, 0xf000
; VI-NEXT: s_mov_b32 s2, -1
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0x40b00000
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
  store double 4096.0, double addrspace(1)* %out
  ret void
}

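; The two f32 literals differ only in sign, but each multiply still carries its own 32-bit literal.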
define amdgpu_vs void @literal_folding(float %arg) {
; GCN-LABEL: literal_folding:
; GCN: ; %bb.0: ; %main_body
; GCN-NEXT: v_mul_f32_e32 v1, 0x3f4353f8, v0
; GCN-NEXT: v_mul_f32_e32 v0, 0xbf4353f8, v0
; GCN-NEXT: exp pos0 v1, v1, v0, v0 done
; GCN-NEXT: s_endpgm
main_body:
  %tmp = fmul float %arg, 0x3FE86A7F00000000
  %tmp1 = fmul float %arg, 0xBFE86A7F00000000
  call void @llvm.amdgcn.exp.f32(i32 12, i32 15, float %tmp, float %tmp, float %tmp1, float %tmp1, i1 true, i1 false) #0
  ret void
}

declare void @llvm.amdgcn.exp.f32(i32, i32, float, float, float, float, i1, i1) #0

attributes #0 = { nounwind }