Similarly to 3-DWORD operations, it is better for performance to split unaligned operations as long as they are at least DWORD aligned. Performance data:

```
Using platform: AMD Accelerated Parallel Processing
Using device: gfx900:xnack-

ds_write_b128 aligned by 16:       4.9 sec
ds_write2_b64 aligned by 16:       5.1 sec
ds_write2_b32 * 2 aligned by 16:   5.5 sec
ds_write_b128 aligned by  1:       8.1 sec
ds_write2_b64 aligned by  1:       8.7 sec
ds_write2_b32 * 2 aligned by  1:  14.0 sec
ds_write_b128 aligned by  2:       8.1 sec
ds_write2_b64 aligned by  2:       8.7 sec
ds_write2_b32 * 2 aligned by  2:  14.0 sec
ds_write_b128 aligned by  4:       5.6 sec
ds_write2_b64 aligned by  4:       8.7 sec
ds_write2_b32 * 2 aligned by  4:   5.6 sec
ds_write_b128 aligned by  8:       5.6 sec
ds_write2_b64 aligned by  8:       5.1 sec
ds_write2_b32 * 2 aligned by  8:   5.6 sec

ds_read_b128 aligned by 16:        3.8 sec
ds_read2_b64 aligned by 16:        3.8 sec
ds_read2_b32 * 2 aligned by 16:    4.0 sec
ds_read_b128 aligned by  1:        4.6 sec
ds_read2_b64 aligned by  1:        8.1 sec
ds_read2_b32 * 2 aligned by  1:   14.0 sec
ds_read_b128 aligned by  2:        4.6 sec
ds_read2_b64 aligned by  2:        8.1 sec
ds_read2_b32 * 2 aligned by  2:   14.0 sec
ds_read_b128 aligned by  4:        4.6 sec
ds_read2_b64 aligned by  4:        8.1 sec
ds_read2_b32 * 2 aligned by  4:    4.0 sec
ds_read_b128 aligned by  8:        4.6 sec
ds_read2_b64 aligned by  8:        3.8 sec
ds_read2_b32 * 2 aligned by  8:    4.0 sec

Using platform: AMD Accelerated Parallel Processing
Using device: gfx1030

ds_write_b128 aligned by 16:       6.2 sec
ds_write2_b64 aligned by 16:       7.1 sec
ds_write2_b32 * 2 aligned by 16:   7.6 sec
ds_write_b128 aligned by  1:      24.1 sec
ds_write2_b64 aligned by  1:      25.2 sec
ds_write2_b32 * 2 aligned by  1:  43.7 sec
ds_write_b128 aligned by  2:      24.1 sec
ds_write2_b64 aligned by  2:      25.1 sec
ds_write2_b32 * 2 aligned by  2:  43.7 sec
ds_write_b128 aligned by  4:      14.4 sec
ds_write2_b64 aligned by  4:      25.1 sec
ds_write2_b32 * 2 aligned by  4:   7.6 sec
ds_write_b128 aligned by  8:      14.4 sec
ds_write2_b64 aligned by  8:       7.1 sec
ds_write2_b32 * 2 aligned by  8:   7.6 sec

ds_read_b128 aligned by 16:        6.2 sec
ds_read2_b64 aligned by 16:        6.3 sec
ds_read2_b32 * 2 aligned by 16:    7.5 sec
ds_read_b128 aligned by  1:       12.5 sec
ds_read2_b64 aligned by  1:       24.0 sec
ds_read2_b32 * 2 aligned by  1:   43.6 sec
ds_read_b128 aligned by  2:       12.5 sec
ds_read2_b64 aligned by  2:       24.0 sec
ds_read2_b32 * 2 aligned by  2:   43.6 sec
ds_read_b128 aligned by  4:       12.5 sec
ds_read2_b64 aligned by  4:       24.0 sec
ds_read2_b32 * 2 aligned by  4:    7.5 sec
ds_read_b128 aligned by  8:       12.5 sec
ds_read2_b64 aligned by  8:        6.3 sec
ds_read2_b32 * 2 aligned by  8:    7.5 sec
```

Differential Revision: https://reviews.llvm.org/D123634
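The timings were collected with an OpenCL micro-benchmark (the platform and device strings above are OpenCL enumerations); its source is not part of this change. As a minimal, hypothetical sketch of the kind of kernel such numbers could come from, with each work-item copying a 16-byte value through an LDS slot misaligned by a compile-time byte offset (the kernel name, `OFFSET`, `ITERS`, and the group size of 64 are illustrative assumptions, not the actual harness):

```
// Hypothetical sketch only; the real benchmark behind the numbers above is
// not part of this change. OFFSET selects the byte misalignment (1, 2, 4, 8
// or 16) and ITERS the number of LDS round trips per work-item.
#ifndef OFFSET
#define OFFSET 4
#endif
#ifndef ITERS
#define ITERS 1024
#endif

__kernel void lds_unaligned_copy(__global const uint4 *in, __global uint4 *out) {
  // One 16-byte slot per work-item (group size of 64 assumed), plus slack so
  // the byte offset stays inside the array.
  __local uchar lds[64 * 16 + 16];
  size_t gid = get_global_id(0);
  size_t lid = get_local_id(0);
  // volatile keeps the per-iteration LDS traffic from being optimised away.
  volatile __local uint4 *slot =
      (volatile __local uint4 *)(lds + lid * 16 + OFFSET);

  uint4 v = in[gid];
  for (int i = 0; i < ITERS; ++i) {
    *slot = v;   // becomes ds_write_b128, ds_write2_b64 or a ds_write2_b32
    v = *slot;   // pair (and the matching reads), depending on the alignment
  }
  out[gid] = v;
}
```

The test below checks the corresponding instruction selection directly at the IR level for the alignments and address spaces of interest.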
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,ALIGNED,SPLIT %s
; RUN: llc -march=amdgcn -mcpu=gfx1011 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,ALIGNED,SPLIT %s
; RUN: llc -march=amdgcn -mcpu=gfx1012 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,ALIGNED,SPLIT %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs -mattr=+cumode < %s | FileCheck -check-prefixes=GCN,ALIGNED,VECT %s
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs -mattr=+cumode,+unaligned-access-mode < %s | FileCheck -check-prefixes=GCN,UNALIGNED,VECT %s

; GCN-LABEL: test_local_misaligned_v2:
; GCN-DAG: ds_read2_b32
; GCN-DAG: ds_write2_b32
define amdgpu_kernel void @test_local_misaligned_v2(i32 addrspace(3)* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(3)* %arg, i32 %lid
  %ptr = bitcast i32 addrspace(3)* %gep to <2 x i32> addrspace(3)*
  %load = load <2 x i32>, <2 x i32> addrspace(3)* %ptr, align 4
  %v1 = extractelement <2 x i32> %load, i32 0
  %v2 = extractelement <2 x i32> %load, i32 1
  %v3 = insertelement <2 x i32> undef, i32 %v2, i32 0
  %v4 = insertelement <2 x i32> %v3, i32 %v1, i32 1
  store <2 x i32> %v4, <2 x i32> addrspace(3)* %ptr, align 4
  ret void
}

; GCN-LABEL: test_local_misaligned_v4:
; GCN-DAG: ds_read2_b32
; GCN-DAG: ds_read2_b32
; GCN-DAG: ds_write2_b32
; GCN-DAG: ds_write2_b32
define amdgpu_kernel void @test_local_misaligned_v4(i32 addrspace(3)* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(3)* %arg, i32 %lid
  %ptr = bitcast i32 addrspace(3)* %gep to <4 x i32> addrspace(3)*
  %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 4
  %v1 = extractelement <4 x i32> %load, i32 0
  %v2 = extractelement <4 x i32> %load, i32 1
  %v3 = extractelement <4 x i32> %load, i32 2
  %v4 = extractelement <4 x i32> %load, i32 3
  %v5 = insertelement <4 x i32> undef, i32 %v4, i32 0
  %v6 = insertelement <4 x i32> %v5, i32 %v3, i32 1
  %v7 = insertelement <4 x i32> %v6, i32 %v2, i32 2
  %v8 = insertelement <4 x i32> %v7, i32 %v1, i32 3
  store <4 x i32> %v8, <4 x i32> addrspace(3)* %ptr, align 4
  ret void
}

; GCN-LABEL: test_local_misaligned_v3:
; GCN-DAG: ds_read2_b32
; GCN-DAG: ds_read_b32
; GCN-DAG: ds_write2_b32
; GCN-DAG: ds_write_b32
define amdgpu_kernel void @test_local_misaligned_v3(i32 addrspace(3)* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(3)* %arg, i32 %lid
  %ptr = bitcast i32 addrspace(3)* %gep to <3 x i32> addrspace(3)*
  %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 4
  %v1 = extractelement <3 x i32> %load, i32 0
  %v2 = extractelement <3 x i32> %load, i32 1
  %v3 = extractelement <3 x i32> %load, i32 2
  %v5 = insertelement <3 x i32> undef, i32 %v3, i32 0
  %v6 = insertelement <3 x i32> %v5, i32 %v1, i32 1
  %v7 = insertelement <3 x i32> %v6, i32 %v2, i32 2
  store <3 x i32> %v7, <3 x i32> addrspace(3)* %ptr, align 4
  ret void
}

; GCN-LABEL: test_flat_misaligned_v2:
; VECT-DAG: flat_load_dwordx2 v
; VECT-DAG: flat_store_dwordx2 v
; SPLIT-DAG: flat_load_dword v
; SPLIT-DAG: flat_load_dword v
; SPLIT-DAG: flat_store_dword v
; SPLIT-DAG: flat_store_dword v
define amdgpu_kernel void @test_flat_misaligned_v2(i32* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32* %arg, i32 %lid
  %ptr = bitcast i32* %gep to <2 x i32>*
  %load = load <2 x i32>, <2 x i32>* %ptr, align 4
  %v1 = extractelement <2 x i32> %load, i32 0
  %v2 = extractelement <2 x i32> %load, i32 1
  %v3 = insertelement <2 x i32> undef, i32 %v2, i32 0
  %v4 = insertelement <2 x i32> %v3, i32 %v1, i32 1
  store <2 x i32> %v4, <2 x i32>* %ptr, align 4
  ret void
}

; GCN-LABEL: test_flat_misaligned_v4:
; VECT-DAG: flat_load_dwordx4 v
; VECT-DAG: flat_store_dwordx4 v
; SPLIT-DAG: flat_load_dword v
; SPLIT-DAG: flat_load_dword v
; SPLIT-DAG: flat_load_dword v
; SPLIT-DAG: flat_load_dword v
; SPLIT-DAG: flat_store_dword v
; SPLIT-DAG: flat_store_dword v
; SPLIT-DAG: flat_store_dword v
; SPLIT-DAG: flat_store_dword v
define amdgpu_kernel void @test_flat_misaligned_v4(i32* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32* %arg, i32 %lid
  %ptr = bitcast i32* %gep to <4 x i32>*
  %load = load <4 x i32>, <4 x i32>* %ptr, align 4
  %v1 = extractelement <4 x i32> %load, i32 0
  %v2 = extractelement <4 x i32> %load, i32 1
  %v3 = extractelement <4 x i32> %load, i32 2
  %v4 = extractelement <4 x i32> %load, i32 3
  %v5 = insertelement <4 x i32> undef, i32 %v4, i32 0
  %v6 = insertelement <4 x i32> %v5, i32 %v3, i32 1
  %v7 = insertelement <4 x i32> %v6, i32 %v2, i32 2
  %v8 = insertelement <4 x i32> %v7, i32 %v1, i32 3
  store <4 x i32> %v8, <4 x i32>* %ptr, align 4
  ret void
}

; TODO: Reinstate the test below once v3i32/v3f32 is reinstated.

; GCN-LABEL: test_flat_misaligned_v3:
; xVECT-DAG: flat_load_dwordx3 v
; xVECT-DAG: flat_store_dwordx3 v
; xSPLIT-DAG: flat_load_dword v
; xSPLIT-DAG: flat_load_dword v
; xSPLIT-DAG: flat_load_dword v
; xSPLIT-DAG: flat_store_dword v
; xSPLIT-DAG: flat_store_dword v
; xSPLIT-DAG: flat_store_dword v
define amdgpu_kernel void @test_flat_misaligned_v3(i32* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32* %arg, i32 %lid
  %ptr = bitcast i32* %gep to <3 x i32>*
  %load = load <3 x i32>, <3 x i32>* %ptr, align 4
  %v1 = extractelement <3 x i32> %load, i32 0
  %v2 = extractelement <3 x i32> %load, i32 1
  %v3 = extractelement <3 x i32> %load, i32 2
  %v5 = insertelement <3 x i32> undef, i32 %v3, i32 0
  %v6 = insertelement <3 x i32> %v5, i32 %v1, i32 1
  %v7 = insertelement <3 x i32> %v6, i32 %v2, i32 2
  store <3 x i32> %v7, <3 x i32>* %ptr, align 4
  ret void
}

; GCN-LABEL: test_local_aligned_v2:
; GCN-DAG: ds_read_b64
; GCN-DAG: ds_write_b64
define amdgpu_kernel void @test_local_aligned_v2(i32 addrspace(3)* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(3)* %arg, i32 %lid
  %ptr = bitcast i32 addrspace(3)* %gep to <2 x i32> addrspace(3)*
  %load = load <2 x i32>, <2 x i32> addrspace(3)* %ptr, align 8
  %v1 = extractelement <2 x i32> %load, i32 0
  %v2 = extractelement <2 x i32> %load, i32 1
  %v3 = insertelement <2 x i32> undef, i32 %v2, i32 0
  %v4 = insertelement <2 x i32> %v3, i32 %v1, i32 1
  store <2 x i32> %v4, <2 x i32> addrspace(3)* %ptr, align 8
  ret void
}

; GCN-LABEL: test_local_aligned_v3:
; GCN-DAG: ds_read_b96
; GCN-DAG: ds_write_b96
define amdgpu_kernel void @test_local_aligned_v3(i32 addrspace(3)* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(3)* %arg, i32 %lid
  %ptr = bitcast i32 addrspace(3)* %gep to <3 x i32> addrspace(3)*
  %load = load <3 x i32>, <3 x i32> addrspace(3)* %ptr, align 16
  %v1 = extractelement <3 x i32> %load, i32 0
  %v2 = extractelement <3 x i32> %load, i32 1
  %v3 = extractelement <3 x i32> %load, i32 2
  %v5 = insertelement <3 x i32> undef, i32 %v3, i32 0
  %v6 = insertelement <3 x i32> %v5, i32 %v1, i32 1
  %v7 = insertelement <3 x i32> %v6, i32 %v2, i32 2
  store <3 x i32> %v7, <3 x i32> addrspace(3)* %ptr, align 16
  ret void
}

; GCN-LABEL: test_flat_aligned_v2:
; GCN-DAG: flat_load_dwordx2 v
; GCN-DAG: flat_store_dwordx2 v
define amdgpu_kernel void @test_flat_aligned_v2(i32* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32* %arg, i32 %lid
  %ptr = bitcast i32* %gep to <2 x i32>*
  %load = load <2 x i32>, <2 x i32>* %ptr, align 8
  %v1 = extractelement <2 x i32> %load, i32 0
  %v2 = extractelement <2 x i32> %load, i32 1
  %v3 = insertelement <2 x i32> undef, i32 %v2, i32 0
  %v4 = insertelement <2 x i32> %v3, i32 %v1, i32 1
  store <2 x i32> %v4, <2 x i32>* %ptr, align 8
  ret void
}

; GCN-LABEL: test_flat_aligned_v4:
; GCN-DAG: flat_load_dwordx4 v
; GCN-DAG: flat_store_dwordx4 v
define amdgpu_kernel void @test_flat_aligned_v4(i32* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32* %arg, i32 %lid
  %ptr = bitcast i32* %gep to <4 x i32>*
  %load = load <4 x i32>, <4 x i32>* %ptr, align 16
  %v1 = extractelement <4 x i32> %load, i32 0
  %v2 = extractelement <4 x i32> %load, i32 1
  %v3 = extractelement <4 x i32> %load, i32 2
  %v4 = extractelement <4 x i32> %load, i32 3
  %v5 = insertelement <4 x i32> undef, i32 %v4, i32 0
  %v6 = insertelement <4 x i32> %v5, i32 %v3, i32 1
  %v7 = insertelement <4 x i32> %v6, i32 %v2, i32 2
  %v8 = insertelement <4 x i32> %v7, i32 %v1, i32 3
  store <4 x i32> %v8, <4 x i32>* %ptr, align 16
  ret void
}

; GCN-LABEL: test_local_v4_aligned8:
; ALIGNED-DAG: ds_read2_b64
; ALIGNED-DAG: ds_write2_b64
; UNALIGNED-DAG: ds_read2_b64
; UNALIGNED-DAG: ds_write2_b64
define amdgpu_kernel void @test_local_v4_aligned8(i32 addrspace(3)* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32 addrspace(3)* %arg, i32 %lid
  %ptr = bitcast i32 addrspace(3)* %gep to <4 x i32> addrspace(3)*
  %load = load <4 x i32>, <4 x i32> addrspace(3)* %ptr, align 8
  %v1 = extractelement <4 x i32> %load, i32 0
  %v2 = extractelement <4 x i32> %load, i32 1
  %v3 = extractelement <4 x i32> %load, i32 2
  %v4 = extractelement <4 x i32> %load, i32 3
  %v5 = insertelement <4 x i32> undef, i32 %v4, i32 0
  %v6 = insertelement <4 x i32> %v5, i32 %v3, i32 1
  %v7 = insertelement <4 x i32> %v6, i32 %v2, i32 2
  %v8 = insertelement <4 x i32> %v7, i32 %v1, i32 3
  store <4 x i32> %v8, <4 x i32> addrspace(3)* %ptr, align 8
  ret void
}

; GCN-LABEL: test_flat_v4_aligned8:
; VECT-DAG: flat_load_dwordx4 v
; VECT-DAG: flat_store_dwordx4 v
; SPLIT-DAG: flat_load_dwordx2 v
; SPLIT-DAG: flat_load_dwordx2 v
; SPLIT-DAG: flat_store_dwordx2 v
; SPLIT-DAG: flat_store_dwordx2 v
define amdgpu_kernel void @test_flat_v4_aligned8(i32* %arg) {
bb:
  %lid = tail call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr inbounds i32, i32* %arg, i32 %lid
  %ptr = bitcast i32* %gep to <4 x i32>*
  %load = load <4 x i32>, <4 x i32>* %ptr, align 8
  %v1 = extractelement <4 x i32> %load, i32 0
  %v2 = extractelement <4 x i32> %load, i32 1
  %v3 = extractelement <4 x i32> %load, i32 2
  %v4 = extractelement <4 x i32> %load, i32 3
  %v5 = insertelement <4 x i32> undef, i32 %v4, i32 0
  %v6 = insertelement <4 x i32> %v5, i32 %v3, i32 1
  %v7 = insertelement <4 x i32> %v6, i32 %v2, i32 2
  %v8 = insertelement <4 x i32> %v7, i32 %v1, i32 3
  store <4 x i32> %v8, <4 x i32>* %ptr, align 8
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()