Similar to 806761a762.
For IR files without a target triple, -mtriple= specifies the full
target triple while -march= merely sets the architecture part of the
default target triple, leaving a target triple which may not make sense,
e.g. amdgpu-apple-darwin.
Therefore, -march= is error-prone and not recommended for tests without
a target triple. The issue has been benign so far because we recognize
$unknown-apple-darwin as ELF instead of rejecting it outright.
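As an illustration, the typical per-test change is just the RUN line; the
-mcpu value below is only a placeholder:
```
; Before: -march= sets only the architecture; the OS/environment components
; still come from the host's default triple.
; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck %s

; After: the triple is spelled out explicitly and no longer depends on the host.
; RUN: llc -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck %s
```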
This patch changes AMDGPU tests so that they do not rely on the default
OS/environment components. Tests that would need further fixes are left
unchanged:
```
LLVM :: CodeGen/AMDGPU/fabs.f64.ll
LLVM :: CodeGen/AMDGPU/fabs.ll
LLVM :: CodeGen/AMDGPU/floor.ll
LLVM :: CodeGen/AMDGPU/fneg-fabs.f64.ll
LLVM :: CodeGen/AMDGPU/fneg-fabs.ll
LLVM :: CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll
LLVM :: CodeGen/AMDGPU/schedule-if-2.ll
```

# RUN: llc -mtriple=amdgcn -mcpu=gfx90a -run-pass=si-load-store-opt -verify-machineinstrs -o - %s | FileCheck %s

# The purpose of this test is to make sure we are combining relevant memory
# operations correctly with/without SCC bit.
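# The 0/16 immediate that follows the byte offset (4 or 8) on the BUFFER_STORE
# operands below is the cache-policy (cpol) value; 16 is assumed here to encode
# the SCC bit on gfx90a. Per the CHECK lines, a store pair is merged only when
# both stores agree on that bit (test1, test4) and is left alone otherwise
# (test2, test3).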

--- |
  define amdgpu_kernel void @test1(ptr addrspace(1) %out) {
    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
    store i32 123, ptr addrspace(1) %out.gep.1
    store i32 456, ptr addrspace(1) %out
    ret void
  }

  define amdgpu_kernel void @test2(ptr addrspace(1) %out) {
    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
    store i32 123, ptr addrspace(1) %out.gep.1
    store i32 456, ptr addrspace(1) %out
    ret void
  }

  define amdgpu_kernel void @test3(ptr addrspace(1) %out) {
    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
    store i32 123, ptr addrspace(1) %out.gep.1
    store i32 456, ptr addrspace(1) %out
    ret void
  }

  define amdgpu_kernel void @test4(ptr addrspace(1) %out) {
    %out.gep.1 = getelementptr i32, ptr addrspace(1) %out, i32 1
    store i32 123, ptr addrspace(1) %out.gep.1
    store i32 456, ptr addrspace(1) %out
    ret void
  }
...
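
# test1: neither store carries the SCC bit (cpol 0/0), so the two dword stores
# should be merged into a single 64-bit store.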
# CHECK: BUFFER_STORE_DWORDX2_OFFSET killed %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 0, 0, implicit $exec :: (store (s64) into %ir.out.gep.1, align 4, addrspace 1)
---
name: test1
liveins:
  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
body: |
  bb.0 (%ir-block.0):
    liveins: $sgpr0_sgpr1

    $vgpr0 = V_MOV_B32_e32 123, implicit $exec
    $vgpr1 = V_MOV_B32_e32 456, implicit $exec

    $sgpr2 = S_MOV_B32 -1
    $sgpr3 = S_MOV_B32 61440

    %0:sgpr_64 = COPY $sgpr0_sgpr1
    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
    %2:sgpr_32 = COPY $sgpr2
    %3:sgpr_32 = COPY $sgpr3
    %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2

    %5:vgpr_32 = COPY $vgpr0
    %6:vgpr_32 = COPY $vgpr1

    BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)

    S_ENDPGM 0
...
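
# test2: the first store sets cpol 16 and the second does not, so the pair must
# not be merged.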
# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 16, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 8, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
---
name: test2
liveins:
  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
body: |
  bb.0 (%ir-block.0):
    liveins: $sgpr0_sgpr1

    $vgpr0 = V_MOV_B32_e32 123, implicit $exec
    $vgpr1 = V_MOV_B32_e32 456, implicit $exec

    $sgpr2 = S_MOV_B32 -1
    $sgpr3 = S_MOV_B32 61440

    %0:sgpr_64 = COPY $sgpr0_sgpr1
    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
    %2:sgpr_32 = COPY $sgpr2
    %3:sgpr_32 = COPY $sgpr3
    %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2

    %5:vgpr_32 = COPY $vgpr0
    %6:vgpr_32 = COPY $vgpr1

    BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 16, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)

    S_ENDPGM 0
...
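
# test3: only the second store sets cpol 16, so the pair must not be merged.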
# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 8, 16, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
---
name: test3
liveins:
  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
body: |
  bb.0 (%ir-block.0):
    liveins: $sgpr0_sgpr1

    $vgpr0 = V_MOV_B32_e32 123, implicit $exec
    $vgpr1 = V_MOV_B32_e32 456, implicit $exec

    $sgpr2 = S_MOV_B32 -1
    $sgpr3 = S_MOV_B32 61440

    %0:sgpr_64 = COPY $sgpr0_sgpr1
    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
    %2:sgpr_32 = COPY $sgpr2
    %3:sgpr_32 = COPY $sgpr3
    %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2

    %5:vgpr_32 = COPY $vgpr0
    %6:vgpr_32 = COPY $vgpr1

    BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 16, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)

    S_ENDPGM 0
...
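
# test4: both stores set cpol 16, so they should be merged into a single 64-bit
# store that keeps the cpol value.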
# CHECK: BUFFER_STORE_DWORDX2_OFFSET killed %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 16, 0, implicit $exec :: (store (s64) into %ir.out.gep.1, align 4, addrspace 1)
---
name: test4
liveins:
  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
body: |
  bb.0 (%ir-block.0):
    liveins: $sgpr0_sgpr1

    $vgpr0 = V_MOV_B32_e32 123, implicit $exec
    $vgpr1 = V_MOV_B32_e32 456, implicit $exec

    $sgpr2 = S_MOV_B32 -1
    $sgpr3 = S_MOV_B32 61440

    %0:sgpr_64 = COPY $sgpr0_sgpr1
    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load (s64) from `ptr addrspace(4) undef`, addrspace 4)
    %2:sgpr_32 = COPY $sgpr2
    %3:sgpr_32 = COPY $sgpr3
    %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0, %2, %subreg.sub1, %3, %subreg.sub2

    %5:vgpr_32 = COPY $vgpr0
    %6:vgpr_32 = COPY $vgpr1

    BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 16, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 16, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)

    S_ENDPGM 0
...