# RUN: llc -march=amdgcn -mcpu=gfx90a -run-pass=si-load-store-opt -verify-machineinstrs -o - %s | FileCheck %s
# The purpose of this test is to make sure the si-load-store-opt pass combines
# adjacent memory operations correctly, merging them only when their SCC
# cache-policy bits match.
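# In the BUFFER_STORE_DWORD_OFFSET instructions below, the immediate operand
# following the offset is the cache policy: 16 sets the SCC bit, 0 leaves it
# clear.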
--- |
  define amdgpu_kernel void @test1(i32 addrspace(1)* %out) {
    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
    store i32 123, i32 addrspace(1)* %out.gep.1
    store i32 456, i32 addrspace(1)* %out
    ret void
  }

  define amdgpu_kernel void @test2(i32 addrspace(1)* %out) {
    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
    store i32 123, i32 addrspace(1)* %out.gep.1
    store i32 456, i32 addrspace(1)* %out
    ret void
  }

  define amdgpu_kernel void @test3(i32 addrspace(1)* %out) {
    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
    store i32 123, i32 addrspace(1)* %out.gep.1
    store i32 456, i32 addrspace(1)* %out
    ret void
  }

  define amdgpu_kernel void @test4(i32 addrspace(1)* %out) {
    %out.gep.1 = getelementptr i32, i32 addrspace(1)* %out, i32 1
    store i32 123, i32 addrspace(1)* %out.gep.1
    store i32 456, i32 addrspace(1)* %out
    ret void
  }
...
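# test1: Neither store sets the SCC bit (cache policy 0 on both), so the two
# dword stores are merged into a single BUFFER_STORE_DWORDX2.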
# CHECK: BUFFER_STORE_DWORDX2_OFFSET killed %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 0, 0, 0, implicit $exec :: (store (s64) into %ir.out.gep.1, align 4, addrspace 1)
---
name: test1
liveins:
  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
body: |
  bb.0 (%ir-block.0):
    liveins: $sgpr0_sgpr1

    $vgpr0 = V_MOV_B32_e32 123, implicit $exec
    $vgpr1 = V_MOV_B32_e32 456, implicit $exec
    $sgpr2 = S_MOV_B32 -1
    $sgpr3 = S_MOV_B32 61440
    %0:sgpr_64 = COPY $sgpr0_sgpr1
    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
    %2:sgpr_32 = COPY $sgpr2
    %3:sgpr_32 = COPY $sgpr3
    %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0_sub1, %2, %subreg.sub2, %3, %subreg.sub3
    %5:vgpr_32 = COPY $vgpr0
    %6:vgpr_32 = COPY $vgpr1
    BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 0, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 0, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    S_ENDPGM 0
...
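# test2: Only the first store sets the SCC bit (cache policy 16); the
# mismatched cache policies prevent the stores from being merged.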
# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 16, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 8, 0, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
---
name: test2
liveins:
  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
body: |
  bb.0 (%ir-block.0):
    liveins: $sgpr0_sgpr1

    $vgpr0 = V_MOV_B32_e32 123, implicit $exec
    $vgpr1 = V_MOV_B32_e32 456, implicit $exec
    $sgpr2 = S_MOV_B32 -1
    $sgpr3 = S_MOV_B32 61440
    %0:sgpr_64 = COPY $sgpr0_sgpr1
    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
    %2:sgpr_32 = COPY $sgpr2
    %3:sgpr_32 = COPY $sgpr3
    %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0_sub1, %2, %subreg.sub2, %3, %subreg.sub3
    %5:vgpr_32 = COPY $vgpr0
    %6:vgpr_32 = COPY $vgpr1
    BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 16, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 0, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    S_ENDPGM 0
...
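# test3: Only the second store sets the SCC bit; the mismatch again prevents
# merging.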
# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 0, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
# CHECK: BUFFER_STORE_DWORD_OFFSET %{{[0-9]+}}, %{{[0-9]+}}, 0, 8, 16, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
---
name: test3
liveins:
  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
body: |
  bb.0 (%ir-block.0):
    liveins: $sgpr0_sgpr1

    $vgpr0 = V_MOV_B32_e32 123, implicit $exec
    $vgpr1 = V_MOV_B32_e32 456, implicit $exec
    $sgpr2 = S_MOV_B32 -1
    $sgpr3 = S_MOV_B32 61440
    %0:sgpr_64 = COPY $sgpr0_sgpr1
    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
    %2:sgpr_32 = COPY $sgpr2
    %3:sgpr_32 = COPY $sgpr3
    %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0_sub1, %2, %subreg.sub2, %3, %subreg.sub3
    %5:vgpr_32 = COPY $vgpr0
    %6:vgpr_32 = COPY $vgpr1
    BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 0, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 16, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    S_ENDPGM 0
...
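# test4: Both stores set the SCC bit, so they are merged and the SCC bit is
# preserved on the combined 64-bit store.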
# CHECK: BUFFER_STORE_DWORDX2_OFFSET killed %{{[0-9]+}}, %{{[0-9]+}}, 0, 4, 16, 0, 0, implicit $exec :: (store (s64) into %ir.out.gep.1, align 4, addrspace 1)
---
name: test4
liveins:
  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
body: |
  bb.0 (%ir-block.0):
    liveins: $sgpr0_sgpr1

    $vgpr0 = V_MOV_B32_e32 123, implicit $exec
    $vgpr1 = V_MOV_B32_e32 456, implicit $exec
    $sgpr2 = S_MOV_B32 -1
    $sgpr3 = S_MOV_B32 61440
    %0:sgpr_64 = COPY $sgpr0_sgpr1
    %1:sgpr_64 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (dereferenceable invariant load (s64) from `i64 addrspace(4)* undef`, addrspace 4)
    %2:sgpr_32 = COPY $sgpr2
    %3:sgpr_32 = COPY $sgpr3
    %4:sgpr_128 = REG_SEQUENCE %1, %subreg.sub0_sub1, %2, %subreg.sub2, %3, %subreg.sub3
    %5:vgpr_32 = COPY $vgpr0
    %6:vgpr_32 = COPY $vgpr1
    BUFFER_STORE_DWORD_OFFSET %5, %4, 0, 4, 16, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    BUFFER_STORE_DWORD_OFFSET %6, %4, 0, 8, 16, 0, 0, implicit $exec :: (store (s32) into %ir.out.gep.1, addrspace 1)
    S_ENDPGM 0
...