Currently the return address ABI registers s[30:31], which fall in the call-clobbered register range, are added as a live-in on the function entry to preserve their value when we have calls, so that they get saved and restored around the calls. But the DWARF unwind information (CFI) needs to track where the return address resides in a frame, and the above approach makes it difficult to track the return address when the CFI information is emitted during frame lowering, due to the involvement of understanding the control flow. This patch moves the return address ABI registers s[30:31] into the callee-saved register range and stops adding a live-in for the return address registers, so that the CFI machinery will know where the return address resides when the CSR save/restore happens during frame lowering. Doing the above poses an issue: the return instruction would now use the undefined register `sgpr30_sgpr31`. This is resolved by hiding the return address register use by the return instruction through the `SI_RETURN` pseudo instruction, which doesn't take any input operands, until the `SI_RETURN` pseudo gets lowered to `S_SETPC_B64_return` during `expandPostRAPseudo()`. As an added benefit, this patch simplifies overall return instruction handling. Note: The AMDGPU CFI changes exist only in the downstream code, and another version of this patch will be posted for review for the downstream code. Reviewed By: arsenm, ronlieb Differential Revision: https://reviews.llvm.org/D114652
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -run-pass=none -o - %s | FileCheck %s

# The embedded IR module below provides the !alias.scope / !noalias metadata
# and the %ir.* value names referenced by the MIR function bodies that follow.
--- |
  ; ModuleID = 'test/CodeGen/AMDGPU/memcpy-scoped-aa.ll'
  source_filename = "test/CodeGen/AMDGPU/memcpy-scoped-aa.ll"
  target datalayout = "e-p:64:64-p1:64:64-p2:32:32-p3:32:32-p4:64:64-p5:32:32-p6:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-S32-A5-G1-ni:7"
  target triple = "amdgcn-amd-amdhsa"

  define i32 @test_memcpy(i32 addrspace(1)* nocapture %p, i32 addrspace(1)* nocapture readonly %q) #0 {
    %p0 = bitcast i32 addrspace(1)* %p to i8 addrspace(1)*
    %add.ptr = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 4
    %p1 = bitcast i32 addrspace(1)* %add.ptr to i8 addrspace(1)*
    tail call void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p0, i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
    %1 = bitcast i32 addrspace(1)* %q to <2 x i32> addrspace(1)*
    %2 = load <2 x i32>, <2 x i32> addrspace(1)* %1, align 4, !alias.scope !3, !noalias !0
    %v01 = extractelement <2 x i32> %2, i32 0
    %v12 = extractelement <2 x i32> %2, i32 1
    %add = add i32 %v01, %v12
    ret i32 %add
  }

  define i32 @test_memcpy_inline(i32 addrspace(1)* nocapture %p, i32 addrspace(1)* nocapture readonly %q) #0 {
    %p0 = bitcast i32 addrspace(1)* %p to i8 addrspace(1)*
    %add.ptr = getelementptr inbounds i32, i32 addrspace(1)* %p, i64 4
    %p1 = bitcast i32 addrspace(1)* %add.ptr to i8 addrspace(1)*
    tail call void @llvm.memcpy.inline.p1i8.p1i8.i64(i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p0, i8 addrspace(1)* noundef nonnull align 4 dereferenceable(16) %p1, i64 16, i1 false), !alias.scope !0, !noalias !3
    %1 = bitcast i32 addrspace(1)* %q to <2 x i32> addrspace(1)*
    %2 = load <2 x i32>, <2 x i32> addrspace(1)* %1, align 4, !alias.scope !3, !noalias !0
    %v01 = extractelement <2 x i32> %2, i32 0
    %v12 = extractelement <2 x i32> %2, i32 1
    %add = add i32 %v01, %v12
    ret i32 %add
  }

  ; Function Attrs: argmemonly nofree nounwind willreturn
  declare void @llvm.memcpy.p1i8.p1i8.i64(i8 addrspace(1)* noalias nocapture writeonly, i8 addrspace(1)* noalias nocapture readonly, i64, i1 immarg) #1

  ; Function Attrs: argmemonly nofree nounwind willreturn
  declare void @llvm.memcpy.inline.p1i8.p1i8.i64(i8 addrspace(1)* noalias nocapture writeonly, i8 addrspace(1)* noalias nocapture readonly, i64 immarg, i1 immarg) #1

  ; Function Attrs: convergent nounwind willreturn
  declare { i1, i32 } @llvm.amdgcn.if.i32(i1) #2

  ; Function Attrs: convergent nounwind willreturn
  declare { i1, i32 } @llvm.amdgcn.else.i32.i32(i32) #2

  ; Function Attrs: convergent nounwind readnone willreturn
  declare i32 @llvm.amdgcn.if.break.i32(i1, i32) #3

  ; Function Attrs: convergent nounwind willreturn
  declare i1 @llvm.amdgcn.loop.i32(i32) #2

  ; Function Attrs: convergent nounwind willreturn
  declare void @llvm.amdgcn.end.cf.i32(i32) #2

  attributes #0 = { "target-cpu"="gfx1010" }
  attributes #1 = { argmemonly nofree nounwind willreturn "target-cpu"="gfx1010" }
  attributes #2 = { convergent nounwind willreturn }
  attributes #3 = { convergent nounwind readnone willreturn }

  !0 = !{!1}
  !1 = distinct !{!1, !2, !"bax: %p"}
  !2 = distinct !{!2, !"bax"}
  !3 = !{!4}
  !4 = distinct !{!4, !2, !"bax: %q"}

...
---
name:            test_memcpy
# Machine-level scoped-AA metadata produced by memcpy lowering; the loads and
# stores below reference these nodes via !alias.scope / !noalias.
machineMetadataNodes:
  - '!9 = distinct !{!9, !7, !"Dst"}'
  - '!6 = distinct !{!6, !7, !"Src"}'
  - '!11 = !{!4, !6}'
  - '!5 = !{!1, !6}'
  - '!8 = !{!4, !9}'
  - '!10 = !{!1, !9}'
  - '!7 = distinct !{!7, !"MemcpyLoweringDomain"}'
body:             |
  bb.0 (%ir-block.0):
    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3

    ; CHECK-LABEL: name: test_memcpy
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr3
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
    ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY3]], %subreg.sub1
    ; CHECK: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
    ; CHECK: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128 = GLOBAL_LOAD_DWORDX4 [[COPY5]], 16, 0, implicit $exec :: (load (s128) from %ir.p1, align 4, !alias.scope !5, !noalias !8, addrspace 1)
    ; CHECK: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
    ; CHECK: GLOBAL_STORE_DWORDX4 [[COPY6]], killed [[GLOBAL_LOAD_DWORDX4_]], 0, 0, implicit $exec :: (store (s128) into %ir.p0, align 4, !alias.scope !10, !noalias !11, addrspace 1)
    ; CHECK: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
    ; CHECK: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2 killed [[COPY7]], 0, 0, implicit $exec :: (load (s64) from %ir.1, align 4, !alias.scope !3, !noalias !0, addrspace 1)
    ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub0
    ; CHECK: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub1
    ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 killed [[COPY8]], killed [[COPY9]], 0, implicit $exec
    ; CHECK: $vgpr0 = COPY [[V_ADD_U32_e64_]]
    ; CHECK: SI_RETURN implicit $vgpr0
    %3:vgpr_32 = COPY $vgpr3
    %2:vgpr_32 = COPY $vgpr2
    %1:vgpr_32 = COPY $vgpr1
    %0:vgpr_32 = COPY $vgpr0
    %17:vreg_64 = REG_SEQUENCE %2, %subreg.sub0, %3, %subreg.sub1
    %18:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
    %9:vreg_64 = COPY %18
    %8:vreg_128 = GLOBAL_LOAD_DWORDX4 %9, 16, 0, implicit $exec :: (load (s128) from %ir.p1, align 4, !alias.scope !5, !noalias !8, addrspace 1)
    %10:vreg_64 = COPY %18
    GLOBAL_STORE_DWORDX4 %10, killed %8, 0, 0, implicit $exec :: (store (s128) into %ir.p0, align 4, !alias.scope !10, !noalias !11, addrspace 1)
    %12:vreg_64 = COPY %17
    %11:vreg_64 = GLOBAL_LOAD_DWORDX2 killed %12, 0, 0, implicit $exec :: (load (s64) from %ir.1, align 4, !alias.scope !3, !noalias !0, addrspace 1)
    %13:vgpr_32 = COPY %11.sub0
    %14:vgpr_32 = COPY %11.sub1
    %15:vgpr_32 = V_ADD_U32_e64 killed %13, killed %14, 0, implicit $exec
    $vgpr0 = COPY %15
    SI_RETURN implicit $vgpr0

...
---
name:            test_memcpy_inline
# Same scoped-AA machine metadata as test_memcpy, listed in a different order;
# node numbering is shared with the loads/stores in the body below.
machineMetadataNodes:
  - '!6 = distinct !{!6, !7, !"Src"}'
  - '!7 = distinct !{!7, !"MemcpyLoweringDomain"}'
  - '!9 = distinct !{!9, !7, !"Dst"}'
  - '!11 = !{!4, !6}'
  - '!5 = !{!1, !6}'
  - '!8 = !{!4, !9}'
  - '!10 = !{!1, !9}'
body:             |
  bb.0 (%ir-block.0):
    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3

    ; CHECK-LABEL: name: test_memcpy_inline
    ; CHECK: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr3
    ; CHECK: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr2
    ; CHECK: [[COPY3:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; CHECK: [[COPY4:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; CHECK: [[REG_SEQUENCE:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY2]], %subreg.sub0, [[COPY1]], %subreg.sub1
    ; CHECK: [[REG_SEQUENCE1:%[0-9]+]]:vreg_64 = REG_SEQUENCE [[COPY4]], %subreg.sub0, [[COPY3]], %subreg.sub1
    ; CHECK: [[COPY5:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
    ; CHECK: [[GLOBAL_LOAD_DWORDX4_:%[0-9]+]]:vreg_128 = GLOBAL_LOAD_DWORDX4 [[COPY5]], 16, 0, implicit $exec :: (load (s128) from %ir.p1, align 4, !alias.scope !5, !noalias !8, addrspace 1)
    ; CHECK: [[COPY6:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE1]]
    ; CHECK: GLOBAL_STORE_DWORDX4 [[COPY6]], killed [[GLOBAL_LOAD_DWORDX4_]], 0, 0, implicit $exec :: (store (s128) into %ir.p0, align 4, !alias.scope !10, !noalias !11, addrspace 1)
    ; CHECK: [[COPY7:%[0-9]+]]:vreg_64 = COPY [[REG_SEQUENCE]]
    ; CHECK: [[GLOBAL_LOAD_DWORDX2_:%[0-9]+]]:vreg_64 = GLOBAL_LOAD_DWORDX2 killed [[COPY7]], 0, 0, implicit $exec :: (load (s64) from %ir.1, align 4, !alias.scope !3, !noalias !0, addrspace 1)
    ; CHECK: [[COPY8:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub0
    ; CHECK: [[COPY9:%[0-9]+]]:vgpr_32 = COPY [[GLOBAL_LOAD_DWORDX2_]].sub1
    ; CHECK: [[V_ADD_U32_e64_:%[0-9]+]]:vgpr_32 = V_ADD_U32_e64 killed [[COPY8]], killed [[COPY9]], 0, implicit $exec
    ; CHECK: $vgpr0 = COPY [[V_ADD_U32_e64_]]
    ; CHECK: SI_RETURN implicit $vgpr0
    %3:vgpr_32 = COPY $vgpr3
    %2:vgpr_32 = COPY $vgpr2
    %1:vgpr_32 = COPY $vgpr1
    %0:vgpr_32 = COPY $vgpr0
    %17:vreg_64 = REG_SEQUENCE %2, %subreg.sub0, %3, %subreg.sub1
    %18:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
    %9:vreg_64 = COPY %18
    %8:vreg_128 = GLOBAL_LOAD_DWORDX4 %9, 16, 0, implicit $exec :: (load (s128) from %ir.p1, align 4, !alias.scope !5, !noalias !8, addrspace 1)
    %10:vreg_64 = COPY %18
    GLOBAL_STORE_DWORDX4 %10, killed %8, 0, 0, implicit $exec :: (store (s128) into %ir.p0, align 4, !alias.scope !10, !noalias !11, addrspace 1)
    %12:vreg_64 = COPY %17
    %11:vreg_64 = GLOBAL_LOAD_DWORDX2 killed %12, 0, 0, implicit $exec :: (load (s64) from %ir.1, align 4, !alias.scope !3, !noalias !0, addrspace 1)
    %13:vgpr_32 = COPY %11.sub0
    %14:vgpr_32 = COPY %11.sub1
    %15:vgpr_32 = V_ADD_U32_e64 killed %13, killed %14, 0, implicit $exec
    $vgpr0 = COPY %15
    SI_RETURN implicit $vgpr0

...