Currently the return address ABI registers s[30:31], which fall in the call clobbered register range, are added as a live-in on the function entry to preserve their value when we have calls, so that they get saved and restored around the calls. But the DWARF unwind information (CFI) needs to track where the return address resides in a frame, and the above approach makes it difficult to track the return address when the CFI information is emitted during the frame lowering, due to the involvement of understanding the control flow. This patch moves the return address ABI registers s[30:31] into the callee saved registers range and stops adding a live-in for the return address registers, so that the CFI machinery will know where the return address resides when CSR save/restore happen during the frame lowering. Doing the above poses an issue that the return instruction now uses the undefined register `sgpr30_sgpr31`. This is resolved by hiding the return address register use by the return instruction through the `SI_RETURN` pseudo instruction, which doesn't take any input operands, until the `SI_RETURN` pseudo gets lowered to the `S_SETPC_B64_return` during `expandPostRAPseudo()`. As an added benefit, this patch simplifies overall return instruction handling. Note: The AMDGPU CFI changes exist only in the downstream code, and another version of this patch will be posted for review for the downstream code. Reviewed By: arsenm, ronlieb Differential Revision: https://reviews.llvm.org/D114652
56 lines
2.2 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -o - %s | FileCheck %s

declare hidden i32 addrspace(1)* @ext(i8 addrspace(1)*)

; Non-tail call whose result carries an `align 4` assumption. The expected
; code spills the return address pair s30/s31 (lanes 0/1) and s33 (lane 2)
; into the reserved VGPR v40 around the call, and returns via
; s_setpc_b64 s[30:31] — exercising the CSR-based return-address handling.
define i32 addrspace(1)* @call_assert_align() {
; CHECK-LABEL: call_assert_align:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: s_or_saveexec_b64 s[16:17], -1
; CHECK-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; CHECK-NEXT: s_mov_b64 exec, s[16:17]
; CHECK-NEXT: v_writelane_b32 v40, s33, 2
; CHECK-NEXT: s_mov_b32 s33, s32
; CHECK-NEXT: s_addk_i32 s32, 0x400
; CHECK-NEXT: v_writelane_b32 v40, s30, 0
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: v_writelane_b32 v40, s31, 1
; CHECK-NEXT: s_getpc_b64 s[16:17]
; CHECK-NEXT: s_add_u32 s16, s16, ext@rel32@lo+4
; CHECK-NEXT: s_addc_u32 s17, s17, ext@rel32@hi+12
; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17]
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: global_store_dword v[0:1], v2, off
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: v_readlane_b32 s31, v40, 1
; CHECK-NEXT: v_readlane_b32 s30, v40, 0
; CHECK-NEXT: s_addk_i32 s32, 0xfc00
; CHECK-NEXT: v_readlane_b32 s33, v40, 2
; CHECK-NEXT: s_or_saveexec_b64 s[4:5], -1
; CHECK-NEXT: buffer_load_dword v40, off, s[0:3], s32 ; 4-byte Folded Reload
; CHECK-NEXT: s_mov_b64 exec, s[4:5]
; CHECK-NEXT: s_waitcnt vmcnt(0)
; CHECK-NEXT: s_setpc_b64 s[30:31]
entry:
  %call = call align 4 i32 addrspace(1)* @ext(i8 addrspace(1)* null)
  ; Volatile store keeps the returned pointer live past the call.
  store volatile i32 0, i32 addrspace(1)* %call
  ret i32 addrspace(1)* %call
}
; Tail-call variant of the same `align 4` call: the expected code sets up no
; frame and performs no s30/s31 spill — it branches straight to @ext with
; s_setpc_b64, leaving the caller's return address registers untouched.
define i32 addrspace(1)* @tail_call_assert_align() {
; CHECK-LABEL: tail_call_assert_align:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_getpc_b64 s[16:17]
; CHECK-NEXT: s_add_u32 s16, s16, ext@rel32@lo+4
; CHECK-NEXT: s_addc_u32 s17, s17, ext@rel32@hi+12
; CHECK-NEXT: s_setpc_b64 s[16:17]
entry:
  %call = tail call align 4 i32 addrspace(1)* @ext(i8 addrspace(1)* null)
  ret i32 addrspace(1)* %call
}
|