Similar to 806761a762.
For IR files without a target triple, -mtriple= specifies the full
target triple, while -march= merely sets the architecture component of
the default target triple, which can leave a triple that makes no
sense, e.g. amdgcn-apple-darwin.
Therefore, -march= is error-prone and not recommended for tests without
a target triple. The issue has been benign so far because we recognize
$unknown-apple-darwin as ELF instead of rejecting it outright.
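
As a minimal illustration (a generic RUN line, not copied verbatim from
any of the changed tests), the mechanical change pins the triple instead
of only the architecture:
```
; Before: only the architecture is set; OS/environment come from the host's
; default triple, so on a Darwin host the effective triple is amdgcn-apple-darwin.
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s

; After: the target triple is fully specified and host-independent.
; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck %s
```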
This patch changes AMDGPU tests so that they do not rely on the default
OS/environment components. Tests that need further fixes are left unchanged:
```
LLVM :: CodeGen/AMDGPU/fabs.f64.ll
LLVM :: CodeGen/AMDGPU/fabs.ll
LLVM :: CodeGen/AMDGPU/floor.ll
LLVM :: CodeGen/AMDGPU/fneg-fabs.f64.ll
LLVM :: CodeGen/AMDGPU/fneg-fabs.ll
LLVM :: CodeGen/AMDGPU/r600-infinite-loop-bug-while-reorganizing-vector.ll
LLVM :: CodeGen/AMDGPU/schedule-if-2.ll
```
```
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=BOTH %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=BOTH %s

; BOTH-LABEL: {{^}}s_rotr_i64:
; BOTH-DAG: s_sub_i32
; BOTH-DAG: s_lshr_b64
; BOTH-DAG: s_lshl_b64
; BOTH: s_or_b64
define amdgpu_kernel void @s_rotr_i64(ptr addrspace(1) %in, i64 %x, i64 %y) {
entry:
  %tmp0 = sub i64 64, %y
  %tmp1 = shl i64 %x, %tmp0
  %tmp2 = lshr i64 %x, %y
  %tmp3 = or i64 %tmp1, %tmp2
  store i64 %tmp3, ptr addrspace(1) %in
  ret void
}

; BOTH-LABEL: {{^}}v_rotr_i64:
; BOTH-DAG: v_sub_{{[iu]}}32
; SI-DAG: v_lshr_b64
; SI-DAG: v_lshl_b64
; VI-DAG: v_lshrrev_b64
; VI-DAG: v_lshlrev_b64
; BOTH: v_or_b32
; BOTH: v_or_b32
define amdgpu_kernel void @v_rotr_i64(ptr addrspace(1) %in, ptr addrspace(1) %xptr, ptr addrspace(1) %yptr) {
entry:
  %x = load i64, ptr addrspace(1) %xptr, align 8
  %y = load i64, ptr addrspace(1) %yptr, align 8
  %tmp0 = sub i64 64, %y
  %tmp1 = shl i64 %x, %tmp0
  %tmp2 = lshr i64 %x, %y
  %tmp3 = or i64 %tmp1, %tmp2
  store i64 %tmp3, ptr addrspace(1) %in
  ret void
}

; BOTH-LABEL: {{^}}s_rotr_v2i64:
define amdgpu_kernel void @s_rotr_v2i64(ptr addrspace(1) %in, <2 x i64> %x, <2 x i64> %y) {
entry:
  %tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
  %tmp1 = shl <2 x i64> %x, %tmp0
  %tmp2 = lshr <2 x i64> %x, %y
  %tmp3 = or <2 x i64> %tmp1, %tmp2
  store <2 x i64> %tmp3, ptr addrspace(1) %in
  ret void
}

; BOTH-LABEL: {{^}}v_rotr_v2i64:
define amdgpu_kernel void @v_rotr_v2i64(ptr addrspace(1) %in, ptr addrspace(1) %xptr, ptr addrspace(1) %yptr) {
entry:
  %x = load <2 x i64>, ptr addrspace(1) %xptr, align 8
  %y = load <2 x i64>, ptr addrspace(1) %yptr, align 8
  %tmp0 = sub <2 x i64> <i64 64, i64 64>, %y
  %tmp1 = shl <2 x i64> %x, %tmp0
  %tmp2 = lshr <2 x i64> %x, %y
  %tmp3 = or <2 x i64> %tmp1, %tmp2
  store <2 x i64> %tmp3, ptr addrspace(1) %in
  ret void
}
```