This patch changes the PowerPC backend to generate VSX load/store instructions instead of VMX load/store instructions for all vector loads/stores on little-endian (LE) Power8 and earlier. VMX load/store instructions require the vector to be 16-byte aligned, so a vector load/store of a misaligned vector fails with VMX instructions. `gcc` already generates VSX instructions in this situation; they allow unaligned access but require a swap instruction after loading/before storing. This is not an issue on BE, where we already emit VSX instructions because no swap is required, nor on Power9 and up, where `lxv[x]`/`stxv[x]` allow unaligned access and do not require swaps. This patch also delays the VSX load/store combines for LE until after LegalizeOps so that other load/store combines take priority.

Reviewed By: #powerpc, stefanp

Differential Revision: https://reviews.llvm.org/D127309
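As a minimal sketch of the access pattern this change targets (the function `@copy_unaligned` is hypothetical and not part of the test below): a vector access that is not known to be 16-byte aligned cannot use `lvx`/`stvx`, which ignore the low four bits of the effective address, so on LE Power8 and earlier it must be lowered to `lxvd2x`+`xxswapd` for the load and `xxswapd`+`stxvd2x` for the store.

; Hypothetical illustration only: "align 1" makes the alignment unknown,
; forcing the VSX (unaligned-capable) lowering on LE pre-Power9.
define void @copy_unaligned(<4 x i32>* %src, <4 x i32>* %dst) {
  %v = load <4 x i32>, <4 x i32>* %src, align 1
  store <4 x i32> %v, <4 x i32>* %dst, align 1
  ret void
}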
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -O0 -mcpu=pwr7 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,BE
; RUN: llc -verify-machineinstrs -O0 -mcpu=pwr7 -mtriple=powerpc64-ibm-aix-xcoff -vec-extabi < %s | FileCheck %s --check-prefixes=CHECK,BE
; RUN: llc -verify-machineinstrs -O0 -mcpu=pwr7 -mtriple=powerpc64le-unknown-linux-gnu < %s | FileCheck %s --check-prefixes=CHECK,LE
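
; test1: on BE the accesses use lxvw4x/stxvw4x directly; on LE pre-Power9
; each lxvd2x/stxvd2x is paired with an xxswapd to restore element order.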
define void @test1(<4 x i32>* %P1, <4 x i32>* %P2, <4 x float>* %P3) nounwind {
; BE-LABEL: test1:
; BE: # %bb.0:
; BE-NEXT: lxvw4x 0, 0, 3
; BE-NEXT: vspltisb 2, -1
; BE-NEXT: vslw 2, 2, 2
; BE-NEXT: xxland 0, 0, 34
; BE-NEXT: stxvw4x 0, 0, 3
; BE-NEXT: lxvw4x 0, 0, 4
; BE-NEXT: xxlandc 0, 0, 34
; BE-NEXT: stxvw4x 0, 0, 4
; BE-NEXT: lxvw4x 0, 0, 5
; BE-NEXT: xvabssp 0, 0
; BE-NEXT: stxvw4x 0, 0, 5
; BE-NEXT: blr
;
; LE-LABEL: test1:
; LE: # %bb.0:
; LE-NEXT: lxvd2x 0, 0, 3
; LE-NEXT: xxswapd 34, 0
; LE-NEXT: vspltisb 3, -1
; LE-NEXT: vslw 3, 3, 3
; LE-NEXT: xxland 0, 34, 35
; LE-NEXT: xxswapd 0, 0
; LE-NEXT: stxvd2x 0, 0, 3
; LE-NEXT: lxvd2x 0, 0, 4
; LE-NEXT: xxswapd 34, 0
; LE-NEXT: xxlandc 0, 34, 35
; LE-NEXT: xxswapd 0, 0
; LE-NEXT: stxvd2x 0, 0, 4
; LE-NEXT: lxvd2x 0, 0, 5
; LE-NEXT: xxswapd 34, 0
; LE-NEXT: xvabssp 0, 34
; LE-NEXT: xxswapd 0, 0
; LE-NEXT: stxvd2x 0, 0, 5
; LE-NEXT: blr
  %tmp = load <4 x i32>, <4 x i32>* %P1 ; <<4 x i32>> [#uses=1]
  %tmp4 = and <4 x i32> %tmp, < i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648 > ; <<4 x i32>> [#uses=1]
  store <4 x i32> %tmp4, <4 x i32>* %P1
  %tmp7 = load <4 x i32>, <4 x i32>* %P2 ; <<4 x i32>> [#uses=1]
  %tmp9 = and <4 x i32> %tmp7, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 > ; <<4 x i32>> [#uses=1]
  store <4 x i32> %tmp9, <4 x i32>* %P2
  %tmp.upgrd.1 = load <4 x float>, <4 x float>* %P3 ; <<4 x float>> [#uses=1]
  %tmp11 = bitcast <4 x float> %tmp.upgrd.1 to <4 x i32> ; <<4 x i32>> [#uses=1]
  %tmp12 = and <4 x i32> %tmp11, < i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647 > ; <<4 x i32>> [#uses=1]
  %tmp13 = bitcast <4 x i32> %tmp12 to <4 x float> ; <<4 x float>> [#uses=1]
  store <4 x float> %tmp13, <4 x float>* %P3
  ret void
}
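
; test_30: 30 is outside the 5-bit signed splat-immediate range (-16..15),
; so it is materialized as 15 + 15.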
define <4 x i32> @test_30() nounwind {
; CHECK-LABEL: test_30:
; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 2, 15
; CHECK-NEXT: vadduwm 2, 2, 2
; CHECK-NEXT: blr
  ret <4 x i32> < i32 30, i32 30, i32 30, i32 30 >
}
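
; test_29: 29 is materialized from two splat immediates as 13 - (-16).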
define <4 x i32> @test_29() nounwind {
; CHECK-LABEL: test_29:
; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 3, -16
; CHECK-NEXT: vspltisw 2, 13
; CHECK-NEXT: vsubuwm 2, 2, 3
; CHECK-NEXT: blr
  ret <4 x i32> < i32 29, i32 29, i32 29, i32 29 >
}
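
; test_n30: -30 is materialized as (-15) + (-15).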
define <8 x i16> @test_n30() nounwind {
; CHECK-LABEL: test_n30:
; CHECK: # %bb.0:
; CHECK-NEXT: vspltish 2, -15
; CHECK-NEXT: vadduhm 2, 2, 2
; CHECK-NEXT: blr
  ret <8 x i16> < i16 -30, i16 -30, i16 -30, i16 -30, i16 -30, i16 -30, i16 -30, i16 -30 >
}
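
; test_n104: -104 (0x98) is built by splatting -13 (0xf3) and shifting each
; byte left by its own low three bits (0xf3 & 7 == 3): 0xf3 << 3 = 0x98.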
define <16 x i8> @test_n104() nounwind {
; CHECK-LABEL: test_n104:
; CHECK: # %bb.0:
; CHECK-NEXT: vspltisb 2, -13
; CHECK-NEXT: vslb 2, 2, 2
; CHECK-NEXT: blr
  ret <16 x i8> < i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104, i8 -104 >
}
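
; test_vsldoi: 512 (0x00000200) comes from splatting 2 and rotating the
; vector left by one byte with vsldoi.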
define <4 x i32> @test_vsldoi() nounwind {
; CHECK-LABEL: test_vsldoi:
; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 2, 2
; CHECK-NEXT: vsldoi 2, 2, 2, 1
; CHECK-NEXT: blr
  ret <4 x i32> < i32 512, i32 512, i32 512, i32 512 >
}
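
; test_vsldoi_65023: 65023 (0xfdff) comes from splatting -3 (0xfffd) and
; rotating the vector bytes by one.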
define <8 x i16> @test_vsldoi_65023() nounwind {
; CHECK-LABEL: test_vsldoi_65023:
; CHECK: # %bb.0:
; CHECK-NEXT: vspltish 2, -3
; CHECK-NEXT: vsldoi 2, 2, 2, 1
; CHECK-NEXT: blr
  ret <8 x i16> < i16 65023, i16 65023, i16 65023, i16 65023, i16 65023, i16 65023, i16 65023, i16 65023 >
}
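
; test_vsldoi_x16: -131073 (0xfffdffff) is the same trick with a two-byte
; rotation of the splatted -3 (0xfffffffd).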
define <4 x i32> @test_vsldoi_x16() nounwind {
; CHECK-LABEL: test_vsldoi_x16:
; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 2, -3
; CHECK-NEXT: vsldoi 2, 2, 2, 2
; CHECK-NEXT: blr
  ret <4 x i32> <i32 -131073, i32 -131073, i32 -131073, i32 -131073>
}
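
; test_vsldoi_x24: -33554433 (0xfdffffff) uses the same trick with a
; three-byte rotation.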
define <4 x i32> @test_vsldoi_x24() nounwind {
; CHECK-LABEL: test_vsldoi_x24:
; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 2, -3
; CHECK-NEXT: vsldoi 2, 2, 2, 3
; CHECK-NEXT: blr
  ret <4 x i32> <i32 -33554433, i32 -33554433, i32 -33554433, i32 -33554433>
}
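
; test_rol: -11534337 (0xff4fffff) is built by splatting -12 (0xfffffff4)
; and rotating each word left by its own low five bits (0xf4 & 31 == 20).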
define <4 x i32> @test_rol() nounwind {
; CHECK-LABEL: test_rol:
; CHECK: # %bb.0:
; CHECK-NEXT: vspltisw 2, -12
; CHECK-NEXT: vrlw 2, 2, 2
; CHECK-NEXT: blr
  ret <4 x i32> < i32 -11534337, i32 -11534337, i32 -11534337, i32 -11534337 >
}