Files
clang-p2996/llvm/test/CodeGen/PowerPC/vec_shuffle_p8vector.ll
Qiu Chaofan 300e1293de [PowerPC] Disable perfect shuffle by default
We are going to remove the old 'perfect shuffle' optimization since it
brings a performance penalty in hot loops over vectors. For example, in the
following loop, where two shufflevector operations share the same mask:

  %v.1 = shufflevector ... <0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27>
  %v.2 = shufflevector ... <0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27>

The generated instructions will be `vmrglw-vmrghw-vmrglw-vmrghw` instead
of `vperm-vperm`. In some large loop cases, this causes 20%+ performance
penalty.

The original attempt to resolve this is to pre-record masks of every
shufflevector operation in DAG, but that is somewhat complex and brings
unnecessary computation (to scan all nodes) in optimization. Here we
disable it by default. There are indeed some cases that become worse after
this change; they will be fixed in a more careful way in future patches.

Reviewed By: jsji

Differential Revision: https://reviews.llvm.org/D121082
2022-03-15 15:52:24 +08:00

94 lines
3.8 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64-unknown-linux-gnu -mattr=+power8-vector < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64-ibm-aix-xcoff -vec-extabi -mattr=+power8-vector < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc64-unknown-linux-gnu < %s | FileCheck -check-prefix=CHECK-PWR7 %s
; RUN: llc -verify-machineinstrs -mcpu=pwr7 -mtriple=powerpc64-ibm-aix-xcoff -vec-extabi < %s | FileCheck -check-prefix=CHECK-PWR7-AIX %s
; Unary pack: gathers the odd i32 words (indices 1 and 3) of a single vector
; into both result halves -- equivalent to shuffle mask <1,3,1,3> on the
; <4 x i32> view.  With +power8-vector this selects a single vpkudum
; (unary form: both source operands are the same register); the pwr7
; fallback is a vperm driven by a constant-pool permute mask.
define void @VPKUDUM_unary(<2 x i64>* %A) {
; CHECK-LABEL: VPKUDUM_unary:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: vpkudum 2, 2, 2
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: blr
;
; CHECK-PWR7-LABEL: VPKUDUM_unary:
; CHECK-PWR7: # %bb.0: # %entry
; CHECK-PWR7-NEXT: addis 4, 2, .LCPI0_0@toc@ha
; CHECK-PWR7-NEXT: lxvw4x 34, 0, 3
; CHECK-PWR7-NEXT: addi 4, 4, .LCPI0_0@toc@l
; CHECK-PWR7-NEXT: lxvw4x 35, 0, 4
; CHECK-PWR7-NEXT: vperm 2, 2, 2, 3
; CHECK-PWR7-NEXT: stxvw4x 34, 0, 3
; CHECK-PWR7-NEXT: blr
;
; CHECK-PWR7-AIX-LABEL: VPKUDUM_unary:
; CHECK-PWR7-AIX: # %bb.0: # %entry
; CHECK-PWR7-AIX-NEXT: ld 4, L..C0(2) # %const.0
; CHECK-PWR7-AIX-NEXT: lxvw4x 34, 0, 3
; CHECK-PWR7-AIX-NEXT: lxvw4x 35, 0, 4
; CHECK-PWR7-AIX-NEXT: vperm 2, 2, 2, 3
; CHECK-PWR7-AIX-NEXT: stxvw4x 34, 0, 3
; CHECK-PWR7-AIX-NEXT: blr
entry:
; View the loaded <2 x i64> as <4 x i32> so individual 32-bit words can be
; extracted.
%tmp = load <2 x i64>, <2 x i64>* %A
%tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
; Pull out words 1 and 3 (the low word of each doubleword in big-endian
; lane numbering).
%tmp3 = extractelement <4 x i32> %tmp2, i32 1
%tmp4 = extractelement <4 x i32> %tmp2, i32 3
; Rebuild the vector as <w1, w3, w1, w3> -- the insert chain spells out the
; <1,3,1,3> shuffle element by element so the backend must recognize the
; pack pattern from scalar extract/insert, not from a shufflevector node.
%tmp5 = insertelement <4 x i32> undef, i32 %tmp3, i32 0
%tmp6 = insertelement <4 x i32> %tmp5, i32 %tmp4, i32 1
%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 2
%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 3
; Cast back and store over the input, so codegen is a pure
; load/pack/store sequence.
%tmp9 = bitcast <4 x i32> %tmp8 to <2 x i64>
store <2 x i64> %tmp9, <2 x i64>* %A
ret void
}
; Binary pack: gathers the odd i32 words (indices 1 and 3) from each of two
; vectors -- %A supplying the low result half and %B the high half --
; equivalent to shuffle mask <1,3,5,7> on the concatenated <4 x i32> views.
; With +power8-vector this selects a single two-operand vpkudum; the pwr7
; fallback is a vperm driven by a constant-pool permute mask.
define void @VPKUDUM(<2 x i64>* %A, <2 x i64>* %B) {
; CHECK-LABEL: VPKUDUM:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lxvw4x 34, 0, 3
; CHECK-NEXT: lxvw4x 35, 0, 4
; CHECK-NEXT: vpkudum 2, 2, 3
; CHECK-NEXT: stxvw4x 34, 0, 3
; CHECK-NEXT: blr
;
; CHECK-PWR7-LABEL: VPKUDUM:
; CHECK-PWR7: # %bb.0: # %entry
; CHECK-PWR7-NEXT: addis 5, 2, .LCPI1_0@toc@ha
; CHECK-PWR7-NEXT: lxvw4x 34, 0, 4
; CHECK-PWR7-NEXT: lxvw4x 35, 0, 3
; CHECK-PWR7-NEXT: addi 4, 5, .LCPI1_0@toc@l
; CHECK-PWR7-NEXT: lxvw4x 36, 0, 4
; CHECK-PWR7-NEXT: vperm 2, 3, 2, 4
; CHECK-PWR7-NEXT: stxvw4x 34, 0, 3
; CHECK-PWR7-NEXT: blr
;
; CHECK-PWR7-AIX-LABEL: VPKUDUM:
; CHECK-PWR7-AIX: # %bb.0: # %entry
; CHECK-PWR7-AIX-NEXT: ld 5, L..C1(2) # %const.0
; CHECK-PWR7-AIX-NEXT: lxvw4x 34, 0, 4
; CHECK-PWR7-AIX-NEXT: lxvw4x 35, 0, 3
; CHECK-PWR7-AIX-NEXT: lxvw4x 36, 0, 5
; CHECK-PWR7-AIX-NEXT: vperm 2, 3, 2, 4
; CHECK-PWR7-AIX-NEXT: stxvw4x 34, 0, 3
; CHECK-PWR7-AIX-NEXT: blr
entry:
; Load both sources and view them as <4 x i32> so individual 32-bit words
; can be extracted.
%tmp = load <2 x i64>, <2 x i64>* %A
%tmp2 = bitcast <2 x i64> %tmp to <4 x i32>
%tmp3 = load <2 x i64>, <2 x i64>* %B
%tmp4 = bitcast <2 x i64> %tmp3 to <4 x i32>
; Words 1 and 3 from each source vector.
%tmp5 = extractelement <4 x i32> %tmp2, i32 1
%tmp6 = extractelement <4 x i32> %tmp2, i32 3
%tmp7 = extractelement <4 x i32> %tmp4, i32 1
%tmp8 = extractelement <4 x i32> %tmp4, i32 3
; Rebuild as <A1, A3, B1, B3> -- the insert chain spells out the <1,3,5,7>
; shuffle element by element so the backend must recognize the pack pattern
; from scalar extract/insert, not from a shufflevector node.
%tmp9 = insertelement <4 x i32> undef, i32 %tmp5, i32 0
%tmp10 = insertelement <4 x i32> %tmp9, i32 %tmp6, i32 1
%tmp11 = insertelement <4 x i32> %tmp10, i32 %tmp7, i32 2
%tmp12 = insertelement <4 x i32> %tmp11, i32 %tmp8, i32 3
; Cast back and store the packed result over %A.
%tmp13 = bitcast <4 x i32> %tmp12 to <2 x i64>
store <2 x i64> %tmp13, <2 x i64>* %A
ret void
}