Files
clang-p2996/llvm/test/CodeGen/X86/ymm-ordering.ll
Max Kazantsev 69a3acffdf [Test] We can benefit from pipelining of ymm load/stores
This patch demonstrates a scenario where we need to load and store a single
64-byte value, which is done with two ymm loads and two ymm stores under AVX.
The current codegen chooses the following sequence:

  load ymm0
  load ymm1
  store ymm1
  store ymm0

If we instead stored ymm0 before ymm1, the 2nd load and the 1st store could
execute in parallel.
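
The improved sequence would be (a sketch, using the same registers as above):

  load ymm0
  load ymm1
  store ymm0
  store ymm1

Here store ymm0 depends only on the first load, so it can issue while the
second load is still in flight.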
2021-07-15 17:15:14 +07:00


; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx | FileCheck %s
; TODO: If we stored ymm0 before ymm1, then we could execute the 2nd load and
; the 1st store in parallel.
define void @test_01(i8* %src, i8* %dest) {
; CHECK-LABEL: test_01:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmovups (%rdi), %ymm0
; CHECK-NEXT:    vmovups 32(%rdi), %ymm1
; CHECK-NEXT:    vmovups %ymm1, 32(%rsi)
; CHECK-NEXT:    vmovups %ymm0, (%rsi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
entry:
  %read = bitcast i8* %src to <64 x i8>*
  %value = load <64 x i8>, <64 x i8>* %read, align 1
  %write = bitcast i8* %dest to <64 x i8>*
  store <64 x i8> %value, <64 x i8>* %write, align 1
  ret void
}
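
; The CHECK lines above were produced by the script named in the NOTE. To
; regenerate them after a codegen change, one would typically rerun that
; script against this test, e.g. (the llc binary path is illustrative):
;   llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
;     llvm/test/CodeGen/X86/ymm-ordering.ll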