If we vectorize e.g. a store, we leave behind a bunch of getelementptrs that were only used by the individual scalar stores we just removed. We can go ahead and delete those getelementptrs as well. This is purely for test output quality and readability; it should have no effect in any sane pipeline.

Differential Revision: https://reviews.llvm.org/D122493
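For illustration (a sketch based on @test1 below, not verbatim pre-patch output): the two scalar stores through %store1 and %store2 are replaced by a single <2 x double> store through %store1, which leaves the %store2 getelementptr dead. Before this change the autogenerated checks would still have matched that dead GEP, along the lines of the hypothetical line below; with this change it is erased, so only [[STORE1]], the base of the vector store, shows up in the CHECK lines.

; Hypothetical pre-patch CHECK line for the dead GEP (assumed, for illustration only):
; CHECK-NEXT:    [[STORE2:%.*]] = getelementptr inbounds [3 x double], [3 x double]* [[AGG_TMP_I_I_SROA_0]], i64 0, i64 2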
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

; Simple 3-pair chain with loads and stores
define void @test1(double* %a, double* %b, double* %c) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[AGG_TMP_I_I_SROA_0:%.*]] = alloca [3 x double], align 16
; CHECK-NEXT:    [[STORE1:%.*]] = getelementptr inbounds [3 x double], [3 x double]* [[AGG_TMP_I_I_SROA_0]], i64 0, i64 1
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast double* [[STORE1]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %agg.tmp.i.i.sroa.0 = alloca [3 x double], align 16
  %i0 = load double, double* %a
  %i1 = load double, double* %b
  %mul = fmul double %i0, %i1
  %store1 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 1
  %store2 = getelementptr inbounds [3 x double], [3 x double]* %agg.tmp.i.i.sroa.0, i64 0, i64 2
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  store double %mul, double* %store1
  store double %mul5, double* %store2, align 16
  ret void
}

; Float has 4 byte abi alignment on x86_64. We must use the alignment of the
; value being loaded/stored not the alignment of the pointer type.

define void @test2(float * %a, float * %b) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, <4 x float>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast float* [[B:%.*]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[TMP1]], <4 x float>* [[TMP2]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %l0 = load float, float* %a
  %a1 = getelementptr inbounds float, float* %a, i64 1
  %l1 = load float, float* %a1
  %a2 = getelementptr inbounds float, float* %a, i64 2
  %l2 = load float, float* %a2
  %a3 = getelementptr inbounds float, float* %a, i64 3
  %l3 = load float, float* %a3
  store float %l0, float* %b
  %b1 = getelementptr inbounds float, float* %b, i64 1
  store float %l1, float* %b1
  %b2 = getelementptr inbounds float, float* %b, i64 2
  store float %l2, float* %b2
  %b3 = getelementptr inbounds float, float* %b, i64 3
  store float %l3, float* %b3
  ret void
}