clang-p2996/llvm/test/Transforms/LoopVectorize/X86/parallel-loops.ll
Roman Lebedev d137f1288e [X86][LV] X86 does *not* prefer vectorized addressing
Another attempt to start untangling this ball of threads around gathers.
There's a `TTI::prefersVectorizedAddressing()` hook, which confusingly defaults to `true`,
which tells LV to try to vectorize the addresses that lead to loads,
but X86 generally cannot deal with vectors of addresses:
the only instructions that support them are GATHER/SCATTER,
and even those aren't available until AVX2, and aren't really usable until AVX512.

This specializes the hook for X86, to return true only if we have AVX512 or AVX2 w/ fast gather.
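
As a rough sketch (not the verbatim patch; the subtarget helper names here
are assumptions), the override boils down to:

  // Hypothetical sketch of the X86 override; exact helper names may differ.
  bool X86TTIImpl::prefersVectorizedAddressing() const {
    // Vectors of addresses only pay off when real gathers exist: AVX512,
    // or AVX2 on subtargets where gather is fast.
    return ST->hasAVX512() || (ST->hasAVX2() && ST->hasFastGather());
  }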

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D111546
2021-10-16 12:32:18 +03:00

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -dce -instcombine -S | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"
; A tricky loop that is not vectorized: the store to a[a[i]] may alias the
; other accesses, and there is no parallel-loop metadata to rule that out.
;
;   void loop(int *a, int *b) {
;     for (int i = 0; i < 512; ++i) {
;       a[a[i]] = b[i];
;       a[i] = b[i+1];
;     }
;   }
define void @loop(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
; CHECK-LABEL: @loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP1]] to i64
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IDXPROM3]]
; CHECK-NEXT: store i32 [[TMP0]], i32* [[ARRAYIDX4]], align 4
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
; CHECK-NEXT: store i32 [[TMP2]], i32* [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 512
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%0 = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32, i32* %arrayidx2, align 4
%idxprom3 = sext i32 %1 to i64
%arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
store i32 %0, i32* %arrayidx4, align 4
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
%2 = load i32, i32* %arrayidx6, align 4
store i32 %2, i32* %arrayidx2, align 4
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 512
br i1 %exitcond, label %for.end, label %for.body
for.end: ; preds = %for.body
ret void
}
; The same loop with parallel loop metadata added to the loop branch
; and the memory instructions.
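; Here the loop branch carries !llvm.loop !3, and every memory access carries
; an !llvm.access.group tag (!13 or !15) that !3 lists as parallel, so the
; vectorizer may ignore the potential a[a[i]] dependence.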
define void @parallel_loop(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
; CHECK-LABEL: @parallel_loop(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
; CHECK: vector.ph:
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = or i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP1:%.*]] = or i64 [[INDEX]], 2
; CHECK-NEXT: [[TMP2:%.*]] = or i64 [[INDEX]], 3
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP4:%.*]] = bitcast i32* [[TMP3]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 4, !llvm.access.group !0
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDEX]]
; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP0]]
; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP1]]
; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP2]]
; CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[TMP5]], align 4, !llvm.access.group !0
; CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP6]], align 4, !llvm.access.group !0
; CHECK-NEXT: [[TMP11:%.*]] = load i32, i32* [[TMP7]], align 4, !llvm.access.group !0
; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP8]], align 4, !llvm.access.group !0
; CHECK-NEXT: [[TMP13:%.*]] = sext i32 [[TMP9]] to i64
; CHECK-NEXT: [[TMP14:%.*]] = sext i32 [[TMP10]] to i64
; CHECK-NEXT: [[TMP15:%.*]] = sext i32 [[TMP11]] to i64
; CHECK-NEXT: [[TMP16:%.*]] = sext i32 [[TMP12]] to i64
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP13]]
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP14]]
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP15]]
; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[TMP16]]
; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 0
; CHECK-NEXT: store i32 [[TMP21]], i32* [[TMP17]], align 4, !llvm.access.group !1
; CHECK-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 1
; CHECK-NEXT: store i32 [[TMP22]], i32* [[TMP18]], align 4, !llvm.access.group !1
; CHECK-NEXT: [[TMP23:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
; CHECK-NEXT: store i32 [[TMP23]], i32* [[TMP19]], align 4, !llvm.access.group !1
; CHECK-NEXT: [[TMP24:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
; CHECK-NEXT: store i32 [[TMP24]], i32* [[TMP20]], align 4, !llvm.access.group !1
; CHECK-NEXT: [[TMP25:%.*]] = or i64 [[INDEX]], 1
; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[TMP25]]
; CHECK-NEXT: [[TMP27:%.*]] = bitcast i32* [[TMP26]] to <4 x i32>*
; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP27]], align 4, !llvm.access.group !0
; CHECK-NEXT: [[TMP28:%.*]] = bitcast i32* [[TMP5]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[WIDE_LOAD1]], <4 x i32>* [[TMP28]], align 4, !llvm.access.group !0
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
; CHECK-NEXT: [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], 512
; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: br i1 undef, label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%0 = load i32, i32* %arrayidx, align 4, !llvm.access.group !13
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32, i32* %arrayidx2, align 4, !llvm.access.group !13
%idxprom3 = sext i32 %1 to i64
%arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
; This store might have originated from inlining a function with a parallel
; loop. Its access group (!15) is also listed by the original loop's ID (!4).
store i32 %0, i32* %arrayidx4, align 4, !llvm.access.group !15
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
%2 = load i32, i32* %arrayidx6, align 4, !llvm.access.group !13
store i32 %2, i32* %arrayidx2, align 4, !llvm.access.group !13
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 512
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !3
for.end: ; preds = %for.body
ret void
}
; The same loop with illegal parallel loop metadata: the memory
; accesses refer to a different loop's identifier.
define void @mixed_metadata(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
; CHECK-LABEL: @mixed_metadata(
; CHECK-NEXT: entry:
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 4, !llvm.access.group !7
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[INDVARS_IV]]
; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4, !llvm.access.group !7
; CHECK-NEXT: [[IDXPROM3:%.*]] = sext i32 [[TMP1]] to i64
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IDXPROM3]]
; CHECK-NEXT: store i32 [[TMP0]], i32* [[ARRAYIDX4]], align 4, !llvm.access.group !8
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV_NEXT]]
; CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4, !llvm.access.group !7
; CHECK-NEXT: store i32 [[TMP2]], i32* [[ARRAYIDX2]], align 4, !llvm.access.group !7
; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], 512
; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]]
; CHECK: for.end:
; CHECK-NEXT: ret void
;
entry:
br label %for.body
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %b, i64 %indvars.iv
%0 = load i32, i32* %arrayidx, align 4, !llvm.access.group !16
%arrayidx2 = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
%1 = load i32, i32* %arrayidx2, align 4, !llvm.access.group !16
%idxprom3 = sext i32 %1 to i64
%arrayidx4 = getelementptr inbounds i32, i32* %a, i64 %idxprom3
; This refers to the loop marked with !7, which we are not in at the moment.
; It should prevent the loop from being detected as parallel.
store i32 %0, i32* %arrayidx4, align 4, !llvm.access.group !17
%indvars.iv.next = add i64 %indvars.iv, 1
%arrayidx6 = getelementptr inbounds i32, i32* %b, i64 %indvars.iv.next
%2 = load i32, i32* %arrayidx6, align 4, !llvm.access.group !16
store i32 %2, i32* %arrayidx2, align 4, !llvm.access.group !16
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
%exitcond = icmp eq i32 %lftr.wideiv, 512
br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !6
for.end: ; preds = %for.body
ret void
}
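; !3 is @parallel_loop's loop ID; it declares access groups !13 and !15 parallel.
; !4 is the "original loop" mentioned above the inlined store; it also lists !15.
; !6 is @mixed_metadata's loop ID and lists only !16; !17 belongs to loop !7,
; so the store tagged !17 keeps @mixed_metadata from being treated as parallel.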
!3 = !{!3, !{!"llvm.loop.parallel_accesses", !13, !15}}
!4 = !{!4, !{!"llvm.loop.parallel_accesses", !14, !15}}
!6 = !{!6, !{!"llvm.loop.parallel_accesses", !16}}
!7 = !{!7, !{!"llvm.loop.parallel_accesses", !17}}
!13 = distinct !{}
!14 = distinct !{}
!15 = distinct !{}
!16 = distinct !{}
!17 = distinct !{}