The current llvm.mem.parallel_loop_access metadata has a problem in that it uses LoopIDs. LoopID unfortunately is not a loop identifier. It is neither unique (there's even a regression test assigning the same LoopID to multiple loops; it can otherwise happen if passes such as LoopVersioning make copies of entire loops) nor persistent (every time a property is removed from/added to a LoopID's MDNode, it will also receive a new LoopID; this happens e.g. when calling Loop::setLoopAlreadyUnrolled()). Since most loop transformation passes change the loop attributes (even if it is just to mark that a loop should not be processed again, as llvm.loop.isvectorized does for the versioned and unversioned loop), the parallel access information is lost for any subsequent pass. This patch unlinks LoopIDs and parallel accesses. llvm.mem.parallel_loop_access metadata on instructions is replaced by llvm.access.group metadata. llvm.access.group points to a distinct MDNode with no operands (avoiding the problem of ever needing to add/remove operands), called an "access group". Alternatively, it can point to a list of access groups. The LoopID then has an attribute llvm.loop.parallel_accesses with all the access groups that are parallel (no dependencies carried by this loop). This intentionally avoids any kind of "ID". Loops that are cloned/have their attributes modified retain the llvm.loop.parallel_accesses attribute. Access instructions that are cloned point to the same access group. It is not necessary for each access to have its own "ID" MDNode; memory access instructions with the same behavior can be grouped together. The behavior of llvm.mem.parallel_loop_access is not changed by this patch, but should be considered deprecated. Differential Revision: https://reviews.llvm.org/D52116 llvm-svn: 349725
88 lines
2.9 KiB
LLVM
88 lines
2.9 KiB
LLVM
; RUN: opt < %s -loop-vectorize -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -debug-only=loop-vectorize -stats -S 2>&1 | FileCheck %s
; REQUIRES: asserts

; @vectorized carries llvm.loop.vectorize.enable, so its hint is "force=enabled".
; CHECK: LV: Loop hints: force=enabled
; @not_vectorized carries no vectorize hint, so the force state is unknown ("?").
; CHECK: LV: Loop hints: force=?
; No more loops in the module
; CHECK-NOT: LV: Loop hints: force=
; -stats output: both loops are analyzed, but only the forced one is vectorized.
; CHECK: 2 loop-vectorize - Number of loops analyzed for vectorization
; CHECK: 1 loop-vectorize - Number of loops vectorized

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
;
; The source code for the test:
;
; #include <math.h>
; void foo(float* restrict A, float * restrict B)
; {
;   for (int i = 0; i < 1000; i+=2) A[i] = sinf(B[i]);
; }
;
; This loop will be vectorized, although the scalar cost is lower than any
; of the vector costs, because vectorization is explicitly forced in metadata.
;
; Loop with forced vectorization: the backedge branch carries !llvm.loop !1,
; whose llvm.loop.vectorize.enable operand overrides the cost model.
; Loads/stores are tagged with access group !11, which !1 declares parallel
; via llvm.loop.parallel_accesses (no loop-carried dependencies).
define void @vectorized(float* noalias nocapture %A, float* noalias nocapture %B) {
entry:
  br label %for.body

for.body:
  ; i64 induction variable: i = 0, 2, 4, ... (step 2 — see source comment above)
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !11
  ; A[i] = sinf(B[i])
  %call = tail call float @llvm.sin.f32(float %0)
  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
  store float %call, float* %arrayidx2, align 4, !llvm.access.group !11
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 1000
  br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !1

for.end.loopexit:
  br label %for.end

for.end:
  ret void
}
; LoopID for @vectorized: first operand is the node itself (self-referential),
; followed by the vectorize.enable hint and the list of parallel access groups.
!1 = !{!1, !2, !{!"llvm.loop.parallel_accesses", !11}}
!2 = !{!"llvm.loop.vectorize.enable", i1 true}
; Access group: a distinct MDNode with no operands, referenced by the
; !llvm.access.group tags on the loop's memory instructions.
!11 = distinct !{}
;
; This function will not be vectorized, as the scalar cost is lower than any
; of the vector costs and no metadata forces vectorization.
;
; Same loop body as @vectorized, but LoopID !3 has no
; llvm.loop.vectorize.enable hint, so the cost model decides — and it
; rejects vectorization (scalar cost is lower). Access group !13 is still
; declared parallel, which alone must not force vectorization.
define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture %B) {
entry:
  br label %for.body

for.body:
  ; i64 induction variable stepping by 2, exits when i+2 == 1000
  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
  %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
  %0 = load float, float* %arrayidx, align 4, !llvm.access.group !13
  ; A[i] = sinf(B[i])
  %call = tail call float @llvm.sin.f32(float %0)
  %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
  store float %call, float* %arrayidx2, align 4, !llvm.access.group !13
  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
  %lftr.wideiv = trunc i64 %indvars.iv.next to i32
  %exitcond = icmp eq i32 %lftr.wideiv, 1000
  br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !3

for.end.loopexit:
  br label %for.end

for.end:
  ret void
}
declare float @llvm.sin.f32(float) nounwind readnone

; Dummy metadata for @not_vectorized: a self-referential LoopID that only
; declares access group !13 parallel — no vectorization hint is present.
!3 = !{!3, !{!"llvm.loop.parallel_accesses", !13}}
!13 = distinct !{}