clang-p2996/llvm/test/Transforms/LoopVectorize/runtime-checks-difference.ll
Florian Hahn b7315ffc3c [LAA,LV] Add initial support for pointer-diff memory checks.
This patch adds initial support for a pointer-diff based runtime check
scheme for vectorization. Where it applies, this scheme requires fewer
computations and checks than the existing full overlap checking.

The main idea is to check only whether the source and sink of a dependence
are far enough apart that the accesses cannot overlap in the vector loop.
For that it is sufficient to compute the difference between the two
pointers and compare it against `VF * UF * AccessSize`: the single check
`(Sink - Src) <u VF * UF * AccessSize` is enough to rule out a backwards
dependence in the vector loop with the given VF and UF. If Src >=u Sink,
there is no dependence preventing vectorization, hence the overflow of the
subtraction does not matter and the unsigned less-than (ULT) comparison
alone is sufficient.
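
As a concrete instance of the formula (an editorial sketch, not part of the
patch): with VF = 4, UF = 1 and 4-byte i32 accesses, the threshold is
4 * 1 * 4 = 16 bytes, which is exactly the constant the @same_step_and_size
test below checks against. A minimal C rendering of the comparison, using a
hypothetical helper name:

```c
#include <stdbool.h>
#include <stdint.h>

/* Editorial illustration: the vector loop is safe when
 * (Sink - Src) >=u VF * UF * AccessSize. The unsigned subtraction wraps
 * when Src >u Sink, which is harmless because in that case there is no
 * backwards dependence to begin with; this mirrors the `sub` + `icmp ult`
 * sequence in the CHECK lines below. */
static bool no_backwards_dep(const void *src, const void *sink,
                             uint64_t vf, uint64_t uf, uint64_t access_size) {
  uint64_t diff = (uint64_t)(uintptr_t)sink - (uint64_t)(uintptr_t)src;
  return diff >= vf * uf * access_size;
}
```

For the first test below (`b[i] = a[i] * 3` over i32) this reduces to
`(uintptr_t)b - (uintptr_t)a >= 16`, i.e. the `icmp ult i64 ..., 16` that
branches back to the scalar loop when it holds.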

Note that the initial version is restricted in multiple ways:

1. Each pointer must only be read or only be written, and only by a single
   instruction (this allows re-constructing source/sink for dependences
   with the available information).
2. Source and sink pointers must be add-recs with matching steps.
3. The step must be a constant.
4. abs(step) == AccessSize.

Most of those restrictions can be relaxed in the future.
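
By way of contrast, here is an editorial C sketch of a loop the initial
scheme would not cover: the step of both accesses depends on a runtime
`stride` and is therefore not a constant, violating restriction 3, so such a
loop would presumably keep using the existing overlap checks.

```c
#include <stdint.h>

/* Editorial example: both accesses are add-recs, but their byte step
 * (stride * 4) is not a compile-time constant, so restriction 3 above
 * rules out the pointer-diff check for this loop. */
void strided(int32_t *a, int32_t *b, int64_t n, int64_t stride) {
  for (int64_t i = 0; i < n; i++)
    b[i * stride] = a[i * stride] * 3;
}
```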

See https://github.com/llvm/llvm-project/issues/53590.

Reviewed By: dmgreen

Differential Revision: https://reviews.llvm.org/D119078
2022-05-16 15:27:22 +01:00


; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt %s -passes=loop-vectorize -force-vector-width=4 -force-vector-interleave=1 -S | FileCheck %s
target datalayout = "e-m:e-i64:64-i128:128-n32:64-S128"
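; Both accesses are i32 with a step of 4 bytes per iteration, so a single
; pointer-difference check against VF * UF * AccessSize = 4 * 1 * 4 = 16 bytes
; is expected instead of the full overlap checks.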
define void @same_step_and_size(i32* %a, i32* %b, i64 %n) {
; CHECK-LABEL: @same_step_and_size(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A2:%.*]] = ptrtoint i32* [[A:%.*]] to i64
; CHECK-NEXT: [[B1:%.*]] = ptrtoint i32* [[B:%.*]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[B1]], [[A2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep.a = getelementptr inbounds i32, i32* %a, i64 %iv
%l = load i32, i32* %gep.a
%mul = mul nsw i32 %l, 3
%gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
store i32 %mul, i32* %gep.b
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %n
br i1 %exitcond, label %exit, label %loop
exit:
ret void
}
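; Same step and access size as above, but the two stores are in different
; branches of the loop body, so neither access dominates the other. A single
; pointer-difference check is still expected.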
define void @same_step_and_size_no_dominance_between_accesses(i32* %a, i32* %b, i64 %n, i64 %x) {
; CHECK-LABEL: @same_step_and_size_no_dominance_between_accesses(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B2:%.*]] = ptrtoint i32* [[B:%.*]] to i64
; CHECK-NEXT: [[A1:%.*]] = ptrtoint i32* [[A:%.*]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP0:%.*]] = sub i64 [[A1]], [[B2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP0]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop.latch ]
%cmp = icmp ne i64 %iv, %x
br i1 %cmp, label %then, label %else
then:
%gep.a = getelementptr inbounds i32, i32* %a, i64 %iv
store i32 0, i32* %gep.a
br label %loop.latch
else:
%gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
store i32 10, i32* %gep.b
br label %loop.latch
loop.latch:
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %n
br i1 %exitcond, label %exit, label %loop
exit:
ret void
}
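; The load advances in i16 steps while the store advances in i32 steps, so the
; pointer-difference scheme does not apply and the traditional overlap checks
; (bound comparisons) are expected.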
define void @different_steps_and_different_access_sizes(i16* %a, i32* %b, i64 %n) {
; CHECK-LABEL: @different_steps_and_different_access_sizes(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B1:%.*]] = bitcast i32* [[B:%.*]] to i8*
; CHECK-NEXT: [[A3:%.*]] = bitcast i16* [[A:%.*]] to i8*
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK: vector.memcheck:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i32, i32* [[B]], i64 [[N]]
; CHECK-NEXT: [[SCEVGEP2:%.*]] = bitcast i32* [[SCEVGEP]] to i8*
; CHECK-NEXT: [[SCEVGEP4:%.*]] = getelementptr i16, i16* [[A]], i64 [[N]]
; CHECK-NEXT: [[SCEVGEP45:%.*]] = bitcast i16* [[SCEVGEP4]] to i8*
; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult i8* [[B1]], [[SCEVGEP45]]
; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult i8* [[A3]], [[SCEVGEP2]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label %scalar.ph, label %vector.ph
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep.a = getelementptr inbounds i16, i16* %a, i64 %iv
%l = load i16, i16* %gep.a
%l.ext = sext i16 %l to i32
%mul = mul nsw i32 %l.ext, 3
%gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
store i32 %mul, i32* %gep.b
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %n
br i1 %exitcond, label %exit, label %loop
exit:
ret void
}
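; The steps match (4 bytes per iteration), but the access sizes differ: an i16
; load from offset 2 of each [2 x i16] element vs. an i32 store. The offset is
; folded into the emitted check via the `add nuw ..., 2`.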
define void @steps_match_but_different_access_sizes_1([2 x i16]* %a, i32* %b, i64 %n) {
; CHECK-LABEL: @steps_match_but_different_access_sizes_1(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A2:%.*]] = ptrtoint [2 x i16]* [[A:%.*]] to i64
; CHECK-NEXT: [[B1:%.*]] = ptrtoint i32* [[B:%.*]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP0:%.*]] = add nuw i64 [[A2]], 2
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[B1]], [[TMP0]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep.a = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 %iv, i64 1
%l = load i16, i16* %gep.a
%l.ext = sext i16 %l to i32
%mul = mul nsw i32 %l.ext, 3
%gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
store i32 %mul, i32* %gep.b
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %n
br i1 %exitcond, label %exit, label %loop
exit:
ret void
}
; Same as @steps_match_but_different_access_sizes_1, but with source and sink
; accesses flipped.
define void @steps_match_but_different_access_sizes_2([2 x i16]* %a, i32* %b, i64 %n) {
; CHECK-LABEL: @steps_match_but_different_access_sizes_2(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[B2:%.*]] = ptrtoint i32* [[B:%.*]] to i64
; CHECK-NEXT: [[A1:%.*]] = ptrtoint [2 x i16]* [[A:%.*]] to i64
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], 4
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %scalar.ph, label %vector.memcheck
; CHECK: vector.memcheck:
; CHECK-NEXT: [[TMP0:%.*]] = add nuw i64 [[A1]], 2
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[B2]]
; CHECK-NEXT: [[DIFF_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
; CHECK-NEXT: br i1 [[DIFF_CHECK]], label %scalar.ph, label %vector.ph
;
entry:
br label %loop
loop:
%iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
%gep.b = getelementptr inbounds i32, i32* %b, i64 %iv
%l = load i32, i32* %gep.b
%mul = mul nsw i32 %l, 3
%gep.a = getelementptr inbounds [2 x i16], [2 x i16]* %a, i64 %iv, i64 1
%trunc = trunc i32 %mul to i16
store i16 %trunc, i16* %gep.a
%iv.next = add nuw nsw i64 %iv, 1
%exitcond = icmp eq i64 %iv.next, %n
br i1 %exitcond, label %exit, label %loop
exit:
ret void
}