clang-p2996/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll
Roman Lebedev 7ea46aee36 Revert "[AssumeBundles] Use operand bundles to encode alignment assumptions"
An assume bundle can have more than one entry with the same name,
but at least AlignmentFromAssumptionsPass::extractAlignmentInfo() uses
getOperandBundle("align"), which internally assumes that this is not
the case, and happily crashes otherwise.

Minimal reduced reproducer: run `opt -alignment-from-assumptions` on the following module:

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

%0 = type { i64, %1*, i8*, i64, %2, i32, %3*, i8* }
%1 = type opaque
%2 = type { i8, i8, i16 }
%3 = type { i32, i32, i32, i32 }

; Function Attrs: nounwind
define i32 @f(%0* noalias nocapture readonly %arg, %0* noalias %arg1) local_unnamed_addr #0 {
bb:
  call void @llvm.assume(i1 true) [ "align"(%0* %arg, i64 8), "align"(%0* %arg1, i64 8) ]
  ret i32 0
}

; Function Attrs: nounwind willreturn
declare void @llvm.assume(i1) #1

attributes #0 = { nounwind "reciprocal-estimates"="none" }
attributes #1 = { nounwind willreturn }
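
Saving that module to a file (repro.ll is just an illustrative name) and
running the pass as described above is enough to hit the crash:

  opt -alignment-from-assumptions -disable-output repro.ll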


This is what we'd have with -mllvm -enable-knowledge-retention.
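
That option is an LLVM flag rather than a clang driver flag, so it is passed
through -mllvm; a sketch of such an invocation, with a hypothetical input file:

  clang -O2 -mllvm -enable-knowledge-retention -S -emit-llvm input.c -o input.ll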

This reverts commit c95ffadb24.
2020-07-04 23:49:23 +03:00

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -O2 -preserve-alignment-assumptions-during-inlining=0 < %s | FileCheck %s --check-prefixes=CHECK,ASSUMPTIONS-OFF,FALLBACK-0
; RUN: opt -S -O2 -preserve-alignment-assumptions-during-inlining=1 < %s | FileCheck %s --check-prefixes=CHECK,ASSUMPTIONS-ON,FALLBACK-1
; RUN: opt -S -O2 < %s | FileCheck %s --check-prefixes=CHECK,ASSUMPTIONS-OFF,FALLBACK-DEFAULT
target datalayout = "e-p:64:64-p5:32:32-A5"
; This illustrates an optimization difference caused by instruction counting
; heuristics, which are affected by the additional instructions of the
; alignment assumption.
define internal i1 @callee1(i1 %c, i64* align 8 %ptr) {
  store volatile i64 0, i64* %ptr
  ret i1 %c
}
define void @caller1(i1 %c, i64* align 1 %ptr) {
; ASSUMPTIONS-OFF-LABEL: @caller1(
; ASSUMPTIONS-OFF-NEXT: br i1 [[C:%.*]], label [[TRUE2:%.*]], label [[FALSE2:%.*]]
; ASSUMPTIONS-OFF: true2:
; ASSUMPTIONS-OFF-NEXT: store volatile i64 0, i64* [[PTR:%.*]], align 8
; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: store volatile i64 2, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: ret void
; ASSUMPTIONS-OFF: false2:
; ASSUMPTIONS-OFF-NEXT: store volatile i64 1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: store volatile i64 0, i64* [[PTR]], align 8
; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: store volatile i64 -1, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: store volatile i64 3, i64* [[PTR]], align 4
; ASSUMPTIONS-OFF-NEXT: ret void
;
; ASSUMPTIONS-ON-LABEL: @caller1(
; ASSUMPTIONS-ON-NEXT: br i1 [[C:%.*]], label [[TRUE2_CRITEDGE:%.*]], label [[FALSE1:%.*]]
; ASSUMPTIONS-ON: false1:
; ASSUMPTIONS-ON-NEXT: store volatile i64 1, i64* [[PTR:%.*]], align 8
; ASSUMPTIONS-ON-NEXT: [[PTRINT:%.*]] = ptrtoint i64* [[PTR]] to i64
; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
; ASSUMPTIONS-ON-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; ASSUMPTIONS-ON-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])
; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 3, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: ret void
; ASSUMPTIONS-ON: true2.critedge:
; ASSUMPTIONS-ON-NEXT: [[PTRINT_C:%.*]] = ptrtoint i64* [[PTR]] to i64
; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR_C:%.*]] = and i64 [[PTRINT_C]], 7
; ASSUMPTIONS-ON-NEXT: [[MASKCOND_C:%.*]] = icmp eq i64 [[MASKEDPTR_C]], 0
; ASSUMPTIONS-ON-NEXT: tail call void @llvm.assume(i1 [[MASKCOND_C]])
; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: store volatile i64 2, i64* [[PTR]], align 8
; ASSUMPTIONS-ON-NEXT: ret void
;
  br i1 %c, label %true1, label %false1

true1:
  %c2 = call i1 @callee1(i1 %c, i64* %ptr)
  store volatile i64 -1, i64* %ptr
  store volatile i64 -1, i64* %ptr
  store volatile i64 -1, i64* %ptr
  store volatile i64 -1, i64* %ptr
  store volatile i64 -1, i64* %ptr
  br i1 %c2, label %true2, label %false2

false1:
  store volatile i64 1, i64* %ptr
  br label %true1

true2:
  store volatile i64 2, i64* %ptr
  ret void

false2:
  store volatile i64 3, i64* %ptr
  ret void
}
; This test checks that alignment assumptions do not prevent SROA.
; See PR45763.
define internal void @callee2(i64* noalias sret align 8 %arg) {
  store i64 0, i64* %arg, align 8
  ret void
}
define amdgpu_kernel void @caller2() {
; ASSUMPTIONS-OFF-LABEL: @caller2(
; ASSUMPTIONS-OFF-NEXT: ret void
;
; ASSUMPTIONS-ON-LABEL: @caller2(
; ASSUMPTIONS-ON-NEXT: [[ALLOCA:%.*]] = alloca i64, align 8, addrspace(5)
; ASSUMPTIONS-ON-NEXT: [[CAST:%.*]] = addrspacecast i64 addrspace(5)* [[ALLOCA]] to i64*
; ASSUMPTIONS-ON-NEXT: [[PTRINT:%.*]] = ptrtoint i64* [[CAST]] to i64
; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7
; ASSUMPTIONS-ON-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 [[MASKCOND]])
; ASSUMPTIONS-ON-NEXT: ret void
;
  %alloca = alloca i64, align 8, addrspace(5)
  %cast = addrspacecast i64 addrspace(5)* %alloca to i64*
  call void @callee2(i64* sret align 8 %cast)
  ret void
}
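
As noted on the first line of the test, the CHECK lines were autogenerated by
utils/update_test_checks.py; a sketch of regenerating them after a pipeline
change, assuming a local build whose opt lives under build/bin:

  llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
    llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll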