Files
clang-p2996/llvm/test/Transforms/SLPVectorizer/X86/opt.ll
Nikita Popov 90ba33099c [InstCombine] Canonicalize constant GEPs to i8 source element type (#68882)
This patch canonicalizes getelementptr instructions with constant
indices to use the `i8` source element type. This makes it easier for
optimizations to recognize that two GEPs are identical, because they
don't need to see past many different ways to express the same offset.

This is a first step towards
https://discourse.llvm.org/t/rfc-replacing-getelementptr-with-ptradd/68699.
This is limited to constant GEPs only for now, as they have a clear
canonical form, while we're not yet sure how exactly to deal with
variable indices.

The test llvm/test/Transforms/PhaseOrdering/switch_with_geps.ll gives
two representative examples of the kind of optimization improvement we
expect from this change. In the first test SimplifyCFG can now realize
that all switch branches are actually the same. In the second test it
can convert the switch into simple arithmetic. These are representative of
common optimization failures we see in Rust.

Fixes https://github.com/llvm/llvm-project/issues/69841.
2024-01-24 15:25:29 +01:00

48 lines
2.3 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -O3 -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s --check-prefix=SLP
; RUN: opt < %s -O3 -vectorize-slp=false -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s --check-prefix=NOSLP
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
; Make sure we can disable slp vectorization in opt.
; test1 computes c[0..1] = a[0..1] * b[0..1] elementwise on doubles.
; Under plain -O3 the SLP vectorizer merges the two scalar fmuls into a
; single <2 x double> fmul (first check prefix); with -vectorize-slp=false
; the operations stay scalar (second check prefix).  The scalar checks
; expect the GEPs in canonical i8 form with byte offset 8 (sizeof(double)),
; matching the constant-GEP canonicalization described in the commit above.
; NOTE: the check lines below are autogenerated by update_test_checks.py —
; regenerate rather than hand-edit them.
define void @test1(ptr %a, ptr %b, ptr %c) {
; SLP-LABEL: @test1(
; SLP-NEXT: entry:
; SLP-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr [[A:%.*]], align 8
; SLP-NEXT: [[TMP1:%.*]] = load <2 x double>, ptr [[B:%.*]], align 8
; SLP-NEXT: [[TMP2:%.*]] = fmul <2 x double> [[TMP0]], [[TMP1]]
; SLP-NEXT: store <2 x double> [[TMP2]], ptr [[C:%.*]], align 8
; SLP-NEXT: ret void
;
; NOSLP-LABEL: @test1(
; NOSLP-NEXT: entry:
; NOSLP-NEXT: [[I0:%.*]] = load double, ptr [[A:%.*]], align 8
; NOSLP-NEXT: [[I1:%.*]] = load double, ptr [[B:%.*]], align 8
; NOSLP-NEXT: [[MUL:%.*]] = fmul double [[I0]], [[I1]]
; NOSLP-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 8
; NOSLP-NEXT: [[I3:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
; NOSLP-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 8
; NOSLP-NEXT: [[I4:%.*]] = load double, ptr [[ARRAYIDX4]], align 8
; NOSLP-NEXT: [[MUL5:%.*]] = fmul double [[I3]], [[I4]]
; NOSLP-NEXT: store double [[MUL]], ptr [[C:%.*]], align 8
; NOSLP-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i8, ptr [[C]], i64 8
; NOSLP-NEXT: store double [[MUL5]], ptr [[ARRAYIDX5]], align 8
; NOSLP-NEXT: ret void
;
entry:
%i0 = load double, ptr %a, align 8 ; i0 = a[0]
%i1 = load double, ptr %b, align 8 ; i1 = b[0]
%mul = fmul double %i0, %i1 ; mul = a[0] * b[0]
%arrayidx3 = getelementptr inbounds double, ptr %a, i64 1 ; &a[1]
%i3 = load double, ptr %arrayidx3, align 8 ; i3 = a[1]
%arrayidx4 = getelementptr inbounds double, ptr %b, i64 1 ; &b[1]
%i4 = load double, ptr %arrayidx4, align 8 ; i4 = b[1]
%mul5 = fmul double %i3, %i4 ; mul5 = a[1] * b[1]
store double %mul, ptr %c, align 8 ; c[0] = mul
%arrayidx5 = getelementptr inbounds double, ptr %c, i64 1 ; &c[1]
store double %mul5, ptr %arrayidx5, align 8 ; c[1] = mul5
ret void
}