Files
clang-p2996/llvm/test/Transforms/SLPVectorizer/AArch64/matmul.ll
Sjoerd Meijer d827865e9f Recommit "[AArch64][TTI] Cost model FADD/FSUB/FNEG"
Fixed two test cases that relied on Asserts, and added a fallthrough
annotation to the switch case.
2023-04-11 12:48:15 +01:00

140 lines
9.0 KiB
LLVM

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=aarch64-unknown-unknown -mcpu=cortex-a53 | FileCheck %s
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
; This test is reduced from the matrix multiplication benchmark in the test-suite:
; https://github.com/llvm/llvm-test-suite/tree/main/SingleSource/Benchmarks/Misc/matmul_f64_4x4.c
; The operations here are expected to be vectorized to <2 x double>.
; Otherwise, performance will suffer on Cortex-A53.
define void @wrap_mul4(ptr nocapture %Out, ptr nocapture readonly %A, ptr nocapture readonly %B) {
; CHECK-LABEL: @wrap_mul4(
; CHECK-NEXT: [[TEMP:%.*]] = load double, ptr [[A:%.*]], align 8
; CHECK-NEXT: [[TEMP1:%.*]] = load double, ptr [[B:%.*]], align 8
; CHECK-NEXT: [[MUL_I:%.*]] = fmul double [[TEMP]], [[TEMP1]]
; CHECK-NEXT: [[ARRAYIDX5_I:%.*]] = getelementptr inbounds [2 x double], ptr [[A]], i64 0, i64 1
; CHECK-NEXT: [[TEMP2:%.*]] = load double, ptr [[ARRAYIDX5_I]], align 8
; CHECK-NEXT: [[ARRAYIDX7_I:%.*]] = getelementptr inbounds [4 x double], ptr [[B]], i64 1, i64 0
; CHECK-NEXT: [[TEMP3:%.*]] = load double, ptr [[ARRAYIDX7_I]], align 8
; CHECK-NEXT: [[MUL8_I:%.*]] = fmul double [[TEMP2]], [[TEMP3]]
; CHECK-NEXT: [[ADD_I:%.*]] = fadd double [[MUL_I]], [[MUL8_I]]
; CHECK-NEXT: [[ARRAYIDX13_I:%.*]] = getelementptr inbounds [4 x double], ptr [[B]], i64 0, i64 1
; CHECK-NEXT: [[TEMP4:%.*]] = load double, ptr [[ARRAYIDX13_I]], align 8
; CHECK-NEXT: [[MUL14_I:%.*]] = fmul double [[TEMP]], [[TEMP4]]
; CHECK-NEXT: [[ARRAYIDX18_I:%.*]] = getelementptr inbounds [4 x double], ptr [[B]], i64 1, i64 1
; CHECK-NEXT: [[TEMP5:%.*]] = load double, ptr [[ARRAYIDX18_I]], align 8
; CHECK-NEXT: [[MUL19_I:%.*]] = fmul double [[TEMP2]], [[TEMP5]]
; CHECK-NEXT: [[ADD20_I:%.*]] = fadd double [[MUL14_I]], [[MUL19_I]]
; CHECK-NEXT: [[ARRAYIDX25_I:%.*]] = getelementptr inbounds [4 x double], ptr [[B]], i64 0, i64 2
; CHECK-NEXT: [[TEMP6:%.*]] = load double, ptr [[ARRAYIDX25_I]], align 8
; CHECK-NEXT: [[MUL26_I:%.*]] = fmul double [[TEMP]], [[TEMP6]]
; CHECK-NEXT: [[ARRAYIDX30_I:%.*]] = getelementptr inbounds [4 x double], ptr [[B]], i64 1, i64 2
; CHECK-NEXT: [[TEMP7:%.*]] = load double, ptr [[ARRAYIDX30_I]], align 8
; CHECK-NEXT: [[MUL31_I:%.*]] = fmul double [[TEMP2]], [[TEMP7]]
; CHECK-NEXT: [[ADD32_I:%.*]] = fadd double [[MUL26_I]], [[MUL31_I]]
; CHECK-NEXT: [[ARRAYIDX37_I:%.*]] = getelementptr inbounds [4 x double], ptr [[B]], i64 0, i64 3
; CHECK-NEXT: [[TEMP8:%.*]] = load double, ptr [[ARRAYIDX37_I]], align 8
; CHECK-NEXT: [[MUL38_I:%.*]] = fmul double [[TEMP]], [[TEMP8]]
; CHECK-NEXT: [[ARRAYIDX42_I:%.*]] = getelementptr inbounds [4 x double], ptr [[B]], i64 1, i64 3
; CHECK-NEXT: [[TEMP9:%.*]] = load double, ptr [[ARRAYIDX42_I]], align 8
; CHECK-NEXT: [[MUL43_I:%.*]] = fmul double [[TEMP2]], [[TEMP9]]
; CHECK-NEXT: [[ADD44_I:%.*]] = fadd double [[MUL38_I]], [[MUL43_I]]
; CHECK-NEXT: [[ARRAYIDX47_I:%.*]] = getelementptr inbounds [2 x double], ptr [[A]], i64 1, i64 0
; CHECK-NEXT: [[TEMP10:%.*]] = load double, ptr [[ARRAYIDX47_I]], align 8
; CHECK-NEXT: [[MUL50_I:%.*]] = fmul double [[TEMP1]], [[TEMP10]]
; CHECK-NEXT: [[ARRAYIDX52_I:%.*]] = getelementptr inbounds [2 x double], ptr [[A]], i64 1, i64 1
; CHECK-NEXT: [[TEMP11:%.*]] = load double, ptr [[ARRAYIDX52_I]], align 8
; CHECK-NEXT: [[MUL55_I:%.*]] = fmul double [[TEMP3]], [[TEMP11]]
; CHECK-NEXT: [[ADD56_I:%.*]] = fadd double [[MUL50_I]], [[MUL55_I]]
; CHECK-NEXT: [[MUL62_I:%.*]] = fmul double [[TEMP4]], [[TEMP10]]
; CHECK-NEXT: [[MUL67_I:%.*]] = fmul double [[TEMP5]], [[TEMP11]]
; CHECK-NEXT: [[ADD68_I:%.*]] = fadd double [[MUL62_I]], [[MUL67_I]]
; CHECK-NEXT: [[MUL74_I:%.*]] = fmul double [[TEMP6]], [[TEMP10]]
; CHECK-NEXT: [[MUL79_I:%.*]] = fmul double [[TEMP7]], [[TEMP11]]
; CHECK-NEXT: [[ADD80_I:%.*]] = fadd double [[MUL74_I]], [[MUL79_I]]
; CHECK-NEXT: [[MUL86_I:%.*]] = fmul double [[TEMP8]], [[TEMP10]]
; CHECK-NEXT: [[MUL91_I:%.*]] = fmul double [[TEMP9]], [[TEMP11]]
; CHECK-NEXT: [[ADD92_I:%.*]] = fadd double [[MUL86_I]], [[MUL91_I]]
; CHECK-NEXT: store double [[ADD_I]], ptr [[OUT:%.*]], align 8
; CHECK-NEXT: [[RES_I_SROA_4_0_OUT2_I_SROA_IDX2:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 1
; CHECK-NEXT: store double [[ADD20_I]], ptr [[RES_I_SROA_4_0_OUT2_I_SROA_IDX2]], align 8
; CHECK-NEXT: [[RES_I_SROA_5_0_OUT2_I_SROA_IDX4:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 2
; CHECK-NEXT: store double [[ADD32_I]], ptr [[RES_I_SROA_5_0_OUT2_I_SROA_IDX4]], align 8
; CHECK-NEXT: [[RES_I_SROA_6_0_OUT2_I_SROA_IDX6:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 3
; CHECK-NEXT: store double [[ADD44_I]], ptr [[RES_I_SROA_6_0_OUT2_I_SROA_IDX6]], align 8
; CHECK-NEXT: [[RES_I_SROA_7_0_OUT2_I_SROA_IDX8:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 4
; CHECK-NEXT: store double [[ADD56_I]], ptr [[RES_I_SROA_7_0_OUT2_I_SROA_IDX8]], align 8
; CHECK-NEXT: [[RES_I_SROA_8_0_OUT2_I_SROA_IDX10:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 5
; CHECK-NEXT: store double [[ADD68_I]], ptr [[RES_I_SROA_8_0_OUT2_I_SROA_IDX10]], align 8
; CHECK-NEXT: [[RES_I_SROA_9_0_OUT2_I_SROA_IDX12:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 6
; CHECK-NEXT: store double [[ADD80_I]], ptr [[RES_I_SROA_9_0_OUT2_I_SROA_IDX12]], align 8
; CHECK-NEXT: [[RES_I_SROA_10_0_OUT2_I_SROA_IDX14:%.*]] = getelementptr inbounds double, ptr [[OUT]], i64 7
; CHECK-NEXT: store double [[ADD92_I]], ptr [[RES_I_SROA_10_0_OUT2_I_SROA_IDX14]], align 8
; CHECK-NEXT: ret void
;
; The scalar reference IR below computes eight dot products:
;   Out[i*4 + j] = A[i][0]*B[0][j] + A[i][1]*B[1][j]   for i in 0..1, j in 0..3
; (A indexed as rows of [2 x double], B as rows of [4 x double]).
; NOTE(review): the directives above were autogenerated by
; update_test_checks.py and currently record scalar (unvectorized) output;
; per the file header the expectation is <2 x double> vectorization, so these
; assertions presumably pin the pre-change baseline — regenerate rather than
; hand-edit if the cost model changes.
;
; Row 0 of the result: %temp = A[0][0], %temp2 = A[0][1], multiplied against
; B[0][j] / B[1][j] for columns j = 0..3.
%temp = load double, ptr %A, align 8
%temp1 = load double, ptr %B, align 8
%mul.i = fmul double %temp, %temp1
%arrayidx5.i = getelementptr inbounds [2 x double], ptr %A, i64 0, i64 1
%temp2 = load double, ptr %arrayidx5.i, align 8
%arrayidx7.i = getelementptr inbounds [4 x double], ptr %B, i64 1, i64 0
%temp3 = load double, ptr %arrayidx7.i, align 8
%mul8.i = fmul double %temp2, %temp3
%add.i = fadd double %mul.i, %mul8.i
%arrayidx13.i = getelementptr inbounds [4 x double], ptr %B, i64 0, i64 1
%temp4 = load double, ptr %arrayidx13.i, align 8
%mul14.i = fmul double %temp, %temp4
%arrayidx18.i = getelementptr inbounds [4 x double], ptr %B, i64 1, i64 1
%temp5 = load double, ptr %arrayidx18.i, align 8
%mul19.i = fmul double %temp2, %temp5
%add20.i = fadd double %mul14.i, %mul19.i
%arrayidx25.i = getelementptr inbounds [4 x double], ptr %B, i64 0, i64 2
%temp6 = load double, ptr %arrayidx25.i, align 8
%mul26.i = fmul double %temp, %temp6
%arrayidx30.i = getelementptr inbounds [4 x double], ptr %B, i64 1, i64 2
%temp7 = load double, ptr %arrayidx30.i, align 8
%mul31.i = fmul double %temp2, %temp7
%add32.i = fadd double %mul26.i, %mul31.i
%arrayidx37.i = getelementptr inbounds [4 x double], ptr %B, i64 0, i64 3
%temp8 = load double, ptr %arrayidx37.i, align 8
%mul38.i = fmul double %temp, %temp8
%arrayidx42.i = getelementptr inbounds [4 x double], ptr %B, i64 1, i64 3
%temp9 = load double, ptr %arrayidx42.i, align 8
%mul43.i = fmul double %temp2, %temp9
%add44.i = fadd double %mul38.i, %mul43.i
; Row 1 of the result: loads %temp10 = A[1][0] and %temp11 = A[1][1], then
; reuses the B element loads (%temp1/%temp3..%temp9) from above.
%arrayidx47.i = getelementptr inbounds [2 x double], ptr %A, i64 1, i64 0
%temp10 = load double, ptr %arrayidx47.i, align 8
%mul50.i = fmul double %temp1, %temp10
%arrayidx52.i = getelementptr inbounds [2 x double], ptr %A, i64 1, i64 1
%temp11 = load double, ptr %arrayidx52.i, align 8
%mul55.i = fmul double %temp3, %temp11
%add56.i = fadd double %mul50.i, %mul55.i
%mul62.i = fmul double %temp4, %temp10
%mul67.i = fmul double %temp5, %temp11
%add68.i = fadd double %mul62.i, %mul67.i
%mul74.i = fmul double %temp6, %temp10
%mul79.i = fmul double %temp7, %temp11
%add80.i = fadd double %mul74.i, %mul79.i
%mul86.i = fmul double %temp8, %temp10
%mul91.i = fmul double %temp9, %temp11
%add92.i = fadd double %mul86.i, %mul91.i
; Store the eight sums contiguously to Out[0]..Out[7] (row-major 2x4 result).
store double %add.i, ptr %Out, align 8
%Res.i.sroa.4.0.Out2.i.sroa_idx2 = getelementptr inbounds double, ptr %Out, i64 1
store double %add20.i, ptr %Res.i.sroa.4.0.Out2.i.sroa_idx2, align 8
%Res.i.sroa.5.0.Out2.i.sroa_idx4 = getelementptr inbounds double, ptr %Out, i64 2
store double %add32.i, ptr %Res.i.sroa.5.0.Out2.i.sroa_idx4, align 8
%Res.i.sroa.6.0.Out2.i.sroa_idx6 = getelementptr inbounds double, ptr %Out, i64 3
store double %add44.i, ptr %Res.i.sroa.6.0.Out2.i.sroa_idx6, align 8
%Res.i.sroa.7.0.Out2.i.sroa_idx8 = getelementptr inbounds double, ptr %Out, i64 4
store double %add56.i, ptr %Res.i.sroa.7.0.Out2.i.sroa_idx8, align 8
%Res.i.sroa.8.0.Out2.i.sroa_idx10 = getelementptr inbounds double, ptr %Out, i64 5
store double %add68.i, ptr %Res.i.sroa.8.0.Out2.i.sroa_idx10, align 8
%Res.i.sroa.9.0.Out2.i.sroa_idx12 = getelementptr inbounds double, ptr %Out, i64 6
store double %add80.i, ptr %Res.i.sroa.9.0.Out2.i.sroa_idx12, align 8
%Res.i.sroa.10.0.Out2.i.sroa_idx14 = getelementptr inbounds double, ptr %Out, i64 7
store double %add92.i, ptr %Res.i.sroa.10.0.Out2.i.sroa_idx14, align 8
ret void
}