This patch prepares the ground for #76060.

* Unifies ArmPL and SLEEF tests for better coverage
* Replaces deprecated float* and double* types with ptr
* Adds noalias attribute to pointer arguments
* Adds some cmd-line options to the RUN lines to simplify output
* Removes datalayout since target triple is provided
* Removes checks for return statements
* Refactors the regex filter for autogenerated checks
* Removes redundant test file suffix (already under the AArch64 dir)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "call.*(cos|sin|tan|cbrt|erf|exp|gamma|log|sqrt|copysign|dim|min|mod|hypot|nextafter|pow|fma)" --version 2
; RUN: opt -mattr=+neon -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -S < %s | FileCheck %s --check-prefix=SLEEF-NEON
; RUN: opt -mattr=+sve -vector-library=sleefgnuabi -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefix=SLEEF-SVE
; RUN: opt -mattr=+neon -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -S < %s | FileCheck %s --check-prefix=ARMPL-NEON
; RUN: opt -mattr=+sve -vector-library=ArmPL -passes=inject-tli-mappings,loop-vectorize,simplifycfg -force-vector-interleave=1 -prefer-predicate-over-epilogue=predicate-dont-vectorize -S < %s | FileCheck %s --check-prefix=ARMPL-SVE

target triple = "aarch64-unknown-linux-gnu"

; We are checking whether loops containing function calls can be vectorized
; when the compiler provides TLI mappings to their vector variants. The tests
; check fixed-width vectorization with NEON and scalable vectorization with
; SVE.

declare double @acos(double)
declare float @acosf(float)

define void @acos_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @acos_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0:[0-9]+]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_acos(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @acos_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0:[0-9]+]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_acos(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @acos_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0:[0-9]+]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vacosq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @acos_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0:[0-9]+]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svacos_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @acos(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @acos_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @acos_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_acosf(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @acos_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_acosf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @acos_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vacosq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @acos_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svacos_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @acosf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @acosh(double)
declare float @acoshf(float)
define void @acosh_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @acosh_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call double @acosh(double [[IN:%.*]])
;
; SLEEF-SVE-LABEL: define void @acosh_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call double @acosh(double [[IN:%.*]])
;
; ARMPL-NEON-LABEL: define void @acosh_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vacoshq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @acosh_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svacosh_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @acosh(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @acosh_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @acosh_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call float @acoshf(float [[IN:%.*]])
;
; SLEEF-SVE-LABEL: define void @acosh_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call float @acoshf(float [[IN:%.*]])
;
; ARMPL-NEON-LABEL: define void @acosh_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vacoshq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @acosh_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svacosh_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @acoshf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @asin(double)
declare float @asinf(float)
define void @asin_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @asin_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_asin(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @asin_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_asin(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @asin_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vasinq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @asin_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svasin_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @asin(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @asin_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @asin_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_asinf(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @asin_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_asinf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @asin_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vasinq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @asin_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svasin_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @asinf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @asinh(double)
declare float @asinhf(float)
define void @asinh_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @asinh_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call double @asinh(double [[IN:%.*]])
;
; SLEEF-SVE-LABEL: define void @asinh_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call double @asinh(double [[IN:%.*]])
;
; ARMPL-NEON-LABEL: define void @asinh_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vasinhq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @asinh_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svasinh_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @asinh(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @asinh_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @asinh_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call float @asinhf(float [[IN:%.*]])
;
; SLEEF-SVE-LABEL: define void @asinh_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call float @asinhf(float [[IN:%.*]])
;
; ARMPL-NEON-LABEL: define void @asinh_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vasinhq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @asinh_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svasinh_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @asinhf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @atan(double)
declare float @atanf(float)
define void @atan_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @atan_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_atan(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @atan_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_atan(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @atan_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vatanq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @atan_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svatan_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @atan(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @atan_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @atan_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_atanf(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @atan_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_atanf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @atan_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vatanq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @atan_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svatan_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @atanf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @atanh(double)
declare float @atanhf(float)
define void @atanh_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @atanh_f64
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_atanh(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @atanh_f64
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_atanh(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @atanh_f64
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vatanhq_f64(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @atanh_f64
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svatanh_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
|
|
%in = load double, ptr %in.gep, align 8
|
|
%call = tail call double @atanh(double %in)
|
|
%out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
|
|
store double %call, ptr %out.gep, align 8
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
define void @atanh_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @atanh_f32
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_atanhf(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @atanh_f32
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_atanhf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @atanh_f32
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vatanhq_f32(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @atanh_f32
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svatanh_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
|
|
%in = load float, ptr %in.gep, align 8
|
|
%call = tail call float @atanhf(float %in)
|
|
%out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
|
|
store float %call, ptr %out.gep, align 4
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
declare double @cbrt(double)
|
|
declare float @cbrtf(float)
|
|
|
|
define void @cbrt_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @cbrt_f64
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[CALL:%.*]] = tail call double @cbrt(double [[IN:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @cbrt_f64
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[CALL:%.*]] = tail call double @cbrt(double [[IN:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @cbrt_f64
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vcbrtq_f64(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @cbrt_f64
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svcbrt_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
|
|
%in = load double, ptr %in.gep, align 8
|
|
%call = tail call double @cbrt(double %in)
|
|
%out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
|
|
store double %call, ptr %out.gep, align 8
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
define void @cbrt_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @cbrt_f32
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[CALL:%.*]] = tail call float @cbrtf(float [[IN:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @cbrt_f32
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[CALL:%.*]] = tail call float @cbrtf(float [[IN:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @cbrt_f32
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vcbrtq_f32(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @cbrt_f32
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svcbrt_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
|
|
%in = load float, ptr %in.gep, align 8
|
|
%call = tail call float @cbrtf(float %in)
|
|
%out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
|
|
store float %call, ptr %out.gep, align 4
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
declare double @cos(double)
|
|
declare float @cosf(float)
|
|
|
|
define void @cos_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @cos_f64
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_cos(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @cos_f64
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_cos(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @cos_f64
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vcosq_f64(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @cos_f64
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svcos_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
|
|
%in = load double, ptr %in.gep, align 8
|
|
%call = tail call double @cos(double %in)
|
|
%out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
|
|
store double %call, ptr %out.gep, align 8
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
define void @cos_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @cos_f32
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_cosf(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @cos_f32
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_cosf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @cos_f32
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vcosq_f32(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @cos_f32
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svcos_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
|
|
%in = load float, ptr %in.gep, align 8
|
|
%call = tail call float @cosf(float %in)
|
|
%out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
|
|
store float %call, ptr %out.gep, align 4
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
declare double @cosh(double)
|
|
declare float @coshf(float)
|
|
|
|
define void @cosh_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @cosh_f64
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_cosh(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @cosh_f64
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_cosh(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @cosh_f64
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vcoshq_f64(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @cosh_f64
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svcosh_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
|
|
%in = load double, ptr %in.gep, align 8
|
|
%call = tail call double @cosh(double %in)
|
|
%out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
|
|
store double %call, ptr %out.gep, align 8
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
define void @cosh_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @cosh_f32
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_coshf(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @cosh_f32
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_coshf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @cosh_f32
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vcoshq_f32(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @cosh_f32
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svcosh_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
|
|
%in = load float, ptr %in.gep, align 8
|
|
%call = tail call float @coshf(float %in)
|
|
%out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
|
|
store float %call, ptr %out.gep, align 4
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
declare double @erf(double)
|
|
declare float @erff(float)
|
|
|
|
define void @erf_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @erf_f64
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[CALL:%.*]] = tail call double @erf(double [[IN:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @erf_f64
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[CALL:%.*]] = tail call double @erf(double [[IN:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @erf_f64
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_verfq_f64(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @erf_f64
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_sverf_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
|
|
%in = load double, ptr %in.gep, align 8
|
|
%call = tail call double @erf(double %in)
|
|
%out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
|
|
store double %call, ptr %out.gep, align 8
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
define void @erf_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @erf_f32
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[CALL:%.*]] = tail call float @erff(float [[IN:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @erf_f32
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[CALL:%.*]] = tail call float @erff(float [[IN:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @erf_f32
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_verfq_f32(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @erf_f32
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_sverf_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
|
|
%in = load float, ptr %in.gep, align 8
|
|
%call = tail call float @erff(float %in)
|
|
%out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
|
|
store float %call, ptr %out.gep, align 4
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
declare double @erfc(double)
|
|
declare float @erfcf(float)
|
|
|
|
define void @erfc_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @erfc_f64
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[CALL:%.*]] = tail call double @erfc(double [[IN:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @erfc_f64
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[CALL:%.*]] = tail call double @erfc(double [[IN:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @erfc_f64
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_verfcq_f64(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @erfc_f64
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_sverfc_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
|
|
%in = load double, ptr %in.gep, align 8
|
|
%call = tail call double @erfc(double %in)
|
|
%out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
|
|
store double %call, ptr %out.gep, align 8
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
define void @erfc_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @erfc_f32
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[CALL:%.*]] = tail call float @erfcf(float [[IN:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @erfc_f32
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[CALL:%.*]] = tail call float @erfcf(float [[IN:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @erfc_f32
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_verfcq_f32(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @erfc_f32
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_sverfc_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
|
|
%in = load float, ptr %in.gep, align 8
|
|
%call = tail call float @erfcf(float %in)
|
|
%out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
|
|
store float %call, ptr %out.gep, align 4
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
declare double @exp(double)
|
|
declare float @expf(float)
|
|
|
|
define void @exp_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @exp_f64
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_exp(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @exp_f64
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @exp_f64
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vexpq_f64(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @exp_f64
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svexp_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
|
|
%in = load double, ptr %in.gep, align 8
|
|
%call = tail call double @exp(double %in)
|
|
%out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
|
|
store double %call, ptr %out.gep, align 8
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
define void @exp_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @exp_f32
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_expf(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @exp_f32
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_expf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @exp_f32
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vexpq_f32(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @exp_f32
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svexp_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
|
|
%in = load float, ptr %in.gep, align 8
|
|
%call = tail call float @expf(float %in)
|
|
%out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
|
|
store float %call, ptr %out.gep, align 4
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
declare double @exp2(double)
|
|
declare float @exp2f(float)
|
|
|
|
define void @exp2_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @exp2_f64
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_exp2(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @exp2_f64
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp2(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @exp2_f64
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vexp2q_f64(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @exp2_f64
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svexp2_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
|
|
%in = load double, ptr %in.gep, align 8
|
|
%call = tail call double @exp2(double %in)
|
|
%out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
|
|
store double %call, ptr %out.gep, align 8
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
define void @exp2_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @exp2_f32
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_exp2f(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @exp2_f32
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp2f(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @exp2_f32
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vexp2q_f32(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @exp2_f32
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svexp2_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
|
|
%in = load float, ptr %in.gep, align 8
|
|
%call = tail call float @exp2f(float %in)
|
|
%out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
|
|
store float %call, ptr %out.gep, align 4
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
declare double @exp10(double)
|
|
declare float @exp10f(float)
|
|
|
|
define void @exp10_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @exp10_f64
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_exp10(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @exp10_f64
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_exp10(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @exp10_f64
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vexp10q_f64(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @exp10_f64
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svexp10_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
|
|
%in = load double, ptr %in.gep, align 8
|
|
%call = tail call double @exp10(double %in)
|
|
%out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
|
|
store double %call, ptr %out.gep, align 8
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
define void @exp10_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @exp10_f32
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_exp10f(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @exp10_f32
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_exp10f(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @exp10_f32
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vexp10q_f32(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @exp10_f32
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svexp10_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
|
|
%in = load float, ptr %in.gep, align 8
|
|
%call = tail call float @exp10f(float %in)
|
|
%out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
|
|
store float %call, ptr %out.gep, align 4
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
declare double @expm1(double)
|
|
declare float @expm1f(float)
|
|
|
|
define void @expm1_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @expm1_f64
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[CALL:%.*]] = tail call double @expm1(double [[IN:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @expm1_f64
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[CALL:%.*]] = tail call double @expm1(double [[IN:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @expm1_f64
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vexpm1q_f64(<2 x double> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @expm1_f64
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svexpm1_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
|
|
%in = load double, ptr %in.gep, align 8
|
|
%call = tail call double @expm1(double %in)
|
|
%out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
|
|
store double %call, ptr %out.gep, align 8
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}
|
|
|
|
define void @expm1_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
|
|
; SLEEF-NEON-LABEL: define void @expm1_f32
|
|
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-NEON: [[CALL:%.*]] = tail call float @expm1f(float [[IN:%.*]])
|
|
;
|
|
; SLEEF-SVE-LABEL: define void @expm1_f32
|
|
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; SLEEF-SVE: [[CALL:%.*]] = tail call float @expm1f(float [[IN:%.*]])
|
|
;
|
|
; ARMPL-NEON-LABEL: define void @expm1_f32
|
|
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vexpm1q_f32(<4 x float> [[WIDE_LOAD:%.*]])
|
|
;
|
|
; ARMPL-SVE-LABEL: define void @expm1_f32
|
|
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
|
|
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svexpm1_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
|
|
;
|
|
entry:
|
|
br label %for.body
|
|
|
|
for.body:
|
|
%iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
|
|
%in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
|
|
%in = load float, ptr %in.gep, align 8
|
|
%call = tail call float @expm1f(float %in)
|
|
%out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
|
|
store float %call, ptr %out.gep, align 4
|
|
%iv.next = add nuw nsw i64 %iv, 1
|
|
%exitcond = icmp eq i64 %iv.next, 1000
|
|
br i1 %exitcond, label %for.end, label %for.body
|
|
|
|
for.end:
|
|
ret void
|
|
}

declare double @lgamma(double)
declare float @lgammaf(float)

define void @lgamma_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @lgamma_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_lgamma(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @lgamma_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_lgamma(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @lgamma_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vlgammaq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @lgamma_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svlgamma_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @lgamma(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @lgamma_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @lgamma_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_lgammaf(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @lgamma_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_lgammaf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @lgamma_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vlgammaq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @lgamma_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svlgamma_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @lgammaf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @log(double)
declare float @logf(float)

define void @log_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @log_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_log(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @log_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @log_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vlogq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @log_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svlog_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @log(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @log_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_logf(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @log_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_logf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @log_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vlogq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @log_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svlog_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @logf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}
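
; Note (editorial): the autogenerated checks below show the SLEEF configurations
; keeping the scalar log1p/log1pf calls, so presumably no SLEEF vector variant is
; mapped for them; only the ArmPL RUN lines vectorize these calls.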

declare double @log1p(double)
declare float @log1pf(float)

define void @log1p_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @log1p_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call double @log1p(double [[IN:%.*]])
;
; SLEEF-SVE-LABEL: define void @log1p_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call double @log1p(double [[IN:%.*]])
;
; ARMPL-NEON-LABEL: define void @log1p_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vlog1pq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @log1p_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svlog1p_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @log1p(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log1p_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @log1p_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call float @log1pf(float [[IN:%.*]])
;
; SLEEF-SVE-LABEL: define void @log1p_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call float @log1pf(float [[IN:%.*]])
;
; ARMPL-NEON-LABEL: define void @log1p_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vlog1pq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @log1p_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svlog1p_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @log1pf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @log2(double)
declare float @log2f(float)

define void @log2_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @log2_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_log2(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @log2_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log2(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @log2_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vlog2q_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @log2_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svlog2_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @log2(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log2_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @log2_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_log2f(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @log2_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log2f(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @log2_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vlog2q_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @log2_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svlog2_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @log2f(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @log10(double)
declare float @log10f(float)

define void @log10_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @log10_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_log10(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @log10_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_log10(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @log10_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vlog10q_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @log10_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svlog10_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @log10(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @log10_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @log10_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_log10f(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @log10_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_log10f(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @log10_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vlog10q_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @log10_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svlog10_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @log10f(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @sin(double)
declare float @sinf(float)

define void @sin_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @sin_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_sin(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @sin_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_sin(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @sin_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vsinq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @sin_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svsin_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @sin(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @sin_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @sin_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_sinf(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @sin_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_sinf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @sin_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vsinq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @sin_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svsin_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @sinf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @sinh(double)
declare float @sinhf(float)

define void @sinh_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @sinh_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_sinh(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @sinh_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_sinh(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @sinh_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vsinhq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @sinh_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svsinh_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @sinh(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @sinh_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @sinh_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_sinhf(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @sinh_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_sinhf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @sinh_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vsinhq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @sinh_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svsinh_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @sinhf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}
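
; Note (editorial): per the checks below, sinpi/sinpif remain scalar calls in the
; SLEEF configurations, while ArmPL provides vector replacements for them.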

declare double @sinpi(double)
declare float @sinpif(float)

define void @sinpi_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @sinpi_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call double @sinpi(double [[IN:%.*]])
;
; SLEEF-SVE-LABEL: define void @sinpi_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call double @sinpi(double [[IN:%.*]])
;
; ARMPL-NEON-LABEL: define void @sinpi_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vsinpiq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @sinpi_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svsinpi_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @sinpi(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @sinpi_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @sinpi_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call float @sinpif(float [[IN:%.*]])
;
; SLEEF-SVE-LABEL: define void @sinpi_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call float @sinpif(float [[IN:%.*]])
;
; ARMPL-NEON-LABEL: define void @sinpi_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vsinpiq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @sinpi_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svsinpi_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @sinpif(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @sqrt(double)
declare float @sqrtf(float)

define void @sqrt_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @sqrt_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_sqrt(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @sqrt_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_sqrt(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @sqrt_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vsqrtq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @sqrt_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svsqrt_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @sqrt(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @sqrt_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @sqrt_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_sqrtf(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @sqrt_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_sqrtf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @sqrt_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vsqrtq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @sqrt_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svsqrt_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @sqrtf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @tan(double)
declare float @tanf(float)

define void @tan_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @tan_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_tan(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @tan_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_tan(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @tan_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vtanq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @tan_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svtan_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @tan(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @tan_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @tan_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_tanf(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @tan_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_tanf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @tan_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vtanq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @tan_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svtan_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @tanf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @tanh(double)
declare float @tanhf(float)

define void @tanh_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @tanh_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_tanh(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @tanh_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_tanh(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @tanh_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vtanhq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @tanh_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svtanh_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @tanh(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @tanh_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @tanh_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_tanhf(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @tanh_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_tanhf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @tanh_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vtanhq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @tanh_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svtanh_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @tanhf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @tgamma(double)
declare float @tgammaf(float)

define void @tgamma_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @tgamma_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2v_tgamma(<2 x double> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @tgamma_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxv_tgamma(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @tgamma_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vtgammaq_f64(<2 x double> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @tgamma_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svtgamma_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @tgamma(double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @tgamma_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @tgamma_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4v_tgammaf(<4 x float> [[WIDE_LOAD:%.*]])
;
; SLEEF-SVE-LABEL: define void @tgamma_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxv_tgammaf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @tgamma_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vtgammaq_f32(<4 x float> [[WIDE_LOAD:%.*]])
;
; ARMPL-SVE-LABEL: define void @tgamma_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svtgamma_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @tgammaf(float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @atan2(double, double)
declare float @atan2f(float, float)

define void @atan2_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @atan2_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2vv_atan2(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
;
; SLEEF-SVE-LABEL: define void @atan2_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxvv_atan2(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @atan2_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vatan2q_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @atan2_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svatan2_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @atan2(double %in, double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @atan2_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @atan2_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4vv_atan2f(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
;
; SLEEF-SVE-LABEL: define void @atan2_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxvv_atan2f(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @atan2_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vatan2q_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @atan2_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svatan2_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @atan2f(float %in, float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}
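
; Note (editorial): for copysign/copysignf the SLEEF RUN lines keep the scalar
; calls in the checks below, whereas ArmPL supplies NEON and SVE variants.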

declare double @copysign(double, double)
declare float @copysignf(float, float)

define void @copysign_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @copysign_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call double @copysign(double [[IN:%.*]], double [[IN]])
;
; SLEEF-SVE-LABEL: define void @copysign_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call double @copysign(double [[IN:%.*]], double [[IN]])
;
; ARMPL-NEON-LABEL: define void @copysign_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vcopysignq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @copysign_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svcopysign_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @copysign(double %in, double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @copysign_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @copysign_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call float @copysignf(float [[IN:%.*]], float [[IN]])
;
; SLEEF-SVE-LABEL: define void @copysign_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call float @copysignf(float [[IN:%.*]], float [[IN]])
;
; ARMPL-NEON-LABEL: define void @copysign_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vcopysignq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @copysign_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svcopysign_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @copysignf(float %in, float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}
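
; Note (editorial): fdim/fdimf likewise stay scalar under SLEEF in the checks
; below and are only vectorized by the ArmPL mappings.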

declare double @fdim(double, double)
declare float @fdimf(float, float)

define void @fdim_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @fdim_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call double @fdim(double [[IN:%.*]], double [[IN]])
;
; SLEEF-SVE-LABEL: define void @fdim_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call double @fdim(double [[IN:%.*]], double [[IN]])
;
; ARMPL-NEON-LABEL: define void @fdim_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vfdimq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @fdim_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svfdim_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @fdim(double %in, double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @fdim_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @fdim_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call float @fdimf(float [[IN:%.*]], float [[IN]])
;
; SLEEF-SVE-LABEL: define void @fdim_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call float @fdimf(float [[IN:%.*]], float [[IN]])
;
; ARMPL-NEON-LABEL: define void @fdim_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vfdimq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @fdim_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svfdim_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @fdimf(float %in, float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}
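
; Note (editorial): the checks below keep the scalar fmin/fminf calls for the
; SLEEF configurations; only ArmPL replaces them with vector routines here.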

declare double @fmin(double, double)
declare float @fminf(float, float)

define void @fmin_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @fmin_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call double @fmin(double [[IN:%.*]], double [[IN]])
;
; SLEEF-SVE-LABEL: define void @fmin_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call double @fmin(double [[IN:%.*]], double [[IN]])
;
; ARMPL-NEON-LABEL: define void @fmin_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vfminq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @fmin_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svfmin_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @fmin(double %in, double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @fmin_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @fmin_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call float @fminf(float [[IN:%.*]], float [[IN]])
;
; SLEEF-SVE-LABEL: define void @fmin_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call float @fminf(float [[IN:%.*]], float [[IN]])
;
; ARMPL-NEON-LABEL: define void @fmin_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vfminq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @fmin_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svfmin_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @fminf(float %in, float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @fmod(double, double)
declare float @fmodf(float, float)

define void @fmod_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @fmod_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2vv_fmod(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
;
; SLEEF-SVE-LABEL: define void @fmod_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxvv_fmod(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @fmod_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vfmodq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @fmod_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svfmod_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @fmod(double %in, double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @fmod_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @fmod_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4vv_fmodf(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
;
; SLEEF-SVE-LABEL: define void @fmod_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxvv_fmodf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @fmod_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vfmodq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @fmod_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svfmod_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 8
  %call = tail call float @fmodf(float %in, float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}
|
|
|
|
declare double @hypot(double, double)
declare float @hypotf(float, float)

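; Only ArmPL provides vector variants for hypot here; the SLEEF prefixes check
; that the scalar call survives vectorization.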
define void @hypot_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @hypot_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call double @hypot(double [[IN:%.*]], double [[IN]])
;
; SLEEF-SVE-LABEL: define void @hypot_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call double @hypot(double [[IN:%.*]], double [[IN]])
;
; ARMPL-NEON-LABEL: define void @hypot_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vhypotq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @hypot_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svhypot_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @hypot(double %in, double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @hypot_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @hypot_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call float @hypotf(float [[IN:%.*]], float [[IN]])
;
; SLEEF-SVE-LABEL: define void @hypot_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call float @hypotf(float [[IN:%.*]], float [[IN]])
;
; ARMPL-NEON-LABEL: define void @hypot_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vhypotq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @hypot_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svhypot_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 4
  %call = tail call float @hypotf(float %in, float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @nextafter(double, double)
declare float @nextafterf(float, float)

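; As with hypot, only ArmPL maps nextafter, so the SLEEF prefixes expect the
; scalar call to remain.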
define void @nextafter_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @nextafter_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call double @nextafter(double [[IN:%.*]], double [[IN]])
;
; SLEEF-SVE-LABEL: define void @nextafter_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call double @nextafter(double [[IN:%.*]], double [[IN]])
;
; ARMPL-NEON-LABEL: define void @nextafter_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vnextafterq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @nextafter_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svnextafter_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @nextafter(double %in, double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @nextafter_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @nextafter_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call float @nextafterf(float [[IN:%.*]], float [[IN]])
;
; SLEEF-SVE-LABEL: define void @nextafter_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call float @nextafterf(float [[IN:%.*]], float [[IN]])
;
; ARMPL-NEON-LABEL: define void @nextafter_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vnextafterq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @nextafter_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svnextafter_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 4
  %call = tail call float @nextafterf(float %in, float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @pow(double, double)
declare float @powf(float, float)

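; pow is mapped by both libraries, so every prefix below expects a widened
; two-operand call.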
define void @pow_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @pow_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <2 x double> @_ZGVnN2vv_pow(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
;
; SLEEF-SVE-LABEL: define void @pow_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @_ZGVsMxvv_pow(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @pow_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vpowq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @pow_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svpow_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @pow(double %in, double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @pow_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @pow_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[TMP3:%.*]] = call <4 x float> @_ZGVnN4vv_powf(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
;
; SLEEF-SVE-LABEL: define void @pow_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @_ZGVsMxvv_powf(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
; ARMPL-NEON-LABEL: define void @pow_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vpowq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @pow_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svpow_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 4
  %call = tail call float @powf(float %in, float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

declare double @fma(double, double, double)
declare float @fmaf(float, float, float)

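; fma takes three operands; only ArmPL maps it here, so the SLEEF prefixes
; check the scalar call while the ArmPL prefixes expect three-operand widened
; calls.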
define void @fma_f64(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @fma_f64
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call double @fma(double [[IN:%.*]], double [[IN]], double [[IN]])
;
; SLEEF-SVE-LABEL: define void @fma_f64
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call double @fma(double [[IN:%.*]], double [[IN]], double [[IN]])
;
; ARMPL-NEON-LABEL: define void @fma_f64
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <2 x double> @armpl_vfmaq_f64(<2 x double> [[WIDE_LOAD:%.*]], <2 x double> [[WIDE_LOAD]], <2 x double> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @fma_f64
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 2 x double> @armpl_svfma_f64_x(<vscale x 2 x double> [[WIDE_MASKED_LOAD:%.*]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x double> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds double, ptr %in.ptr, i64 %iv
  %in = load double, ptr %in.gep, align 8
  %call = tail call double @fma(double %in, double %in, double %in)
  %out.gep = getelementptr inbounds double, ptr %out.ptr, i64 %iv
  store double %call, ptr %out.gep, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}

define void @fma_f32(ptr noalias %in.ptr, ptr noalias %out.ptr) {
; SLEEF-NEON-LABEL: define void @fma_f32
; SLEEF-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-NEON: [[CALL:%.*]] = tail call float @fmaf(float [[IN:%.*]], float [[IN]], float [[IN]])
;
; SLEEF-SVE-LABEL: define void @fma_f32
; SLEEF-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; SLEEF-SVE: [[CALL:%.*]] = tail call float @fmaf(float [[IN:%.*]], float [[IN]], float [[IN]])
;
; ARMPL-NEON-LABEL: define void @fma_f32
; ARMPL-NEON-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-NEON: [[TMP3:%.*]] = call <4 x float> @armpl_vfmaq_f32(<4 x float> [[WIDE_LOAD:%.*]], <4 x float> [[WIDE_LOAD]], <4 x float> [[WIDE_LOAD]])
;
; ARMPL-SVE-LABEL: define void @fma_f32
; ARMPL-SVE-SAME: (ptr noalias [[IN_PTR:%.*]], ptr noalias [[OUT_PTR:%.*]]) #[[ATTR0]] {
; ARMPL-SVE: [[TMP15:%.*]] = call <vscale x 4 x float> @armpl_svfma_f32_x(<vscale x 4 x float> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x float> [[WIDE_MASKED_LOAD]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
;
entry:
  br label %for.body

for.body:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
  %in.gep = getelementptr inbounds float, ptr %in.ptr, i64 %iv
  %in = load float, ptr %in.gep, align 4
  %call = tail call float @fmaf(float %in, float %in, float %in)
  %out.gep = getelementptr inbounds float, ptr %out.ptr, i64 %iv
  store float %call, ptr %out.gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret void
}
