An implicit kernel parameter (dyn_ptr) was introduced a while back. This patch bumps the kernel arguments version emitted by compilers that generate dyn_ptr. The runtime uses this version to determine whether the implicit parameter was generated by the compiler, which is required to support linking code produced by an older compiler (without dyn_ptr) against a newer runtime. If approved, this patch should be backported to the release 18 branch.
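As a rough sketch of what the runtime-side check could look like (the field layout mirrors the `__tgt_kernel_arguments` struct exercised by the CHECK lines below, where the version is stored into field 0, but the constant names and the helper function are illustrative assumptions, not the actual libomptarget API):

```cpp
#include <cstdint>

// Illustrative layout: only the leading fields of the kernel-args struct
// are shown; the remaining offloading fields are elided.
struct KernelArgsTy {
  uint32_t Version;   // bumped by compilers that emit the implicit dyn_ptr
  uint32_t NumArgs;
  // ...
};

// Hypothetical version constants (names and the pre-dyn_ptr value are
// assumptions). The post-dyn_ptr value matches the `store i32 3` into
// field 0 of the kernel-args struct in the checks below.
constexpr uint32_t OMP_KERNEL_ARG_VERSION_NO_DYN_PTR = 2; // assumption
constexpr uint32_t OMP_KERNEL_ARG_VERSION_DYN_PTR    = 3;

// Hypothetical runtime-side query: binaries from an older compiler keep
// working because the runtime only appends dyn_ptr to the kernel argument
// list when the recorded version says the compiler generated it.
inline bool compilerEmitsDynPtr(const KernelArgsTy &Args) {
  return Args.Version >= OMP_KERNEL_ARG_VERSION_DYN_PTR;
}
```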
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -DCHECK -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -DCHECK -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCHECK -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3

// RUN: %clang_cc1 -DCHECK -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCHECK -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCHECK -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"

// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK9
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK9

// RUN: %clang_cc1 -DLAMBDA -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DLAMBDA -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"

// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template <typename T>
T tmain() {
  T t_var = T();
  T vec[] = {1, 2};
#pragma omp target teams distribute reduction(+: t_var)
  for (int i = 0; i < 2; ++i) {
    t_var += (T) i;
  }

#pragma omp target teams distribute reduction(-: t_var)
  for (int i = 0; i < 2; ++i) {
    t_var -= vec[i];
  }

  t_var = T(1);
#pragma omp target teams distribute reduction(*: t_var)
  for (int i = 0; i < 2; ++i) {
    t_var = t_var * vec[i];
  }

  bool and_var = true;
#pragma omp target teams distribute reduction(&&: and_var)
  for (int i = 0; i < 2; ++i) {
    and_var = and_var && (vec[i]%2 == 0);
  }

  bool or_var = false;
#pragma omp target teams distribute reduction(||: or_var)
  for (int i = 0; i < 2; ++i) {
    or_var = or_var || (vec[i]%2 == 0);
  }

  unsigned int bit_var = 1;
#pragma omp target teams distribute reduction(&: bit_var)
  for (int i = 0; i < 2; ++i) {
    bit_var = (bit_var & vec[i]);
  }

#pragma omp target teams distribute reduction(|: bit_var)
  for (int i = 0; i < 2; ++i) {
    bit_var = (bit_var | vec[i]);
  }

#pragma omp target teams distribute reduction(^: bit_var)
  for (int i = 0; i < 2; ++i) {
    bit_var = (bit_var ^ vec[i]);
  }

  t_var = T(0);
#pragma omp target teams distribute reduction(max: t_var)
  for (int i = 0; i < 2; ++i) {
    t_var = t_var >= vec[i] ? t_var : vec[i];
  }

  t_var = T(10);
#pragma omp target teams distribute reduction(min: t_var)
  for (int i = 0; i < 2; ++i) {
    t_var = t_var <= vec[i] ? t_var : vec[i];
  }

  return T();
}

int main() {
  static int sivar;
#ifdef LAMBDA

  [&]() {
#pragma omp target teams distribute reduction(+: sivar)
    for (int i = 0; i < 2; ++i) {

      // Skip global and bound tid vars

      sivar += i;

      [&]() {

        sivar += 4;

      }();
    }
  }();

  [&]() {
#pragma omp target teams distribute reduction(-: sivar)
    for (int i = 0; i < 2; ++i) {
      sivar -= i;
      [&]() {
        sivar += 4;
      }();
    }
  }();

  [&]() {
#pragma omp target teams distribute reduction(*: sivar)
    for (int i = 0; i < 2; ++i) {
      sivar *= i;
      [&]() {
        sivar += 4;
      }();
    }
  }();

  [&]() {
    bool and_var = true;
#pragma omp target teams distribute reduction(&&: and_var)
    for (int i = 0; i < 2; ++i) {
      and_var = and_var && (i == 0);
      [&]() {
        sivar += 4;
      }();
    }
  }();

  [&]() {
    bool or_var = true;
#pragma omp target teams distribute reduction(||: or_var)
    for (int i = 0; i < 2; ++i) {
      or_var = or_var || (i == 0);
      [&]() {
        sivar += 4;
      }();
    }
  }();

  [&]() {
    unsigned int bit_var = true;
#pragma omp target teams distribute reduction(&: bit_var)
    for (int i = 0; i < 2; ++i) {
      bit_var = (bit_var & i);
      [&]() {
        sivar += 4;
      }();
    }
  }();

  [&]() {
    unsigned int bit_var = false;
#pragma omp target teams distribute reduction(|: bit_var)
    for (int i = 0; i < 2; ++i) {
      bit_var = (bit_var | i);
      [&]() {
        sivar += 4;
      }();
    }
  }();

  [&]() {
    unsigned int bit_var = false;
#pragma omp target teams distribute reduction(^: bit_var)
    for (int i = 0; i < 2; ++i) {
      bit_var = (bit_var ^ i);
      [&]() {
        sivar += 4;
      }();
    }
  }();

  [&]() {
    int max_var = 0;
#pragma omp target teams distribute reduction(max: max_var)
    for (int i = 0; i < 2; ++i) {
      max_var = max_var >= i ? max_var : i;
      [&]() {
        max_var += 4;
      }();
    }
  }();

  [&]() {
    int min_var = 10;
#pragma omp target teams distribute reduction(min: min_var)
    for (int i = 0; i < 2; ++i) {
      min_var = min_var <= i ? min_var : i;
      [&]() {
        min_var += 4;
      }();
    }
  }();
  return 0;
#else
#pragma omp target teams distribute reduction(+: sivar)
  for (int i = 0; i < 2; ++i) {
    sivar += i;
  }

#pragma omp target teams distribute reduction(-: sivar)
  for (int i = 0; i < 2; ++i) {
    sivar -= i;
  }

  sivar = 1;
#pragma omp target teams distribute reduction(*: sivar)
  for (int i = 0; i < 2; ++i) {
    sivar = sivar * i;
  }

  bool and_var = true;
#pragma omp target teams distribute reduction(&&: and_var)
  for (int i = 0; i < 2; ++i) {
    and_var = and_var && (i == 0);
  }

  bool or_var = false;
#pragma omp target teams distribute reduction(||: or_var)
  for (int i = 0; i < 2; ++i) {
    or_var = or_var || (i == 0);
  }

  unsigned int bit_var = 1;
#pragma omp target teams distribute reduction(&: bit_var)
  for (int i = 0; i < 2; ++i) {
    bit_var = (bit_var & i);
  }

#pragma omp target teams distribute reduction(|: bit_var)
  for (int i = 0; i < 2; ++i) {
    bit_var = (bit_var | i);
  }

#pragma omp target teams distribute reduction(^: bit_var)
  for (int i = 0; i < 2; ++i) {
    bit_var = (bit_var ^ i);
  }

  sivar = 0;
#pragma omp target teams distribute reduction(max: sivar)
  for (int i = 0; i < 2; ++i) {
    sivar = sivar >= i ? sivar : i;
  }

  sivar = 10;
#pragma omp target teams distribute reduction(min: sivar)
  for (int i = 0; i < 2; ++i) {
    sivar = sivar <= i ? sivar : i;
  }

  return tmain<int>();
#endif
}


#endif
// CHECK1-LABEL: define {{[^@]+}}@main
|
|
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[AND_VAR:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[OR_VAR:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP25:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS26:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[BIT_VAR:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP32:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS33:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP39:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS40:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS43:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS44:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS45:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP46:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS47:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP53:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS54:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS58:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP60:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[RETVAL]], align 4
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP0]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 8
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 8
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP13]], align 8
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP14]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP17]], align 4
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209.region_id, ptr [[KERNEL_ARGS]])
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK1: omp_offload.failed:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209(ptr @_ZZ4mainE5sivar) #[[ATTR2:[0-9]+]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK1: omp_offload.cont:
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP20]], align 8
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP21]], align 8
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP25]], align 4
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 1, ptr [[TMP26]], align 4
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 8
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 8
|
|
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.1, ptr [[TMP29]], align 8
|
|
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP30]], align 8
|
|
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP31]], align 8
|
|
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP32]], align 8
|
|
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP33]], align 8
|
|
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP34]], align 8
|
|
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
|
|
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
|
|
// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP37]], align 4
|
|
// CHECK1-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214.region_id, ptr [[KERNEL_ARGS5]])
|
|
// CHECK1-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
|
|
// CHECK1: omp_offload.failed6:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214(ptr @_ZZ4mainE5sivar) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT7]]
|
|
// CHECK1: omp_offload.cont7:
|
|
// CHECK1-NEXT: store i32 1, ptr @_ZZ4mainE5sivar, align 4
|
|
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP40]], align 8
|
|
// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP41]], align 8
|
|
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP42]], align 8
|
|
// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP45]], align 4
|
|
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 1, ptr [[TMP46]], align 4
|
|
// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP43]], ptr [[TMP47]], align 8
|
|
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP44]], ptr [[TMP48]], align 8
|
|
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.3, ptr [[TMP49]], align 8
|
|
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP50]], align 8
|
|
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP51]], align 8
|
|
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP52]], align 8
|
|
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP53]], align 8
|
|
// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP54]], align 8
|
|
// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP55]], align 4
|
|
// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP56]], align 4
|
|
// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP57]], align 4
|
|
// CHECK1-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220.region_id, ptr [[KERNEL_ARGS12]])
|
|
// CHECK1-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
|
|
// CHECK1: omp_offload.failed13:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220(ptr @_ZZ4mainE5sivar) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT14]]
|
|
// CHECK1: omp_offload.cont14:
|
|
// CHECK1-NEXT: store i8 1, ptr [[AND_VAR]], align 1
|
|
// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[AND_VAR]], ptr [[TMP60]], align 8
|
|
// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[AND_VAR]], ptr [[TMP61]], align 8
|
|
// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP62]], align 8
|
|
// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP65]], align 4
|
|
// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 1, ptr [[TMP66]], align 4
|
|
// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP63]], ptr [[TMP67]], align 8
|
|
// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 8
|
|
// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.5, ptr [[TMP69]], align 8
|
|
// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP70]], align 8
|
|
// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP71]], align 8
|
|
// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP72]], align 8
|
|
// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP73]], align 8
|
|
// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP74]], align 8
|
|
// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP75]], align 4
|
|
// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP76]], align 4
|
|
// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP77]], align 4
|
|
// CHECK1-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226.region_id, ptr [[KERNEL_ARGS19]])
|
|
// CHECK1-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
|
|
// CHECK1: omp_offload.failed20:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226(ptr [[AND_VAR]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT21]]
|
|
// CHECK1: omp_offload.cont21:
|
|
// CHECK1-NEXT: store i8 0, ptr [[OR_VAR]], align 1
|
|
// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[OR_VAR]], ptr [[TMP80]], align 8
|
|
// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[OR_VAR]], ptr [[TMP81]], align 8
|
|
// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP82]], align 8
|
|
// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP85]], align 4
|
|
// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 1, ptr [[TMP86]], align 4
|
|
// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP83]], ptr [[TMP87]], align 8
|
|
// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP84]], ptr [[TMP88]], align 8
|
|
// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.7, ptr [[TMP89]], align 8
|
|
// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP90]], align 8
|
|
// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP91]], align 8
|
|
// CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP92]], align 8
|
|
// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP93]], align 8
|
|
// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP94]], align 8
|
|
// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP95]], align 4
|
|
// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP96]], align 4
|
|
// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP97]], align 4
|
|
// CHECK1-NEXT: [[TMP98:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232.region_id, ptr [[KERNEL_ARGS26]])
|
|
// CHECK1-NEXT: [[TMP99:%.*]] = icmp ne i32 [[TMP98]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP99]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
|
|
// CHECK1: omp_offload.failed27:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232(ptr [[OR_VAR]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT28]]
|
|
// CHECK1: omp_offload.cont28:
|
|
// CHECK1-NEXT: store i32 1, ptr [[BIT_VAR]], align 4
|
|
// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP100]], align 8
|
|
// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS30]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP101]], align 8
|
|
// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP102]], align 8
|
|
// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS30]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP105]], align 4
|
|
// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 1, ptr [[TMP106]], align 4
|
|
// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP103]], ptr [[TMP107]], align 8
|
|
// CHECK1-NEXT: [[TMP108:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP104]], ptr [[TMP108]], align 8
|
|
// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.9, ptr [[TMP109]], align 8
|
|
// CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP110]], align 8
|
|
// CHECK1-NEXT: [[TMP111:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP111]], align 8
|
|
// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP112]], align 8
|
|
// CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP113]], align 8
|
|
// CHECK1-NEXT: [[TMP114:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP114]], align 8
|
|
// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP115]], align 4
|
|
// CHECK1-NEXT: [[TMP116:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP116]], align 4
|
|
// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP117]], align 4
|
|
// CHECK1-NEXT: [[TMP118:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238.region_id, ptr [[KERNEL_ARGS33]])
|
|
// CHECK1-NEXT: [[TMP119:%.*]] = icmp ne i32 [[TMP118]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP119]], label [[OMP_OFFLOAD_FAILED34:%.*]], label [[OMP_OFFLOAD_CONT35:%.*]]
|
|
// CHECK1: omp_offload.failed34:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238(ptr [[BIT_VAR]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT35]]
|
|
// CHECK1: omp_offload.cont35:
|
|
// CHECK1-NEXT: [[TMP120:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP120]], align 8
|
|
// CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP121]], align 8
|
|
// CHECK1-NEXT: [[TMP122:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP122]], align 8
|
|
// CHECK1-NEXT: [[TMP123:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP125:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP125]], align 4
|
|
// CHECK1-NEXT: [[TMP126:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 1, ptr [[TMP126]], align 4
|
|
// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP123]], ptr [[TMP127]], align 8
|
|
// CHECK1-NEXT: [[TMP128:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP124]], ptr [[TMP128]], align 8
|
|
// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.11, ptr [[TMP129]], align 8
|
|
// CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP130]], align 8
|
|
// CHECK1-NEXT: [[TMP131:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP131]], align 8
|
|
// CHECK1-NEXT: [[TMP132:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP132]], align 8
|
|
// CHECK1-NEXT: [[TMP133:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP133]], align 8
|
|
// CHECK1-NEXT: [[TMP134:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP134]], align 8
|
|
// CHECK1-NEXT: [[TMP135:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP135]], align 4
|
|
// CHECK1-NEXT: [[TMP136:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP136]], align 4
|
|
// CHECK1-NEXT: [[TMP137:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP137]], align 4
|
|
// CHECK1-NEXT: [[TMP138:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243.region_id, ptr [[KERNEL_ARGS40]])
|
|
// CHECK1-NEXT: [[TMP139:%.*]] = icmp ne i32 [[TMP138]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP139]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]]
|
|
// CHECK1: omp_offload.failed41:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243(ptr [[BIT_VAR]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT42]]
|
|
// CHECK1: omp_offload.cont42:
|
|
// CHECK1-NEXT: [[TMP140:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS43]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP140]], align 8
|
|
// CHECK1-NEXT: [[TMP141:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS44]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP141]], align 8
|
|
// CHECK1-NEXT: [[TMP142:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS45]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP142]], align 8
|
|
// CHECK1-NEXT: [[TMP143:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS43]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP144:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS44]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP145]], align 4
|
|
// CHECK1-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 1, ptr [[TMP146]], align 4
|
|
// CHECK1-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP143]], ptr [[TMP147]], align 8
|
|
// CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP144]], ptr [[TMP148]], align 8
|
|
// CHECK1-NEXT: [[TMP149:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.13, ptr [[TMP149]], align 8
|
|
// CHECK1-NEXT: [[TMP150:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP150]], align 8
|
|
// CHECK1-NEXT: [[TMP151:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP151]], align 8
|
|
// CHECK1-NEXT: [[TMP152:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP152]], align 8
|
|
// CHECK1-NEXT: [[TMP153:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP153]], align 8
|
|
// CHECK1-NEXT: [[TMP154:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP154]], align 8
|
|
// CHECK1-NEXT: [[TMP155:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP155]], align 4
|
|
// CHECK1-NEXT: [[TMP156:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP156]], align 4
|
|
// CHECK1-NEXT: [[TMP157:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP157]], align 4
|
|
// CHECK1-NEXT: [[TMP158:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248.region_id, ptr [[KERNEL_ARGS47]])
|
|
// CHECK1-NEXT: [[TMP159:%.*]] = icmp ne i32 [[TMP158]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP159]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
|
|
// CHECK1: omp_offload.failed48:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248(ptr [[BIT_VAR]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT49]]
|
|
// CHECK1: omp_offload.cont49:
|
|
// CHECK1-NEXT: store i32 0, ptr @_ZZ4mainE5sivar, align 4
|
|
// CHECK1-NEXT: [[TMP160:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP160]], align 8
|
|
// CHECK1-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP161]], align 8
|
|
// CHECK1-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP162]], align 8
|
|
// CHECK1-NEXT: [[TMP163:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP164:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP165:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP165]], align 4
|
|
// CHECK1-NEXT: [[TMP166:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 1, ptr [[TMP166]], align 4
|
|
// CHECK1-NEXT: [[TMP167:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP163]], ptr [[TMP167]], align 8
|
|
// CHECK1-NEXT: [[TMP168:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP164]], ptr [[TMP168]], align 8
|
|
// CHECK1-NEXT: [[TMP169:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.15, ptr [[TMP169]], align 8
|
|
// CHECK1-NEXT: [[TMP170:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP170]], align 8
|
|
// CHECK1-NEXT: [[TMP171:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP171]], align 8
|
|
// CHECK1-NEXT: [[TMP172:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP172]], align 8
|
|
// CHECK1-NEXT: [[TMP173:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP173]], align 8
|
|
// CHECK1-NEXT: [[TMP174:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP174]], align 8
|
|
// CHECK1-NEXT: [[TMP175:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP175]], align 4
|
|
// CHECK1-NEXT: [[TMP176:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP176]], align 4
|
|
// CHECK1-NEXT: [[TMP177:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP177]], align 4
|
|
// CHECK1-NEXT: [[TMP178:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254.region_id, ptr [[KERNEL_ARGS54]])
|
|
// CHECK1-NEXT: [[TMP179:%.*]] = icmp ne i32 [[TMP178]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP179]], label [[OMP_OFFLOAD_FAILED55:%.*]], label [[OMP_OFFLOAD_CONT56:%.*]]
|
|
// CHECK1: omp_offload.failed55:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254(ptr @_ZZ4mainE5sivar) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT56]]
|
|
// CHECK1: omp_offload.cont56:
|
|
// CHECK1-NEXT: store i32 10, ptr @_ZZ4mainE5sivar, align 4
|
|
// CHECK1-NEXT: [[TMP180:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP180]], align 8
|
|
// CHECK1-NEXT: [[TMP181:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP181]], align 8
|
|
// CHECK1-NEXT: [[TMP182:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS59]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP182]], align 8
|
|
// CHECK1-NEXT: [[TMP183:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP184:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP185:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP185]], align 4
|
|
// CHECK1-NEXT: [[TMP186:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 1, ptr [[TMP186]], align 4
|
|
// CHECK1-NEXT: [[TMP187:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP183]], ptr [[TMP187]], align 8
|
|
// CHECK1-NEXT: [[TMP188:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP184]], ptr [[TMP188]], align 8
|
|
// CHECK1-NEXT: [[TMP189:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.17, ptr [[TMP189]], align 8
|
|
// CHECK1-NEXT: [[TMP190:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP190]], align 8
|
|
// CHECK1-NEXT: [[TMP191:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP191]], align 8
|
|
// CHECK1-NEXT: [[TMP192:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP192]], align 8
|
|
// CHECK1-NEXT: [[TMP193:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP193]], align 8
|
|
// CHECK1-NEXT: [[TMP194:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP194]], align 8
|
|
// CHECK1-NEXT: [[TMP195:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP195]], align 4
|
|
// CHECK1-NEXT: [[TMP196:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP196]], align 4
|
|
// CHECK1-NEXT: [[TMP197:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP197]], align 4
|
|
// CHECK1-NEXT: [[TMP198:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260.region_id, ptr [[KERNEL_ARGS61]])
|
|
// CHECK1-NEXT: [[TMP199:%.*]] = icmp ne i32 [[TMP198]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP199]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
|
|
// CHECK1: omp_offload.failed62:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260(ptr @_ZZ4mainE5sivar) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT63]]
|
|
// CHECK1: omp_offload.cont63:
|
|
// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiET_v()
|
|
// CHECK1-NEXT: ret i32 [[CALL]]
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209.omp_outlined, ptr [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[SIVAR1]], ptr [[TMP12]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK1-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214.omp_outlined, ptr [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP10]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[SUB]], ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[SIVAR1]], ptr [[TMP12]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220.omp_outlined, ptr [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 1, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: store i32 [[MUL3]], ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[SIVAR1]], ptr [[TMP12]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK1-NEXT: store i32 [[MUL5]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, ptr [[TMP0]] monotonic, align 4
|
|
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
// CHECK1: atomic_cont:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP22:%.*]], [[ATOMIC_CONT]] ]
|
|
// CHECK1-NEXT: store i32 [[TMP17]], ptr [[_TMP6]], align 4
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[_TMP6]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[MUL7:%.*]] = mul nsw i32 [[TMP18]], [[TMP19]]
|
|
// CHECK1-NEXT: store i32 [[MUL7]], ptr [[ATOMIC_TEMP]], align 4
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = cmpxchg ptr [[TMP0]], i32 [[TMP17]], i32 [[TMP20]] monotonic monotonic, align 4
|
|
// CHECK1-NEXT: [[TMP22]] = extractvalue { i32, i1 } [[TMP21]], 0
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = extractvalue { i32, i1 } [[TMP21]], 1
|
|
// CHECK1-NEXT: br i1 [[TMP23]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
// CHECK1: atomic_exit:
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[MUL]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226
|
|
// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(1) [[AND_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[AND_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[AND_VAR]], ptr [[AND_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AND_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226.omp_outlined, ptr [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[AND_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[AND_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[AND_VAR1:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[_TMP12:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[AND_VAR]], ptr [[AND_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AND_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i8 1, ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
// CHECK1: land.rhs:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[CMP3:%.*]] = icmp eq i32 [[TMP10]], 0
|
|
// CHECK1-NEXT: br label [[LAND_END]]
|
|
// CHECK1: land.end:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = phi i1 [ false, [[OMP_INNER_FOR_BODY]] ], [ [[CMP3]], [[LAND_RHS]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP11]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[AND_VAR1]], ptr [[TMP13]], align 8
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[TMP0]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP15]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL5]], label [[LAND_RHS6:%.*]], label [[LAND_END8:%.*]]
|
|
// CHECK1: land.rhs6:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL7:%.*]] = trunc i8 [[TMP16]] to i1
|
|
// CHECK1-NEXT: br label [[LAND_END8]]
|
|
// CHECK1: land.end8:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LAND_RHS6]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[TMP17]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL9]], ptr [[TMP0]], align 1
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[TMP18]] to i1
|
|
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[TMP0]] monotonic, align 1
|
|
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
// CHECK1: atomic_cont:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP25:%.*]], [[LAND_END17:%.*]] ]
|
|
// CHECK1-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP19]] to i1
|
|
// CHECK1-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[TOBOOL11]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL13]], ptr [[_TMP12]], align 1
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[_TMP12]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[TMP20]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL14]], label [[LAND_RHS15:%.*]], label [[LAND_END17]]
|
|
// CHECK1: land.rhs15:
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[TMP21]] to i1
|
|
// CHECK1-NEXT: br label [[LAND_END17]]
|
|
// CHECK1: land.end17:
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = phi i1 [ false, [[ATOMIC_CONT]] ], [ [[TOBOOL16]], [[LAND_RHS15]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL18:%.*]] = zext i1 [[TMP22]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL18]], ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = cmpxchg ptr [[TMP0]], i8 [[TMP19]], i8 [[TMP23]] monotonic monotonic, align 1
|
|
// CHECK1-NEXT: [[TMP25]] = extractvalue { i8, i1 } [[TMP24]], 0
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = extractvalue { i8, i1 } [[TMP24]], 1
|
|
// CHECK1-NEXT: br i1 [[TMP26]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
// CHECK1: atomic_exit:
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
// CHECK1: land.rhs:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP5]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP9]] to i1
|
|
// CHECK1-NEXT: br label [[LAND_END]]
|
|
// CHECK1: land.end:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL2]], [[LAND_RHS]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP10]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[TMP7]], align 1
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232
|
|
// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(1) [[OR_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[OR_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[OR_VAR]], ptr [[OR_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OR_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232.omp_outlined, ptr [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[OR_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[OR_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[OR_VAR1:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[_TMP12:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[OR_VAR]], ptr [[OR_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OR_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i8 0, ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]]
|
|
// CHECK1: lor.rhs:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[CMP3:%.*]] = icmp eq i32 [[TMP10]], 0
|
|
// CHECK1-NEXT: br label [[LOR_END]]
|
|
// CHECK1: lor.end:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = phi i1 [ true, [[OMP_INNER_FOR_BODY]] ], [ [[CMP3]], [[LOR_RHS]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP11]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[OR_VAR1]], ptr [[TMP13]], align 8
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i8, ptr [[TMP0]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP15]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL5]], label [[LOR_END8:%.*]], label [[LOR_RHS6:%.*]]
|
|
// CHECK1: lor.rhs6:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL7:%.*]] = trunc i8 [[TMP16]] to i1
|
|
// CHECK1-NEXT: br label [[LOR_END8]]
|
|
// CHECK1: lor.end8:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = phi i1 [ true, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LOR_RHS6]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[TMP17]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL9]], ptr [[TMP0]], align 1
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[TMP18]] to i1
|
|
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[TMP0]] monotonic, align 1
|
|
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
// CHECK1: atomic_cont:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP25:%.*]], [[LOR_END17:%.*]] ]
|
|
// CHECK1-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP19]] to i1
|
|
// CHECK1-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[TOBOOL11]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL13]], ptr [[_TMP12]], align 1
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[_TMP12]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[TMP20]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL14]], label [[LOR_END17]], label [[LOR_RHS15:%.*]]
|
|
// CHECK1: lor.rhs15:
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[TMP21]] to i1
|
|
// CHECK1-NEXT: br label [[LOR_END17]]
|
|
// CHECK1: lor.end17:
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = phi i1 [ true, [[ATOMIC_CONT]] ], [ [[TOBOOL16]], [[LOR_RHS15]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL18:%.*]] = zext i1 [[TMP22]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL18]], ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = cmpxchg ptr [[TMP0]], i8 [[TMP19]], i8 [[TMP23]] monotonic monotonic, align 1
|
|
// CHECK1-NEXT: [[TMP25]] = extractvalue { i8, i1 } [[TMP24]], 0
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = extractvalue { i8, i1 } [[TMP24]], 1
|
|
// CHECK1-NEXT: br i1 [[TMP26]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
// CHECK1: atomic_exit:
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]]
|
|
// CHECK1: lor.rhs:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP5]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP9]] to i1
|
|
// CHECK1-NEXT: br label [[LOR_END]]
|
|
// CHECK1: lor.end:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[TOBOOL2]], [[LOR_RHS]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP10]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[TMP7]], align 1
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238.omp_outlined, ptr [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 -1, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[AND:%.*]] = and i32 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: store i32 [[AND]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP12]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[AND4:%.*]] = and i32 [[TMP14]], [[TMP15]]
|
|
// CHECK1-NEXT: store i32 [[AND4]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = atomicrmw and ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[AND:%.*]] = and i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[AND]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243.omp_outlined, ptr [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[OR:%.*]] = or i32 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: store i32 [[OR]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP12]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[OR4:%.*]] = or i32 [[TMP14]], [[TMP15]]
|
|
// CHECK1-NEXT: store i32 [[OR4]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = atomicrmw or ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243.omp_outlined.omp.reduction.reduction_func
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK1-NEXT: [[OR:%.*]] = or i32 [[TMP8]], [[TMP9]]
// CHECK1-NEXT: store i32 [[OR]], ptr [[TMP7]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248.omp_outlined, ptr [[TMP0]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[XOR:%.*]] = xor i32 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: store i32 [[XOR]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP12]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[XOR4:%.*]] = xor i32 [[TMP14]], [[TMP15]]
|
|
// CHECK1-NEXT: store i32 [[XOR4]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = atomicrmw xor ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[XOR:%.*]] = xor i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[XOR]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254.omp_outlined, ptr [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 -2147483648, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[CMP3:%.*]] = icmp sge i32 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
|
|
// CHECK1: cond.true4:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END6:%.*]]
|
|
// CHECK1: cond.false5:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END6]]
|
|
// CHECK1: cond.end6:
|
|
// CHECK1-NEXT: [[COND7:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE4]] ], [ [[TMP12]], [[COND_FALSE5]] ]
|
|
// CHECK1-NEXT: store i32 [[COND7]], ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[SIVAR1]], ptr [[TMP14]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP16]], [[TMP17]]
|
|
// CHECK1-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
|
|
// CHECK1: cond.true10:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END12:%.*]]
|
|
// CHECK1: cond.false11:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END12]]
|
|
// CHECK1: cond.end12:
|
|
// CHECK1-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP18]], [[COND_TRUE10]] ], [ [[TMP19]], [[COND_FALSE11]] ]
|
|
// CHECK1-NEXT: store i32 [[COND13]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw max ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260.omp_outlined, ptr [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 2147483647, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
|
|
// CHECK1: cond.true4:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END6:%.*]]
|
|
// CHECK1: cond.false5:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END6]]
|
|
// CHECK1: cond.end6:
|
|
// CHECK1-NEXT: [[COND7:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE4]] ], [ [[TMP12]], [[COND_FALSE5]] ]
|
|
// CHECK1-NEXT: store i32 [[COND7]], ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[SIVAR1]], ptr [[TMP14]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[CMP9:%.*]] = icmp slt i32 [[TMP16]], [[TMP17]]
|
|
// CHECK1-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
|
|
// CHECK1: cond.true10:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END12:%.*]]
|
|
// CHECK1: cond.false11:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END12]]
|
|
// CHECK1: cond.end12:
|
|
// CHECK1-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP18]], [[COND_TRUE10]] ], [ [[TMP19]], [[COND_FALSE11]] ]
|
|
// CHECK1-NEXT: store i32 [[COND13]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = atomicrmw min ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
|
|
// CHECK1-SAME: () #[[ATTR5:[0-9]+]] comdat {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[AND_VAR:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[OR_VAR:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP25:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS26:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[BIT_VAR:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP32:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS33:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP39:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS40:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS43:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS44:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS45:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP46:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS47:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP53:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS54:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS58:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [2 x ptr], align 8
|
|
// CHECK1-NEXT: [[_TMP60:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[T_VAR]], align 4
|
|
// CHECK1-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i64 8, i1 false)
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP0]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP2]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 1, ptr [[TMP6]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 8
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.19, ptr [[TMP9]], align 8
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.20, ptr [[TMP10]], align 8
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP11]], align 8
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP12]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP13]], align 8
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP14]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP17]], align 4
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]])
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK1: omp_offload.failed:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(ptr [[T_VAR]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK1: omp_offload.cont:
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP20]], align 8
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP21]], align 8
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP22]], align 8
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP23]], align 8
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP24]], align 8
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 1
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP25]], align 8
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP28]], align 4
|
|
// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 2, ptr [[TMP29]], align 4
|
|
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP26]], ptr [[TMP30]], align 8
|
|
// CHECK1-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP27]], ptr [[TMP31]], align 8
|
|
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.21, ptr [[TMP32]], align 8
|
|
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.22, ptr [[TMP33]], align 8
|
|
// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP34]], align 8
|
|
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP35]], align 8
|
|
// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP36]], align 8
|
|
// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP37]], align 8
|
|
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP38]], align 4
|
|
// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP39]], align 4
|
|
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP40]], align 4
|
|
// CHECK1-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37.region_id, ptr [[KERNEL_ARGS5]])
|
|
// CHECK1-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
|
|
// CHECK1: omp_offload.failed6:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37(ptr [[T_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT7]]
|
|
// CHECK1: omp_offload.cont7:
|
|
// CHECK1-NEXT: store i32 1, ptr [[T_VAR]], align 4
|
|
// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP43]], align 8
|
|
// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP44]], align 8
|
|
// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP45]], align 8
|
|
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP46]], align 8
|
|
// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP47]], align 8
|
|
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 1
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP48]], align 8
|
|
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP51]], align 4
|
|
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 2, ptr [[TMP52]], align 4
|
|
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP49]], ptr [[TMP53]], align 8
|
|
// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP50]], ptr [[TMP54]], align 8
|
|
// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.23, ptr [[TMP55]], align 8
|
|
// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.24, ptr [[TMP56]], align 8
|
|
// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP57]], align 8
|
|
// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP58]], align 8
|
|
// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP59]], align 8
|
|
// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP60]], align 8
|
|
// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP61]], align 4
|
|
// CHECK1-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP62]], align 4
|
|
// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP63]], align 4
|
|
// CHECK1-NEXT: [[TMP64:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43.region_id, ptr [[KERNEL_ARGS12]])
|
|
// CHECK1-NEXT: [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
|
|
// CHECK1: omp_offload.failed13:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43(ptr [[T_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT14]]
|
|
// CHECK1: omp_offload.cont14:
|
|
// CHECK1-NEXT: store i8 1, ptr [[AND_VAR]], align 1
|
|
// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[AND_VAR]], ptr [[TMP66]], align 8
|
|
// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[AND_VAR]], ptr [[TMP67]], align 8
|
|
// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP68]], align 8
|
|
// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP69]], align 8
|
|
// CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP70]], align 8
|
|
// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i64 0, i64 1
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP71]], align 8
|
|
// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP74]], align 4
|
|
// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 2, ptr [[TMP75]], align 4
|
|
// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP72]], ptr [[TMP76]], align 8
|
|
// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP73]], ptr [[TMP77]], align 8
|
|
// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.25, ptr [[TMP78]], align 8
|
|
// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.26, ptr [[TMP79]], align 8
|
|
// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP80]], align 8
|
|
// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP81]], align 8
|
|
// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP82]], align 8
|
|
// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP83]], align 8
|
|
// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP84]], align 4
|
|
// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP85]], align 4
|
|
// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP86]], align 4
|
|
// CHECK1-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.region_id, ptr [[KERNEL_ARGS19]])
|
|
// CHECK1-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
|
|
// CHECK1: omp_offload.failed20:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49(ptr [[AND_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT21]]
|
|
// CHECK1: omp_offload.cont21:
|
|
// CHECK1-NEXT: store i8 0, ptr [[OR_VAR]], align 1
|
|
// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[OR_VAR]], ptr [[TMP89]], align 8
|
|
// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[OR_VAR]], ptr [[TMP90]], align 8
|
|
// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP91]], align 8
|
|
// CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP92]], align 8
|
|
// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP93]], align 8
|
|
// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP94]], align 8
|
|
// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP97]], align 4
|
|
// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 2, ptr [[TMP98]], align 4
|
|
// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP95]], ptr [[TMP99]], align 8
|
|
// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP96]], ptr [[TMP100]], align 8
|
|
// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.27, ptr [[TMP101]], align 8
|
|
// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.28, ptr [[TMP102]], align 8
|
|
// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP103]], align 8
|
|
// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP104]], align 8
|
|
// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP105]], align 8
|
|
// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP106]], align 8
|
|
// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP107]], align 4
|
|
// CHECK1-NEXT: [[TMP108:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP108]], align 4
|
|
// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP109]], align 4
|
|
// CHECK1-NEXT: [[TMP110:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55.region_id, ptr [[KERNEL_ARGS26]])
|
|
// CHECK1-NEXT: [[TMP111:%.*]] = icmp ne i32 [[TMP110]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP111]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
|
|
// CHECK1: omp_offload.failed27:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55(ptr [[OR_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT28]]
|
|
// CHECK1: omp_offload.cont28:
|
|
// CHECK1-NEXT: store i32 1, ptr [[BIT_VAR]], align 4
|
|
// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP112]], align 8
|
|
// CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS30]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP113]], align 8
|
|
// CHECK1-NEXT: [[TMP114:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP114]], align 8
|
|
// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP115]], align 8
|
|
// CHECK1-NEXT: [[TMP116:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS30]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP116]], align 8
|
|
// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 1
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP117]], align 8
|
|
// CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP119:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS30]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP120:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP120]], align 4
|
|
// CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 2, ptr [[TMP121]], align 4
|
|
// CHECK1-NEXT: [[TMP122:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP118]], ptr [[TMP122]], align 8
|
|
// CHECK1-NEXT: [[TMP123:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP119]], ptr [[TMP123]], align 8
|
|
// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.29, ptr [[TMP124]], align 8
|
|
// CHECK1-NEXT: [[TMP125:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 5
// CHECK1-NEXT: store ptr @.offload_maptypes.30, ptr [[TMP125]], align 8
// CHECK1-NEXT: [[TMP126:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 6
// CHECK1-NEXT: store ptr null, ptr [[TMP126]], align 8
// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 7
// CHECK1-NEXT: store ptr null, ptr [[TMP127]], align 8
// CHECK1-NEXT: [[TMP128:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 8
// CHECK1-NEXT: store i64 2, ptr [[TMP128]], align 8
// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 9
// CHECK1-NEXT: store i64 0, ptr [[TMP129]], align 8
// CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 10
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP130]], align 4
// CHECK1-NEXT: [[TMP131:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 11
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP131]], align 4
// CHECK1-NEXT: [[TMP132:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP132]], align 4
// CHECK1-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61.region_id, ptr [[KERNEL_ARGS33]])
// CHECK1-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0
// CHECK1-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED34:%.*]], label [[OMP_OFFLOAD_CONT35:%.*]]
// CHECK1: omp_offload.failed34:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61(ptr [[BIT_VAR]], ptr [[VEC]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT35]]
// CHECK1: omp_offload.cont35:
// CHECK1-NEXT: [[TMP135:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP135]], align 8
// CHECK1-NEXT: [[TMP136:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP136]], align 8
// CHECK1-NEXT: [[TMP137:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0
// CHECK1-NEXT: store ptr null, ptr [[TMP137]], align 8
// CHECK1-NEXT: [[TMP138:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP138]], align 8
// CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS37]], i32 0, i32 1
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP139]], align 8
// CHECK1-NEXT: [[TMP140:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 1
// CHECK1-NEXT: store ptr null, ptr [[TMP140]], align 8
// CHECK1-NEXT: [[TMP141:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
// CHECK1-NEXT: [[TMP142:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
// CHECK1-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 0
// CHECK1-NEXT: store i32 3, ptr [[TMP143]], align 4
// CHECK1-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 1
// CHECK1-NEXT: store i32 2, ptr [[TMP144]], align 4
// CHECK1-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 2
// CHECK1-NEXT: store ptr [[TMP141]], ptr [[TMP145]], align 8
// CHECK1-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 3
// CHECK1-NEXT: store ptr [[TMP142]], ptr [[TMP146]], align 8
// CHECK1-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 4
// CHECK1-NEXT: store ptr @.offload_sizes.31, ptr [[TMP147]], align 8
// CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 5
// CHECK1-NEXT: store ptr @.offload_maptypes.32, ptr [[TMP148]], align 8
// CHECK1-NEXT: [[TMP149:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 6
// CHECK1-NEXT: store ptr null, ptr [[TMP149]], align 8
// CHECK1-NEXT: [[TMP150:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 7
// CHECK1-NEXT: store ptr null, ptr [[TMP150]], align 8
// CHECK1-NEXT: [[TMP151:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 8
// CHECK1-NEXT: store i64 2, ptr [[TMP151]], align 8
// CHECK1-NEXT: [[TMP152:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 9
// CHECK1-NEXT: store i64 0, ptr [[TMP152]], align 8
// CHECK1-NEXT: [[TMP153:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 10
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP153]], align 4
// CHECK1-NEXT: [[TMP154:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 11
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP154]], align 4
// CHECK1-NEXT: [[TMP155:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 12
// CHECK1-NEXT: store i32 0, ptr [[TMP155]], align 4
// CHECK1-NEXT: [[TMP156:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, ptr [[KERNEL_ARGS40]])
// CHECK1-NEXT: [[TMP157:%.*]] = icmp ne i32 [[TMP156]], 0
// CHECK1-NEXT: br i1 [[TMP157]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]]
// CHECK1: omp_offload.failed41:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(ptr [[BIT_VAR]], ptr [[VEC]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT42]]
// CHECK1: omp_offload.cont42:
// CHECK1-NEXT: [[TMP158:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS43]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP158]], align 8
|
|
// CHECK1-NEXT: [[TMP159:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS44]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[TMP159]], align 8
|
|
// CHECK1-NEXT: [[TMP160:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS45]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP160]], align 8
|
|
// CHECK1-NEXT: [[TMP161:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS43]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP161]], align 8
|
|
// CHECK1-NEXT: [[TMP162:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS44]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP162]], align 8
|
|
// CHECK1-NEXT: [[TMP163:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS45]], i64 0, i64 1
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP163]], align 8
|
|
// CHECK1-NEXT: [[TMP164:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS43]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP165:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS44]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP166:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP166]], align 4
|
|
// CHECK1-NEXT: [[TMP167:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 2, ptr [[TMP167]], align 4
|
|
// CHECK1-NEXT: [[TMP168:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP164]], ptr [[TMP168]], align 8
|
|
// CHECK1-NEXT: [[TMP169:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP165]], ptr [[TMP169]], align 8
|
|
// CHECK1-NEXT: [[TMP170:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.33, ptr [[TMP170]], align 8
|
|
// CHECK1-NEXT: [[TMP171:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.34, ptr [[TMP171]], align 8
|
|
// CHECK1-NEXT: [[TMP172:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP172]], align 8
|
|
// CHECK1-NEXT: [[TMP173:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP173]], align 8
|
|
// CHECK1-NEXT: [[TMP174:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP174]], align 8
|
|
// CHECK1-NEXT: [[TMP175:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP175]], align 8
|
|
// CHECK1-NEXT: [[TMP176:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP176]], align 4
|
|
// CHECK1-NEXT: [[TMP177:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP177]], align 4
|
|
// CHECK1-NEXT: [[TMP178:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP178]], align 4
|
|
// CHECK1-NEXT: [[TMP179:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71.region_id, ptr [[KERNEL_ARGS47]])
|
|
// CHECK1-NEXT: [[TMP180:%.*]] = icmp ne i32 [[TMP179]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP180]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
|
|
// CHECK1: omp_offload.failed48:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71(ptr [[BIT_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT49]]
|
|
// CHECK1: omp_offload.cont49:
|
|
// CHECK1-NEXT: store i32 0, ptr [[T_VAR]], align 4
|
|
// CHECK1-NEXT: [[TMP181:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP181]], align 8
|
|
// CHECK1-NEXT: [[TMP182:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP182]], align 8
|
|
// CHECK1-NEXT: [[TMP183:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP183]], align 8
|
|
// CHECK1-NEXT: [[TMP184:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP184]], align 8
|
|
// CHECK1-NEXT: [[TMP185:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP185]], align 8
|
|
// CHECK1-NEXT: [[TMP186:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP186]], align 8
|
|
// CHECK1-NEXT: [[TMP187:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP188:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP189:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP189]], align 4
|
|
// CHECK1-NEXT: [[TMP190:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 2, ptr [[TMP190]], align 4
|
|
// CHECK1-NEXT: [[TMP191:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP187]], ptr [[TMP191]], align 8
|
|
// CHECK1-NEXT: [[TMP192:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP188]], ptr [[TMP192]], align 8
|
|
// CHECK1-NEXT: [[TMP193:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.35, ptr [[TMP193]], align 8
|
|
// CHECK1-NEXT: [[TMP194:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.36, ptr [[TMP194]], align 8
|
|
// CHECK1-NEXT: [[TMP195:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP195]], align 8
|
|
// CHECK1-NEXT: [[TMP196:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP196]], align 8
|
|
// CHECK1-NEXT: [[TMP197:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP197]], align 8
|
|
// CHECK1-NEXT: [[TMP198:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP198]], align 8
|
|
// CHECK1-NEXT: [[TMP199:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP199]], align 4
|
|
// CHECK1-NEXT: [[TMP200:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP200]], align 4
|
|
// CHECK1-NEXT: [[TMP201:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP201]], align 4
|
|
// CHECK1-NEXT: [[TMP202:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77.region_id, ptr [[KERNEL_ARGS54]])
|
|
// CHECK1-NEXT: [[TMP203:%.*]] = icmp ne i32 [[TMP202]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP203]], label [[OMP_OFFLOAD_FAILED55:%.*]], label [[OMP_OFFLOAD_CONT56:%.*]]
|
|
// CHECK1: omp_offload.failed55:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77(ptr [[T_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT56]]
|
|
// CHECK1: omp_offload.cont56:
|
|
// CHECK1-NEXT: store i32 10, ptr [[T_VAR]], align 4
|
|
// CHECK1-NEXT: [[TMP204:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP204]], align 8
|
|
// CHECK1-NEXT: [[TMP205:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[TMP205]], align 8
|
|
// CHECK1-NEXT: [[TMP206:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS59]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP206]], align 8
|
|
// CHECK1-NEXT: [[TMP207:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP207]], align 8
|
|
// CHECK1-NEXT: [[TMP208:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[TMP208]], align 8
|
|
// CHECK1-NEXT: [[TMP209:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS59]], i64 0, i64 1
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP209]], align 8
|
|
// CHECK1-NEXT: [[TMP210:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP211:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
|
|
// CHECK1-NEXT: [[TMP212:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
|
|
// CHECK1-NEXT: store i32 3, ptr [[TMP212]], align 4
|
|
// CHECK1-NEXT: [[TMP213:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
|
|
// CHECK1-NEXT: store i32 2, ptr [[TMP213]], align 4
|
|
// CHECK1-NEXT: [[TMP214:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
|
|
// CHECK1-NEXT: store ptr [[TMP210]], ptr [[TMP214]], align 8
|
|
// CHECK1-NEXT: [[TMP215:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
|
|
// CHECK1-NEXT: store ptr [[TMP211]], ptr [[TMP215]], align 8
|
|
// CHECK1-NEXT: [[TMP216:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
|
|
// CHECK1-NEXT: store ptr @.offload_sizes.37, ptr [[TMP216]], align 8
|
|
// CHECK1-NEXT: [[TMP217:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
|
|
// CHECK1-NEXT: store ptr @.offload_maptypes.38, ptr [[TMP217]], align 8
|
|
// CHECK1-NEXT: [[TMP218:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP218]], align 8
|
|
// CHECK1-NEXT: [[TMP219:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
|
|
// CHECK1-NEXT: store ptr null, ptr [[TMP219]], align 8
|
|
// CHECK1-NEXT: [[TMP220:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
|
|
// CHECK1-NEXT: store i64 2, ptr [[TMP220]], align 8
|
|
// CHECK1-NEXT: [[TMP221:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
|
|
// CHECK1-NEXT: store i64 0, ptr [[TMP221]], align 8
|
|
// CHECK1-NEXT: [[TMP222:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP222]], align 4
|
|
// CHECK1-NEXT: [[TMP223:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
|
|
// CHECK1-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP223]], align 4
|
|
// CHECK1-NEXT: [[TMP224:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
|
|
// CHECK1-NEXT: store i32 0, ptr [[TMP224]], align 4
|
|
// CHECK1-NEXT: [[TMP225:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83.region_id, ptr [[KERNEL_ARGS61]])
|
|
// CHECK1-NEXT: [[TMP226:%.*]] = icmp ne i32 [[TMP225]], 0
|
|
// CHECK1-NEXT: br i1 [[TMP226]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
|
|
// CHECK1: omp_offload.failed62:
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83(ptr [[T_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT63]]
|
|
// CHECK1: omp_offload.cont63:
|
|
// CHECK1-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.omp_outlined, ptr [[TMP0]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR1]], ptr [[TMP12]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK1-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP10]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP12]], [[TMP11]]
|
|
// CHECK1-NEXT: store i32 [[SUB]], ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR1]], ptr [[TMP14]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 1, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP10]], [[TMP12]]
|
|
// CHECK1-NEXT: store i32 [[MUL3]], ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR1]], ptr [[TMP14]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK1-NEXT: store i32 [[MUL5]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, ptr [[TMP0]] monotonic, align 4
|
|
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
// CHECK1: atomic_cont:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP24:%.*]], [[ATOMIC_CONT]] ]
|
|
// CHECK1-NEXT: store i32 [[TMP19]], ptr [[_TMP6]], align 4
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[_TMP6]], align 4
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[MUL7:%.*]] = mul nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK1-NEXT: store i32 [[MUL7]], ptr [[ATOMIC_TEMP]], align 4
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = cmpxchg ptr [[TMP0]], i32 [[TMP19]], i32 [[TMP22]] monotonic monotonic, align 4
|
|
// CHECK1-NEXT: [[TMP24]] = extractvalue { i32, i1 } [[TMP23]], 0
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = extractvalue { i32, i1 } [[TMP23]], 1
|
|
// CHECK1-NEXT: br i1 [[TMP25]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
// CHECK1: atomic_exit:
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[MUL]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49
|
|
// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(1) [[AND_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[AND_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[AND_VAR]], ptr [[AND_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AND_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[AND_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[AND_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[AND_VAR1:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[_TMP12:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[AND_VAR]], ptr [[AND_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AND_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: store i8 1, ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP10]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
// CHECK1: land.rhs:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[REM:%.*]] = srem i32 [[TMP12]], 2
|
|
// CHECK1-NEXT: [[CMP3:%.*]] = icmp eq i32 [[REM]], 0
|
|
// CHECK1-NEXT: br label [[LAND_END]]
|
|
// CHECK1: land.end:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = phi i1 [ false, [[OMP_INNER_FOR_BODY]] ], [ [[CMP3]], [[LAND_RHS]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP13]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[AND_VAR1]], ptr [[TMP15]], align 8
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP16]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i8, ptr [[TMP0]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP17]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL5]], label [[LAND_RHS6:%.*]], label [[LAND_END8:%.*]]
|
|
// CHECK1: land.rhs6:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL7:%.*]] = trunc i8 [[TMP18]] to i1
|
|
// CHECK1-NEXT: br label [[LAND_END8]]
|
|
// CHECK1: land.end8:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LAND_RHS6]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[TMP19]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL9]], ptr [[TMP0]], align 1
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[TMP20]] to i1
|
|
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[TMP0]] monotonic, align 1
|
|
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
// CHECK1: atomic_cont:
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP27:%.*]], [[LAND_END17:%.*]] ]
|
|
// CHECK1-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP21]] to i1
|
|
// CHECK1-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[TOBOOL11]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL13]], ptr [[_TMP12]], align 1
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[_TMP12]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[TMP22]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL14]], label [[LAND_RHS15:%.*]], label [[LAND_END17]]
|
|
// CHECK1: land.rhs15:
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[TMP23]] to i1
|
|
// CHECK1-NEXT: br label [[LAND_END17]]
|
|
// CHECK1: land.end17:
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = phi i1 [ false, [[ATOMIC_CONT]] ], [ [[TOBOOL16]], [[LAND_RHS15]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL18:%.*]] = zext i1 [[TMP24]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL18]], ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = cmpxchg ptr [[TMP0]], i8 [[TMP21]], i8 [[TMP25]] monotonic monotonic, align 1
|
|
// CHECK1-NEXT: [[TMP27]] = extractvalue { i8, i1 } [[TMP26]], 0
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = extractvalue { i8, i1 } [[TMP26]], 1
|
|
// CHECK1-NEXT: br i1 [[TMP28]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
// CHECK1: atomic_exit:
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
// CHECK1: land.rhs:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP5]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP9]] to i1
|
|
// CHECK1-NEXT: br label [[LAND_END]]
|
|
// CHECK1: land.end:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL2]], [[LAND_RHS]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP10]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[TMP7]], align 1
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55
|
|
// CHECK1-SAME: (ptr noundef nonnull align 1 dereferenceable(1) [[OR_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[OR_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[OR_VAR]], ptr [[OR_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OR_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[OR_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[OR_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[OR_VAR1:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: [[_TMP12:%.*]] = alloca i8, align 1
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[OR_VAR]], ptr [[OR_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OR_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: store i8 0, ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP10]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]]
|
|
// CHECK1: lor.rhs:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[REM:%.*]] = srem i32 [[TMP12]], 2
|
|
// CHECK1-NEXT: [[CMP3:%.*]] = icmp eq i32 [[REM]], 0
|
|
// CHECK1-NEXT: br label [[LOR_END]]
|
|
// CHECK1: lor.end:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = phi i1 [ true, [[OMP_INNER_FOR_BODY]] ], [ [[CMP3]], [[LOR_RHS]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP13]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[OR_VAR1]], ptr [[TMP15]], align 8
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP16]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i8, ptr [[TMP0]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP17]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL5]], label [[LOR_END8:%.*]], label [[LOR_RHS6:%.*]]
|
|
// CHECK1: lor.rhs6:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL7:%.*]] = trunc i8 [[TMP18]] to i1
|
|
// CHECK1-NEXT: br label [[LOR_END8]]
|
|
// CHECK1: lor.end8:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = phi i1 [ true, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LOR_RHS6]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[TMP19]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL9]], ptr [[TMP0]], align 1
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[TMP20]] to i1
|
|
// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[TMP0]] monotonic, align 1
|
|
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
// CHECK1: atomic_cont:
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP27:%.*]], [[LOR_END17:%.*]] ]
|
|
// CHECK1-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP21]] to i1
|
|
// CHECK1-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[TOBOOL11]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL13]], ptr [[_TMP12]], align 1
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i8, ptr [[_TMP12]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[TMP22]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL14]], label [[LOR_END17]], label [[LOR_RHS15:%.*]]
|
|
// CHECK1: lor.rhs15:
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[TMP23]] to i1
|
|
// CHECK1-NEXT: br label [[LOR_END17]]
|
|
// CHECK1: lor.end17:
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = phi i1 [ true, [[ATOMIC_CONT]] ], [ [[TOBOOL16]], [[LOR_RHS15]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL18:%.*]] = zext i1 [[TMP24]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL18]], ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = cmpxchg ptr [[TMP0]], i8 [[TMP21]], i8 [[TMP25]] monotonic monotonic, align 1
|
|
// CHECK1-NEXT: [[TMP27]] = extractvalue { i8, i1 } [[TMP26]], 0
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = extractvalue { i8, i1 } [[TMP26]], 1
|
|
// CHECK1-NEXT: br i1 [[TMP28]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
// CHECK1: atomic_exit:
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
|
|
// CHECK1-NEXT: br i1 [[TOBOOL]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]]
|
|
// CHECK1: lor.rhs:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP5]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP9]] to i1
|
|
// CHECK1-NEXT: br label [[LOR_END]]
|
|
// CHECK1: lor.end:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[TOBOOL2]], [[LOR_RHS]] ]
|
|
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP10]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[TMP7]], align 1
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 -1, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[AND:%.*]] = and i32 [[TMP10]], [[TMP12]]
|
|
// CHECK1-NEXT: store i32 [[AND]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP14]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[AND4:%.*]] = and i32 [[TMP16]], [[TMP17]]
|
|
// CHECK1-NEXT: store i32 [[AND4]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = atomicrmw and ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[AND:%.*]] = and i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[AND]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[OR:%.*]] = or i32 [[TMP10]], [[TMP12]]
|
|
// CHECK1-NEXT: store i32 [[OR]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP14]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[OR4:%.*]] = or i32 [[TMP16]], [[TMP17]]
|
|
// CHECK1-NEXT: store i32 [[OR4]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = atomicrmw or ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[OR:%.*]] = or i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[OR]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[XOR:%.*]] = xor i32 [[TMP10]], [[TMP12]]
|
|
// CHECK1-NEXT: store i32 [[XOR]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP14]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[XOR4:%.*]] = xor i32 [[TMP16]], [[TMP17]]
|
|
// CHECK1-NEXT: store i32 [[XOR4]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = atomicrmw xor ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[XOR:%.*]] = xor i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: store i32 [[XOR]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 -2147483648, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[CMP3:%.*]] = icmp sge i32 [[TMP10]], [[TMP12]]
|
|
// CHECK1-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
|
|
// CHECK1: cond.true4:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END8:%.*]]
|
|
// CHECK1: cond.false5:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP14]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM6]]
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END8]]
|
|
// CHECK1: cond.end8:
|
|
// CHECK1-NEXT: [[COND9:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE4]] ], [ [[TMP15]], [[COND_FALSE5]] ]
|
|
// CHECK1-NEXT: store i32 [[COND9]], ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR1]], ptr [[TMP17]], align 8
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[CMP11:%.*]] = icmp sgt i32 [[TMP19]], [[TMP20]]
|
|
// CHECK1-NEXT: br i1 [[CMP11]], label [[COND_TRUE12:%.*]], label [[COND_FALSE13:%.*]]
|
|
// CHECK1: cond.true12:
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END14:%.*]]
|
|
// CHECK1: cond.false13:
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END14]]
|
|
// CHECK1: cond.end14:
|
|
// CHECK1-NEXT: [[COND15:%.*]] = phi i32 [ [[TMP21]], [[COND_TRUE12]] ], [ [[TMP22]], [[COND_FALSE13]] ]
|
|
// CHECK1-NEXT: store i32 [[COND15]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = atomicrmw max ptr [[TMP0]], i32 [[TMP23]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83
|
|
// CHECK1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 2147483647, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP10]], [[TMP12]]
|
|
// CHECK1-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
|
|
// CHECK1: cond.true4:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END8:%.*]]
|
|
// CHECK1: cond.false5:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM6:%.*]] = sext i32 [[TMP14]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i64 0, i64 [[IDXPROM6]]
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END8]]
|
|
// CHECK1: cond.end8:
|
|
// CHECK1-NEXT: [[COND9:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE4]] ], [ [[TMP15]], [[COND_FALSE5]] ]
|
|
// CHECK1-NEXT: store i32 [[COND9]], ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP16]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
|
|
// CHECK1-NEXT: store ptr [[T_VAR1]], ptr [[TMP17]], align 8
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK1-NEXT: ]
|
|
// CHECK1: .omp.reduction.case1:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[CMP11:%.*]] = icmp slt i32 [[TMP19]], [[TMP20]]
|
|
// CHECK1-NEXT: br i1 [[CMP11]], label [[COND_TRUE12:%.*]], label [[COND_FALSE13:%.*]]
|
|
// CHECK1: cond.true12:
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END14:%.*]]
|
|
// CHECK1: cond.false13:
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END14]]
|
|
// CHECK1: cond.end14:
|
|
// CHECK1-NEXT: [[COND15:%.*]] = phi i32 [ [[TMP21]], [[COND_TRUE12]] ], [ [[TMP22]], [[COND_FALSE13]] ]
|
|
// CHECK1-NEXT: store i32 [[COND15]], ptr [[TMP0]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.case2:
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = atomicrmw min ptr [[TMP0]], i32 [[TMP23]] monotonic, align 4
|
|
// CHECK1-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK1: .omp.reduction.default:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK1-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@main
|
|
// CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[AND_VAR:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[OR_VAR:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP25:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS26:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[BIT_VAR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP32:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS33:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP39:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS40:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS43:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS44:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS45:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP46:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS47:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP53:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS54:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS58:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP60:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: store i32 0, ptr [[RETVAL]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 1, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes, ptr [[TMP9]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes, ptr [[TMP10]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP13]], align 8
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP14]], align 8
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP17]], align 4
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209.region_id, ptr [[KERNEL_ARGS]])
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK3: omp_offload.failed:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209(ptr @_ZZ4mainE5sivar) #[[ATTR2:[0-9]+]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK3: omp_offload.cont:
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP20]], align 4
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP21]], align 4
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 4
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP25]], align 4
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 1, ptr [[TMP26]], align 4
|
|
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP23]], ptr [[TMP27]], align 4
|
|
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP28]], align 4
|
|
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.1, ptr [[TMP29]], align 4
|
|
// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP30]], align 4
|
|
// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP31]], align 4
|
|
// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP32]], align 4
|
|
// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP33]], align 8
|
|
// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP34]], align 8
|
|
// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP35]], align 4
|
|
// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP36]], align 4
|
|
// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP37]], align 4
|
|
// CHECK3-NEXT: [[TMP38:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214.region_id, ptr [[KERNEL_ARGS5]])
|
|
// CHECK3-NEXT: [[TMP39:%.*]] = icmp ne i32 [[TMP38]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP39]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
|
|
// CHECK3: omp_offload.failed6:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214(ptr @_ZZ4mainE5sivar) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT7]]
|
|
// CHECK3: omp_offload.cont7:
|
|
// CHECK3-NEXT: store i32 1, ptr @_ZZ4mainE5sivar, align 4
|
|
// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP40]], align 4
|
|
// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP41]], align 4
|
|
// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP42]], align 4
|
|
// CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP45]], align 4
|
|
// CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 1, ptr [[TMP46]], align 4
|
|
// CHECK3-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP43]], ptr [[TMP47]], align 4
|
|
// CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP44]], ptr [[TMP48]], align 4
|
|
// CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.3, ptr [[TMP49]], align 4
|
|
// CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.4, ptr [[TMP50]], align 4
|
|
// CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP51]], align 4
|
|
// CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP52]], align 4
|
|
// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP53]], align 8
|
|
// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP54]], align 8
|
|
// CHECK3-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP55]], align 4
|
|
// CHECK3-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP56]], align 4
|
|
// CHECK3-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP57]], align 4
|
|
// CHECK3-NEXT: [[TMP58:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220.region_id, ptr [[KERNEL_ARGS12]])
|
|
// CHECK3-NEXT: [[TMP59:%.*]] = icmp ne i32 [[TMP58]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP59]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
|
|
// CHECK3: omp_offload.failed13:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220(ptr @_ZZ4mainE5sivar) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT14]]
|
|
// CHECK3: omp_offload.cont14:
|
|
// CHECK3-NEXT: store i8 1, ptr [[AND_VAR]], align 1
|
|
// CHECK3-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[AND_VAR]], ptr [[TMP60]], align 4
|
|
// CHECK3-NEXT: [[TMP61:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[AND_VAR]], ptr [[TMP61]], align 4
|
|
// CHECK3-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP62]], align 4
|
|
// CHECK3-NEXT: [[TMP63:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP65]], align 4
|
|
// CHECK3-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 1, ptr [[TMP66]], align 4
|
|
// CHECK3-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP63]], ptr [[TMP67]], align 4
|
|
// CHECK3-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP64]], ptr [[TMP68]], align 4
|
|
// CHECK3-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.5, ptr [[TMP69]], align 4
|
|
// CHECK3-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.6, ptr [[TMP70]], align 4
|
|
// CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP71]], align 4
|
|
// CHECK3-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP72]], align 4
|
|
// CHECK3-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP73]], align 8
|
|
// CHECK3-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP74]], align 8
|
|
// CHECK3-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP75]], align 4
|
|
// CHECK3-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP76]], align 4
|
|
// CHECK3-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP77]], align 4
|
|
// CHECK3-NEXT: [[TMP78:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226.region_id, ptr [[KERNEL_ARGS19]])
|
|
// CHECK3-NEXT: [[TMP79:%.*]] = icmp ne i32 [[TMP78]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP79]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
|
|
// CHECK3: omp_offload.failed20:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226(ptr [[AND_VAR]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT21]]
|
|
// CHECK3: omp_offload.cont21:
|
|
// CHECK3-NEXT: store i8 0, ptr [[OR_VAR]], align 1
|
|
// CHECK3-NEXT: [[TMP80:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[OR_VAR]], ptr [[TMP80]], align 4
|
|
// CHECK3-NEXT: [[TMP81:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[OR_VAR]], ptr [[TMP81]], align 4
|
|
// CHECK3-NEXT: [[TMP82:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS24]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP82]], align 4
|
|
// CHECK3-NEXT: [[TMP83:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP84:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP85]], align 4
|
|
// CHECK3-NEXT: [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 1, ptr [[TMP86]], align 4
|
|
// CHECK3-NEXT: [[TMP87:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP83]], ptr [[TMP87]], align 4
|
|
// CHECK3-NEXT: [[TMP88:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP84]], ptr [[TMP88]], align 4
|
|
// CHECK3-NEXT: [[TMP89:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.7, ptr [[TMP89]], align 4
|
|
// CHECK3-NEXT: [[TMP90:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.8, ptr [[TMP90]], align 4
|
|
// CHECK3-NEXT: [[TMP91:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP91]], align 4
|
|
// CHECK3-NEXT: [[TMP92:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP92]], align 4
|
|
// CHECK3-NEXT: [[TMP93:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP93]], align 8
|
|
// CHECK3-NEXT: [[TMP94:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP94]], align 8
|
|
// CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP95]], align 4
|
|
// CHECK3-NEXT: [[TMP96:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP96]], align 4
|
|
// CHECK3-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP97]], align 4
|
|
// CHECK3-NEXT: [[TMP98:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232.region_id, ptr [[KERNEL_ARGS26]])
|
|
// CHECK3-NEXT: [[TMP99:%.*]] = icmp ne i32 [[TMP98]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP99]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
|
|
// CHECK3: omp_offload.failed27:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232(ptr [[OR_VAR]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT28]]
|
|
// CHECK3: omp_offload.cont28:
|
|
// CHECK3-NEXT: store i32 1, ptr [[BIT_VAR]], align 4
|
|
// CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP100]], align 4
|
|
// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS30]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP101]], align 4
|
|
// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP102]], align 4
|
|
// CHECK3-NEXT: [[TMP103:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS30]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP105]], align 4
|
|
// CHECK3-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 1, ptr [[TMP106]], align 4
|
|
// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP103]], ptr [[TMP107]], align 4
|
|
// CHECK3-NEXT: [[TMP108:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP104]], ptr [[TMP108]], align 4
|
|
// CHECK3-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.9, ptr [[TMP109]], align 4
|
|
// CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.10, ptr [[TMP110]], align 4
|
|
// CHECK3-NEXT: [[TMP111:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP111]], align 4
|
|
// CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP112]], align 4
|
|
// CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP113]], align 8
|
|
// CHECK3-NEXT: [[TMP114:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP114]], align 8
|
|
// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP115]], align 4
|
|
// CHECK3-NEXT: [[TMP116:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP116]], align 4
|
|
// CHECK3-NEXT: [[TMP117:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP117]], align 4
|
|
// CHECK3-NEXT: [[TMP118:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238.region_id, ptr [[KERNEL_ARGS33]])
|
|
// CHECK3-NEXT: [[TMP119:%.*]] = icmp ne i32 [[TMP118]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP119]], label [[OMP_OFFLOAD_FAILED34:%.*]], label [[OMP_OFFLOAD_CONT35:%.*]]
|
|
// CHECK3: omp_offload.failed34:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238(ptr [[BIT_VAR]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT35]]
|
|
// CHECK3: omp_offload.cont35:
|
|
// CHECK3-NEXT: [[TMP120:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP120]], align 4
|
|
// CHECK3-NEXT: [[TMP121:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP121]], align 4
|
|
// CHECK3-NEXT: [[TMP122:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS38]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP122]], align 4
|
|
// CHECK3-NEXT: [[TMP123:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP124:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP125]], align 4
|
|
// CHECK3-NEXT: [[TMP126:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 1, ptr [[TMP126]], align 4
|
|
// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP123]], ptr [[TMP127]], align 4
|
|
// CHECK3-NEXT: [[TMP128:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP124]], ptr [[TMP128]], align 4
|
|
// CHECK3-NEXT: [[TMP129:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.11, ptr [[TMP129]], align 4
|
|
// CHECK3-NEXT: [[TMP130:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.12, ptr [[TMP130]], align 4
|
|
// CHECK3-NEXT: [[TMP131:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP131]], align 4
|
|
// CHECK3-NEXT: [[TMP132:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP132]], align 4
|
|
// CHECK3-NEXT: [[TMP133:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP133]], align 8
|
|
// CHECK3-NEXT: [[TMP134:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP134]], align 8
|
|
// CHECK3-NEXT: [[TMP135:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP135]], align 4
|
|
// CHECK3-NEXT: [[TMP136:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP136]], align 4
|
|
// CHECK3-NEXT: [[TMP137:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP137]], align 4
|
|
// CHECK3-NEXT: [[TMP138:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243.region_id, ptr [[KERNEL_ARGS40]])
|
|
// CHECK3-NEXT: [[TMP139:%.*]] = icmp ne i32 [[TMP138]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP139]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]]
|
|
// CHECK3: omp_offload.failed41:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243(ptr [[BIT_VAR]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT42]]
|
|
// CHECK3: omp_offload.cont42:
|
|
// CHECK3-NEXT: [[TMP140:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS43]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP140]], align 4
|
|
// CHECK3-NEXT: [[TMP141:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS44]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP141]], align 4
|
|
// CHECK3-NEXT: [[TMP142:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS45]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP142]], align 4
|
|
// CHECK3-NEXT: [[TMP143:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS43]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP144:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS44]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP145]], align 4
|
|
// CHECK3-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 1, ptr [[TMP146]], align 4
|
|
// CHECK3-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP143]], ptr [[TMP147]], align 4
|
|
// CHECK3-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP144]], ptr [[TMP148]], align 4
|
|
// CHECK3-NEXT: [[TMP149:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.13, ptr [[TMP149]], align 4
|
|
// CHECK3-NEXT: [[TMP150:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.14, ptr [[TMP150]], align 4
|
|
// CHECK3-NEXT: [[TMP151:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP151]], align 4
|
|
// CHECK3-NEXT: [[TMP152:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP152]], align 4
|
|
// CHECK3-NEXT: [[TMP153:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP153]], align 8
|
|
// CHECK3-NEXT: [[TMP154:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP154]], align 8
|
|
// CHECK3-NEXT: [[TMP155:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP155]], align 4
|
|
// CHECK3-NEXT: [[TMP156:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP156]], align 4
|
|
// CHECK3-NEXT: [[TMP157:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP157]], align 4
|
|
// CHECK3-NEXT: [[TMP158:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248.region_id, ptr [[KERNEL_ARGS47]])
|
|
// CHECK3-NEXT: [[TMP159:%.*]] = icmp ne i32 [[TMP158]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP159]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
|
|
// CHECK3: omp_offload.failed48:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248(ptr [[BIT_VAR]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT49]]
|
|
// CHECK3: omp_offload.cont49:
|
|
// CHECK3-NEXT: store i32 0, ptr @_ZZ4mainE5sivar, align 4
|
|
// CHECK3-NEXT: [[TMP160:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP160]], align 4
|
|
// CHECK3-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP161]], align 4
|
|
// CHECK3-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP162]], align 4
|
|
// CHECK3-NEXT: [[TMP163:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP164:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP165:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP165]], align 4
|
|
// CHECK3-NEXT: [[TMP166:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 1, ptr [[TMP166]], align 4
|
|
// CHECK3-NEXT: [[TMP167:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP163]], ptr [[TMP167]], align 4
|
|
// CHECK3-NEXT: [[TMP168:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP164]], ptr [[TMP168]], align 4
|
|
// CHECK3-NEXT: [[TMP169:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.15, ptr [[TMP169]], align 4
|
|
// CHECK3-NEXT: [[TMP170:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.16, ptr [[TMP170]], align 4
|
|
// CHECK3-NEXT: [[TMP171:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP171]], align 4
|
|
// CHECK3-NEXT: [[TMP172:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP172]], align 4
|
|
// CHECK3-NEXT: [[TMP173:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP173]], align 8
|
|
// CHECK3-NEXT: [[TMP174:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP174]], align 8
|
|
// CHECK3-NEXT: [[TMP175:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP175]], align 4
|
|
// CHECK3-NEXT: [[TMP176:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP176]], align 4
|
|
// CHECK3-NEXT: [[TMP177:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP177]], align 4
|
|
// CHECK3-NEXT: [[TMP178:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254.region_id, ptr [[KERNEL_ARGS54]])
|
|
// CHECK3-NEXT: [[TMP179:%.*]] = icmp ne i32 [[TMP178]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP179]], label [[OMP_OFFLOAD_FAILED55:%.*]], label [[OMP_OFFLOAD_CONT56:%.*]]
|
|
// CHECK3: omp_offload.failed55:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254(ptr @_ZZ4mainE5sivar) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT56]]
|
|
// CHECK3: omp_offload.cont56:
|
|
// CHECK3-NEXT: store i32 10, ptr @_ZZ4mainE5sivar, align 4
|
|
// CHECK3-NEXT: [[TMP180:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP180]], align 4
|
|
// CHECK3-NEXT: [[TMP181:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr @_ZZ4mainE5sivar, ptr [[TMP181]], align 4
|
|
// CHECK3-NEXT: [[TMP182:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP182]], align 4
|
|
// CHECK3-NEXT: [[TMP183:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP184:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP185:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP185]], align 4
|
|
// CHECK3-NEXT: [[TMP186:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 1, ptr [[TMP186]], align 4
|
|
// CHECK3-NEXT: [[TMP187:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP183]], ptr [[TMP187]], align 4
|
|
// CHECK3-NEXT: [[TMP188:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP184]], ptr [[TMP188]], align 4
|
|
// CHECK3-NEXT: [[TMP189:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.17, ptr [[TMP189]], align 4
|
|
// CHECK3-NEXT: [[TMP190:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.18, ptr [[TMP190]], align 4
|
|
// CHECK3-NEXT: [[TMP191:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP191]], align 4
|
|
// CHECK3-NEXT: [[TMP192:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP192]], align 4
|
|
// CHECK3-NEXT: [[TMP193:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP193]], align 8
|
|
// CHECK3-NEXT: [[TMP194:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP194]], align 8
|
|
// CHECK3-NEXT: [[TMP195:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP195]], align 4
|
|
// CHECK3-NEXT: [[TMP196:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP196]], align 4
|
|
// CHECK3-NEXT: [[TMP197:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP197]], align 4
|
|
// CHECK3-NEXT: [[TMP198:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260.region_id, ptr [[KERNEL_ARGS61]])
|
|
// CHECK3-NEXT: [[TMP199:%.*]] = icmp ne i32 [[TMP198]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP199]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
|
|
// CHECK3: omp_offload.failed62:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260(ptr @_ZZ4mainE5sivar) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT63]]
|
|
// CHECK3: omp_offload.cont63:
|
|
// CHECK3-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
|
|
// CHECK3-NEXT: ret i32 [[CALL]]
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209.omp_outlined, ptr [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[SIVAR1]], ptr [[TMP12]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l209.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214.omp_outlined, ptr [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP10]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[SUB]], ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[SIVAR1]], ptr [[TMP12]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l214.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220.omp_outlined, ptr [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP9]], [[TMP10]]
|
|
// CHECK3-NEXT: store i32 [[MUL3]], ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[SIVAR1]], ptr [[TMP12]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[MUL5:%.*]] = mul nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK3-NEXT: store i32 [[MUL5]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, ptr [[TMP0]] monotonic, align 4
|
|
// CHECK3-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
// CHECK3: atomic_cont:
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP22:%.*]], [[ATOMIC_CONT]] ]
|
|
// CHECK3-NEXT: store i32 [[TMP17]], ptr [[_TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[_TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[MUL7:%.*]] = mul nsw i32 [[TMP18]], [[TMP19]]
|
|
// CHECK3-NEXT: store i32 [[MUL7]], ptr [[ATOMIC_TEMP]], align 4
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = cmpxchg ptr [[TMP0]], i32 [[TMP17]], i32 [[TMP20]] monotonic monotonic, align 4
|
|
// CHECK3-NEXT: [[TMP22]] = extractvalue { i32, i1 } [[TMP21]], 0
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = extractvalue { i32, i1 } [[TMP21]], 1
|
|
// CHECK3-NEXT: br i1 [[TMP23]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
// CHECK3: atomic_exit:
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l220.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[MUL]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226
|
|
// CHECK3-SAME: (ptr noundef nonnull align 1 dereferenceable(1) [[AND_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[AND_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[AND_VAR]], ptr [[AND_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AND_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226.omp_outlined, ptr [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[AND_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[AND_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[AND_VAR1:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[_TMP12:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[AND_VAR]], ptr [[AND_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AND_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i8 1, ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
// CHECK3: land.rhs:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[CMP3:%.*]] = icmp eq i32 [[TMP10]], 0
|
|
// CHECK3-NEXT: br label [[LAND_END]]
|
|
// CHECK3: land.end:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = phi i1 [ false, [[OMP_INNER_FOR_BODY]] ], [ [[CMP3]], [[LAND_RHS]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP11]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[AND_VAR1]], ptr [[TMP13]], align 4
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i8, ptr [[TMP0]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP15]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL5]], label [[LAND_RHS6:%.*]], label [[LAND_END8:%.*]]
|
|
// CHECK3: land.rhs6:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL7:%.*]] = trunc i8 [[TMP16]] to i1
|
|
// CHECK3-NEXT: br label [[LAND_END8]]
|
|
// CHECK3: land.end8:
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LAND_RHS6]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[TMP17]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL9]], ptr [[TMP0]], align 1
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[TMP18]] to i1
|
|
// CHECK3-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[TMP0]] monotonic, align 1
|
|
// CHECK3-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
// CHECK3: atomic_cont:
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP25:%.*]], [[LAND_END17:%.*]] ]
|
|
// CHECK3-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP19]] to i1
|
|
// CHECK3-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[TOBOOL11]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL13]], ptr [[_TMP12]], align 1
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i8, ptr [[_TMP12]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[TMP20]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL14]], label [[LAND_RHS15:%.*]], label [[LAND_END17]]
|
|
// CHECK3: land.rhs15:
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[TMP21]] to i1
|
|
// CHECK3-NEXT: br label [[LAND_END17]]
|
|
// CHECK3: land.end17:
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = phi i1 [ false, [[ATOMIC_CONT]] ], [ [[TOBOOL16]], [[LAND_RHS15]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL18:%.*]] = zext i1 [[TMP22]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL18]], ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = cmpxchg ptr [[TMP0]], i8 [[TMP19]], i8 [[TMP23]] monotonic monotonic, align 1
|
|
// CHECK3-NEXT: [[TMP25]] = extractvalue { i8, i1 } [[TMP24]], 0
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = extractvalue { i8, i1 } [[TMP24]], 1
|
|
// CHECK3-NEXT: br i1 [[TMP26]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
// CHECK3: atomic_exit:
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l226.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
// CHECK3: land.rhs:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP5]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP9]] to i1
|
|
// CHECK3-NEXT: br label [[LAND_END]]
|
|
// CHECK3: land.end:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL2]], [[LAND_RHS]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP10]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[TMP7]], align 1
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232
|
|
// CHECK3-SAME: (ptr noundef nonnull align 1 dereferenceable(1) [[OR_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[OR_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[OR_VAR]], ptr [[OR_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OR_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232.omp_outlined, ptr [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[OR_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[OR_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[OR_VAR1:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[_TMP12:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[OR_VAR]], ptr [[OR_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OR_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i8 0, ptr [[OR_VAR1]], align 1
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]]
|
|
// CHECK3: lor.rhs:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[CMP3:%.*]] = icmp eq i32 [[TMP10]], 0
|
|
// CHECK3-NEXT: br label [[LOR_END]]
|
|
// CHECK3: lor.end:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = phi i1 [ true, [[OMP_INNER_FOR_BODY]] ], [ [[CMP3]], [[LOR_RHS]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP11]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[OR_VAR1]], align 1
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[OR_VAR1]], ptr [[TMP13]], align 4
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i8, ptr [[TMP0]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP15]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL5]], label [[LOR_END8:%.*]], label [[LOR_RHS6:%.*]]
|
|
// CHECK3: lor.rhs6:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL7:%.*]] = trunc i8 [[TMP16]] to i1
|
|
// CHECK3-NEXT: br label [[LOR_END8]]
|
|
// CHECK3: lor.end8:
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = phi i1 [ true, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LOR_RHS6]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[TMP17]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL9]], ptr [[TMP0]], align 1
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[TMP18]] to i1
|
|
// CHECK3-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[TMP0]] monotonic, align 1
|
|
// CHECK3-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
// CHECK3: atomic_cont:
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP25:%.*]], [[LOR_END17:%.*]] ]
|
|
// CHECK3-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP19]] to i1
|
|
// CHECK3-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[TOBOOL11]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL13]], ptr [[_TMP12]], align 1
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i8, ptr [[_TMP12]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[TMP20]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL14]], label [[LOR_END17]], label [[LOR_RHS15:%.*]]
|
|
// CHECK3: lor.rhs15:
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[TMP21]] to i1
|
|
// CHECK3-NEXT: br label [[LOR_END17]]
|
|
// CHECK3: lor.end17:
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = phi i1 [ true, [[ATOMIC_CONT]] ], [ [[TOBOOL16]], [[LOR_RHS15]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL18:%.*]] = zext i1 [[TMP22]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL18]], ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = cmpxchg ptr [[TMP0]], i8 [[TMP19]], i8 [[TMP23]] monotonic monotonic, align 1
|
|
// CHECK3-NEXT: [[TMP25]] = extractvalue { i8, i1 } [[TMP24]], 0
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = extractvalue { i8, i1 } [[TMP24]], 1
|
|
// CHECK3-NEXT: br i1 [[TMP26]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
// CHECK3: atomic_exit:
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l232.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]]
|
|
// CHECK3: lor.rhs:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP5]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP9]] to i1
|
|
// CHECK3-NEXT: br label [[LOR_END]]
|
|
// CHECK3: lor.end:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[TOBOOL2]], [[LOR_RHS]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP10]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[TMP7]], align 1
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238.omp_outlined, ptr [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 -1, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[AND:%.*]] = and i32 [[TMP9]], [[TMP10]]
|
|
// CHECK3-NEXT: store i32 [[AND]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP12]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[AND4:%.*]] = and i32 [[TMP14]], [[TMP15]]
|
|
// CHECK3-NEXT: store i32 [[AND4]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = atomicrmw and ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l238.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[AND:%.*]] = and i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[AND]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243.omp_outlined, ptr [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[OR:%.*]] = or i32 [[TMP9]], [[TMP10]]
|
|
// CHECK3-NEXT: store i32 [[OR]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP12]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[OR4:%.*]] = or i32 [[TMP14]], [[TMP15]]
|
|
// CHECK3-NEXT: store i32 [[OR4]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = atomicrmw or ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l243.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[OR:%.*]] = or i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[OR]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248.omp_outlined, ptr [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[XOR:%.*]] = xor i32 [[TMP9]], [[TMP10]]
|
|
// CHECK3-NEXT: store i32 [[XOR]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP12]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[XOR4:%.*]] = xor i32 [[TMP14]], [[TMP15]]
|
|
// CHECK3-NEXT: store i32 [[XOR4]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = atomicrmw xor ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l248.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[XOR:%.*]] = xor i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[XOR]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254.omp_outlined, ptr [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 -2147483648, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[CMP3:%.*]] = icmp sge i32 [[TMP9]], [[TMP10]]
|
|
// CHECK3-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
|
|
// CHECK3: cond.true4:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END6:%.*]]
|
|
// CHECK3: cond.false5:
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END6]]
|
|
// CHECK3: cond.end6:
|
|
// CHECK3-NEXT: [[COND7:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE4]] ], [ [[TMP12]], [[COND_FALSE5]] ]
|
|
// CHECK3-NEXT: store i32 [[COND7]], ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[SIVAR1]], ptr [[TMP14]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP16]], [[TMP17]]
|
|
// CHECK3-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
|
|
// CHECK3: cond.true10:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END12:%.*]]
|
|
// CHECK3: cond.false11:
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END12]]
|
|
// CHECK3: cond.end12:
|
|
// CHECK3-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP18]], [[COND_TRUE10]] ], [ [[TMP19]], [[COND_FALSE11]] ]
|
|
// CHECK3-NEXT: store i32 [[COND13]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw max ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l254.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260.omp_outlined, ptr [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 2147483647, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
|
|
// CHECK3-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
|
|
// CHECK3: cond.true4:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END6:%.*]]
|
|
// CHECK3: cond.false5:
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END6]]
|
|
// CHECK3: cond.end6:
|
|
// CHECK3-NEXT: [[COND7:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE4]] ], [ [[TMP12]], [[COND_FALSE5]] ]
|
|
// CHECK3-NEXT: store i32 [[COND7]], ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[SIVAR1]], ptr [[TMP14]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[CMP9:%.*]] = icmp slt i32 [[TMP16]], [[TMP17]]
|
|
// CHECK3-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
|
|
// CHECK3: cond.true10:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END12:%.*]]
|
|
// CHECK3: cond.false11:
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END12]]
|
|
// CHECK3: cond.end12:
|
|
// CHECK3-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP18]], [[COND_TRUE10]] ], [ [[TMP19]], [[COND_FALSE11]] ]
|
|
// CHECK3-NEXT: store i32 [[COND13]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = atomicrmw min ptr [[TMP0]], i32 [[TMP20]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l260.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
|
|
// CHECK3-SAME: () #[[ATTR5:[0-9]+]] comdat {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[T_VAR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP4:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS5:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS12:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[AND_VAR:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS15:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS16:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS17:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP18:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS19:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[OR_VAR:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP25:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS26:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[BIT_VAR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP32:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS33:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP39:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS40:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS43:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS44:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS45:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP46:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS47:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP53:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS54:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS57:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS58:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS59:%.*]] = alloca [2 x ptr], align 4
|
|
// CHECK3-NEXT: [[_TMP60:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[KERNEL_ARGS61:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK3-NEXT: store i32 0, ptr [[T_VAR]], align 4
|
|
// CHECK3-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 4 [[VEC]], ptr align 4 @__const._Z5tmainIiET_v.vec, i32 8, i1 false)
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP2]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 1, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP3]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP4]], ptr [[TMP8]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.19, ptr [[TMP9]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.20, ptr [[TMP10]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP11]], align 4
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP12]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP13]], align 8
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP14]], align 8
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP15]], align 4
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP16]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP17]], align 4
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.region_id, ptr [[KERNEL_ARGS]])
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP19]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK3: omp_offload.failed:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32(ptr [[T_VAR]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK3: omp_offload.cont:
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP20]], align 4
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP21]], align 4
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP22]], align 4
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP23]], align 4
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP24]], align 4
|
|
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP25]], align 4
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP28]], align 4
|
|
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 2, ptr [[TMP29]], align 4
|
|
// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP26]], ptr [[TMP30]], align 4
|
|
// CHECK3-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP27]], ptr [[TMP31]], align 4
|
|
// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.21, ptr [[TMP32]], align 4
|
|
// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.22, ptr [[TMP33]], align 4
|
|
// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP34]], align 4
|
|
// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP35]], align 4
|
|
// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP36]], align 8
|
|
// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP37]], align 8
|
|
// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP38]], align 4
|
|
// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP39]], align 4
|
|
// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS5]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP40]], align 4
|
|
// CHECK3-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37.region_id, ptr [[KERNEL_ARGS5]])
|
|
// CHECK3-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]]
|
|
// CHECK3: omp_offload.failed6:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37(ptr [[T_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT7]]
|
|
// CHECK3: omp_offload.cont7:
|
|
// CHECK3-NEXT: store i32 1, ptr [[T_VAR]], align 4
|
|
// CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP43]], align 4
|
|
// CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP44]], align 4
|
|
// CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP45]], align 4
|
|
// CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP46]], align 4
|
|
// CHECK3-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP47]], align 4
|
|
// CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP48]], align 4
|
|
// CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP51]], align 4
|
|
// CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 2, ptr [[TMP52]], align 4
|
|
// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP49]], ptr [[TMP53]], align 4
|
|
// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP50]], ptr [[TMP54]], align 4
|
|
// CHECK3-NEXT: [[TMP55:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.23, ptr [[TMP55]], align 4
|
|
// CHECK3-NEXT: [[TMP56:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.24, ptr [[TMP56]], align 4
|
|
// CHECK3-NEXT: [[TMP57:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP57]], align 4
|
|
// CHECK3-NEXT: [[TMP58:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP58]], align 4
|
|
// CHECK3-NEXT: [[TMP59:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP59]], align 8
|
|
// CHECK3-NEXT: [[TMP60:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP60]], align 8
|
|
// CHECK3-NEXT: [[TMP61:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP61]], align 4
|
|
// CHECK3-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP62]], align 4
|
|
// CHECK3-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS12]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP63]], align 4
|
|
// CHECK3-NEXT: [[TMP64:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43.region_id, ptr [[KERNEL_ARGS12]])
|
|
// CHECK3-NEXT: [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED13:%.*]], label [[OMP_OFFLOAD_CONT14:%.*]]
|
|
// CHECK3: omp_offload.failed13:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43(ptr [[T_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT14]]
|
|
// CHECK3: omp_offload.cont14:
|
|
// CHECK3-NEXT: store i8 1, ptr [[AND_VAR]], align 1
|
|
// CHECK3-NEXT: [[TMP66:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[AND_VAR]], ptr [[TMP66]], align 4
|
|
// CHECK3-NEXT: [[TMP67:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[AND_VAR]], ptr [[TMP67]], align 4
|
|
// CHECK3-NEXT: [[TMP68:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP68]], align 4
|
|
// CHECK3-NEXT: [[TMP69:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP69]], align 4
|
|
// CHECK3-NEXT: [[TMP70:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP70]], align 4
|
|
// CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS17]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP71]], align 4
|
|
// CHECK3-NEXT: [[TMP72:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS15]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP73:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS16]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP74]], align 4
|
|
// CHECK3-NEXT: [[TMP75:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 2, ptr [[TMP75]], align 4
|
|
// CHECK3-NEXT: [[TMP76:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP72]], ptr [[TMP76]], align 4
|
|
// CHECK3-NEXT: [[TMP77:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP73]], ptr [[TMP77]], align 4
|
|
// CHECK3-NEXT: [[TMP78:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.25, ptr [[TMP78]], align 4
|
|
// CHECK3-NEXT: [[TMP79:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.26, ptr [[TMP79]], align 4
|
|
// CHECK3-NEXT: [[TMP80:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP80]], align 4
|
|
// CHECK3-NEXT: [[TMP81:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP81]], align 4
|
|
// CHECK3-NEXT: [[TMP82:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP82]], align 8
|
|
// CHECK3-NEXT: [[TMP83:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP83]], align 8
|
|
// CHECK3-NEXT: [[TMP84:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP84]], align 4
|
|
// CHECK3-NEXT: [[TMP85:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP85]], align 4
|
|
// CHECK3-NEXT: [[TMP86:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS19]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP86]], align 4
|
|
// CHECK3-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.region_id, ptr [[KERNEL_ARGS19]])
|
|
// CHECK3-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED20:%.*]], label [[OMP_OFFLOAD_CONT21:%.*]]
|
|
// CHECK3: omp_offload.failed20:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49(ptr [[AND_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT21]]
|
|
// CHECK3: omp_offload.cont21:
|
|
// CHECK3-NEXT: store i8 0, ptr [[OR_VAR]], align 1
|
|
// CHECK3-NEXT: [[TMP89:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[OR_VAR]], ptr [[TMP89]], align 4
|
|
// CHECK3-NEXT: [[TMP90:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[OR_VAR]], ptr [[TMP90]], align 4
|
|
// CHECK3-NEXT: [[TMP91:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS24]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP91]], align 4
|
|
// CHECK3-NEXT: [[TMP92:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP92]], align 4
|
|
// CHECK3-NEXT: [[TMP93:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP93]], align 4
|
|
// CHECK3-NEXT: [[TMP94:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS24]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP94]], align 4
|
|
// CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP96:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP97:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP97]], align 4
|
|
// CHECK3-NEXT: [[TMP98:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 2, ptr [[TMP98]], align 4
|
|
// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP95]], ptr [[TMP99]], align 4
|
|
// CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP96]], ptr [[TMP100]], align 4
|
|
// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.27, ptr [[TMP101]], align 4
|
|
// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.28, ptr [[TMP102]], align 4
|
|
// CHECK3-NEXT: [[TMP103:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP103]], align 4
|
|
// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP104]], align 4
|
|
// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP105]], align 8
|
|
// CHECK3-NEXT: [[TMP106:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP106]], align 8
|
|
// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP107]], align 4
|
|
// CHECK3-NEXT: [[TMP108:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP108]], align 4
|
|
// CHECK3-NEXT: [[TMP109:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS26]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP109]], align 4
|
|
// CHECK3-NEXT: [[TMP110:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55.region_id, ptr [[KERNEL_ARGS26]])
|
|
// CHECK3-NEXT: [[TMP111:%.*]] = icmp ne i32 [[TMP110]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP111]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]]
|
|
// CHECK3: omp_offload.failed27:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55(ptr [[OR_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT28]]
|
|
// CHECK3: omp_offload.cont28:
|
|
// CHECK3-NEXT: store i32 1, ptr [[BIT_VAR]], align 4
|
|
// CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP112]], align 4
|
|
// CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS30]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP113]], align 4
|
|
// CHECK3-NEXT: [[TMP114:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP114]], align 4
|
|
// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP115]], align 4
|
|
// CHECK3-NEXT: [[TMP116:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS30]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP116]], align 4
|
|
// CHECK3-NEXT: [[TMP117:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP117]], align 4
|
|
// CHECK3-NEXT: [[TMP118:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP119:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS30]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP120:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP120]], align 4
|
|
// CHECK3-NEXT: [[TMP121:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 2, ptr [[TMP121]], align 4
|
|
// CHECK3-NEXT: [[TMP122:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP118]], ptr [[TMP122]], align 4
|
|
// CHECK3-NEXT: [[TMP123:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP119]], ptr [[TMP123]], align 4
|
|
// CHECK3-NEXT: [[TMP124:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.29, ptr [[TMP124]], align 4
|
|
// CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.30, ptr [[TMP125]], align 4
|
|
// CHECK3-NEXT: [[TMP126:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP126]], align 4
|
|
// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP127]], align 4
|
|
// CHECK3-NEXT: [[TMP128:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP128]], align 8
|
|
// CHECK3-NEXT: [[TMP129:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP129]], align 8
|
|
// CHECK3-NEXT: [[TMP130:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP130]], align 4
|
|
// CHECK3-NEXT: [[TMP131:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP131]], align 4
|
|
// CHECK3-NEXT: [[TMP132:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS33]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP132]], align 4
|
|
// CHECK3-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61.region_id, ptr [[KERNEL_ARGS33]])
|
|
// CHECK3-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED34:%.*]], label [[OMP_OFFLOAD_CONT35:%.*]]
|
|
// CHECK3: omp_offload.failed34:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61(ptr [[BIT_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT35]]
|
|
// CHECK3: omp_offload.cont35:
|
|
// CHECK3-NEXT: [[TMP135:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP135]], align 4
|
|
// CHECK3-NEXT: [[TMP136:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP136]], align 4
|
|
// CHECK3-NEXT: [[TMP137:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS38]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP137]], align 4
|
|
// CHECK3-NEXT: [[TMP138:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP138]], align 4
|
|
// CHECK3-NEXT: [[TMP139:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS37]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP139]], align 4
|
|
// CHECK3-NEXT: [[TMP140:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS38]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP140]], align 4
|
|
// CHECK3-NEXT: [[TMP141:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP142:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS37]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP143:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP143]], align 4
|
|
// CHECK3-NEXT: [[TMP144:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 2, ptr [[TMP144]], align 4
|
|
// CHECK3-NEXT: [[TMP145:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP141]], ptr [[TMP145]], align 4
|
|
// CHECK3-NEXT: [[TMP146:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP142]], ptr [[TMP146]], align 4
|
|
// CHECK3-NEXT: [[TMP147:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.31, ptr [[TMP147]], align 4
|
|
// CHECK3-NEXT: [[TMP148:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.32, ptr [[TMP148]], align 4
|
|
// CHECK3-NEXT: [[TMP149:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP149]], align 4
|
|
// CHECK3-NEXT: [[TMP150:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP150]], align 4
|
|
// CHECK3-NEXT: [[TMP151:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP151]], align 8
|
|
// CHECK3-NEXT: [[TMP152:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP152]], align 8
|
|
// CHECK3-NEXT: [[TMP153:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP153]], align 4
|
|
// CHECK3-NEXT: [[TMP154:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP154]], align 4
|
|
// CHECK3-NEXT: [[TMP155:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS40]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP155]], align 4
|
|
// CHECK3-NEXT: [[TMP156:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.region_id, ptr [[KERNEL_ARGS40]])
|
|
// CHECK3-NEXT: [[TMP157:%.*]] = icmp ne i32 [[TMP156]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP157]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]]
|
|
// CHECK3: omp_offload.failed41:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66(ptr [[BIT_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT42]]
|
|
// CHECK3: omp_offload.cont42:
|
|
// CHECK3-NEXT: [[TMP158:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS43]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP158]], align 4
|
|
// CHECK3-NEXT: [[TMP159:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS44]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[TMP159]], align 4
|
|
// CHECK3-NEXT: [[TMP160:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS45]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP160]], align 4
|
|
// CHECK3-NEXT: [[TMP161:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS43]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP161]], align 4
|
|
// CHECK3-NEXT: [[TMP162:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS44]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP162]], align 4
|
|
// CHECK3-NEXT: [[TMP163:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS45]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP163]], align 4
|
|
// CHECK3-NEXT: [[TMP164:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS43]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP165:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS44]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP166:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP166]], align 4
|
|
// CHECK3-NEXT: [[TMP167:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 2, ptr [[TMP167]], align 4
|
|
// CHECK3-NEXT: [[TMP168:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP164]], ptr [[TMP168]], align 4
|
|
// CHECK3-NEXT: [[TMP169:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP165]], ptr [[TMP169]], align 4
|
|
// CHECK3-NEXT: [[TMP170:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.33, ptr [[TMP170]], align 4
|
|
// CHECK3-NEXT: [[TMP171:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.34, ptr [[TMP171]], align 4
|
|
// CHECK3-NEXT: [[TMP172:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP172]], align 4
|
|
// CHECK3-NEXT: [[TMP173:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP173]], align 4
|
|
// CHECK3-NEXT: [[TMP174:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP174]], align 8
|
|
// CHECK3-NEXT: [[TMP175:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP175]], align 8
|
|
// CHECK3-NEXT: [[TMP176:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP176]], align 4
|
|
// CHECK3-NEXT: [[TMP177:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP177]], align 4
|
|
// CHECK3-NEXT: [[TMP178:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS47]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP178]], align 4
|
|
// CHECK3-NEXT: [[TMP179:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71.region_id, ptr [[KERNEL_ARGS47]])
|
|
// CHECK3-NEXT: [[TMP180:%.*]] = icmp ne i32 [[TMP179]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP180]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]]
|
|
// CHECK3: omp_offload.failed48:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71(ptr [[BIT_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT49]]
|
|
// CHECK3: omp_offload.cont49:
|
|
// CHECK3-NEXT: store i32 0, ptr [[T_VAR]], align 4
|
|
// CHECK3-NEXT: [[TMP181:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP181]], align 4
|
|
// CHECK3-NEXT: [[TMP182:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP182]], align 4
|
|
// CHECK3-NEXT: [[TMP183:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP183]], align 4
|
|
// CHECK3-NEXT: [[TMP184:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP184]], align 4
|
|
// CHECK3-NEXT: [[TMP185:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP185]], align 4
|
|
// CHECK3-NEXT: [[TMP186:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS52]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP186]], align 4
|
|
// CHECK3-NEXT: [[TMP187:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP188:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS51]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP189:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP189]], align 4
|
|
// CHECK3-NEXT: [[TMP190:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 2, ptr [[TMP190]], align 4
|
|
// CHECK3-NEXT: [[TMP191:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP187]], ptr [[TMP191]], align 4
|
|
// CHECK3-NEXT: [[TMP192:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP188]], ptr [[TMP192]], align 4
|
|
// CHECK3-NEXT: [[TMP193:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.35, ptr [[TMP193]], align 4
|
|
// CHECK3-NEXT: [[TMP194:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.36, ptr [[TMP194]], align 4
|
|
// CHECK3-NEXT: [[TMP195:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP195]], align 4
|
|
// CHECK3-NEXT: [[TMP196:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP196]], align 4
|
|
// CHECK3-NEXT: [[TMP197:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP197]], align 8
|
|
// CHECK3-NEXT: [[TMP198:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP198]], align 8
|
|
// CHECK3-NEXT: [[TMP199:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP199]], align 4
|
|
// CHECK3-NEXT: [[TMP200:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP200]], align 4
|
|
// CHECK3-NEXT: [[TMP201:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS54]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP201]], align 4
|
|
// CHECK3-NEXT: [[TMP202:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77.region_id, ptr [[KERNEL_ARGS54]])
|
|
// CHECK3-NEXT: [[TMP203:%.*]] = icmp ne i32 [[TMP202]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP203]], label [[OMP_OFFLOAD_FAILED55:%.*]], label [[OMP_OFFLOAD_CONT56:%.*]]
|
|
// CHECK3: omp_offload.failed55:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77(ptr [[T_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT56]]
|
|
// CHECK3: omp_offload.cont56:
|
|
// CHECK3-NEXT: store i32 10, ptr [[T_VAR]], align 4
|
|
// CHECK3-NEXT: [[TMP204:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP204]], align 4
|
|
// CHECK3-NEXT: [[TMP205:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[TMP205]], align 4
|
|
// CHECK3-NEXT: [[TMP206:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP206]], align 4
|
|
// CHECK3-NEXT: [[TMP207:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP207]], align 4
|
|
// CHECK3-NEXT: [[TMP208:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS58]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[TMP208]], align 4
|
|
// CHECK3-NEXT: [[TMP209:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_MAPPERS59]], i32 0, i32 1
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP209]], align 4
|
|
// CHECK3-NEXT: [[TMP210:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_BASEPTRS57]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP211:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOFFLOAD_PTRS58]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP212:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 0
|
|
// CHECK3-NEXT: store i32 3, ptr [[TMP212]], align 4
|
|
// CHECK3-NEXT: [[TMP213:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 1
|
|
// CHECK3-NEXT: store i32 2, ptr [[TMP213]], align 4
|
|
// CHECK3-NEXT: [[TMP214:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP210]], ptr [[TMP214]], align 4
|
|
// CHECK3-NEXT: [[TMP215:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP211]], ptr [[TMP215]], align 4
|
|
// CHECK3-NEXT: [[TMP216:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 4
|
|
// CHECK3-NEXT: store ptr @.offload_sizes.37, ptr [[TMP216]], align 4
|
|
// CHECK3-NEXT: [[TMP217:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 5
|
|
// CHECK3-NEXT: store ptr @.offload_maptypes.38, ptr [[TMP217]], align 4
|
|
// CHECK3-NEXT: [[TMP218:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 6
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP218]], align 4
|
|
// CHECK3-NEXT: [[TMP219:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 7
|
|
// CHECK3-NEXT: store ptr null, ptr [[TMP219]], align 4
|
|
// CHECK3-NEXT: [[TMP220:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 8
|
|
// CHECK3-NEXT: store i64 2, ptr [[TMP220]], align 8
|
|
// CHECK3-NEXT: [[TMP221:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 9
|
|
// CHECK3-NEXT: store i64 0, ptr [[TMP221]], align 8
|
|
// CHECK3-NEXT: [[TMP222:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 10
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP222]], align 4
|
|
// CHECK3-NEXT: [[TMP223:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 11
|
|
// CHECK3-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP223]], align 4
|
|
// CHECK3-NEXT: [[TMP224:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS61]], i32 0, i32 12
|
|
// CHECK3-NEXT: store i32 0, ptr [[TMP224]], align 4
|
|
// CHECK3-NEXT: [[TMP225:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83.region_id, ptr [[KERNEL_ARGS61]])
|
|
// CHECK3-NEXT: [[TMP226:%.*]] = icmp ne i32 [[TMP225]], 0
|
|
// CHECK3-NEXT: br i1 [[TMP226]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]]
|
|
// CHECK3: omp_offload.failed62:
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83(ptr [[T_VAR]], ptr [[VEC]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT63]]
|
|
// CHECK3: omp_offload.cont63:
|
|
// CHECK3-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.omp_outlined, ptr [[TMP0]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR1]], ptr [[TMP12]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP13]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK3-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP16]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l32.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i32 0, i32 [[TMP10]]
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP12]], [[TMP11]]
|
|
// CHECK3-NEXT: store i32 [[SUB]], ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR1]], ptr [[TMP14]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l37.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i32 0, i32 [[TMP11]]
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP10]], [[TMP12]]
|
|
// CHECK3-NEXT: store i32 [[MUL3]], ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR1]], ptr [[TMP14]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[MUL5:%.*]] = mul nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK3-NEXT: store i32 [[MUL5]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, ptr [[TMP0]] monotonic, align 4
|
|
// CHECK3-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
// CHECK3: atomic_cont:
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP24:%.*]], [[ATOMIC_CONT]] ]
|
|
// CHECK3-NEXT: store i32 [[TMP19]], ptr [[_TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[_TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[MUL7:%.*]] = mul nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK3-NEXT: store i32 [[MUL7]], ptr [[ATOMIC_TEMP]], align 4
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = cmpxchg ptr [[TMP0]], i32 [[TMP19]], i32 [[TMP22]] monotonic monotonic, align 4
|
|
// CHECK3-NEXT: [[TMP24]] = extractvalue { i32, i1 } [[TMP23]], 0
|
|
// CHECK3-NEXT: [[TMP25:%.*]] = extractvalue { i32, i1 } [[TMP23]], 1
|
|
// CHECK3-NEXT: br i1 [[TMP25]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
// CHECK3: atomic_exit:
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l43.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[MUL]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49
|
|
// CHECK3-SAME: (ptr noundef nonnull align 1 dereferenceable(1) [[AND_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[AND_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[AND_VAR]], ptr [[AND_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AND_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[AND_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[AND_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[AND_VAR1:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[_TMP12:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[AND_VAR]], ptr [[AND_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AND_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: store i8 1, ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP10]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
// CHECK3: land.rhs:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i32 0, i32 [[TMP11]]
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[REM:%.*]] = srem i32 [[TMP12]], 2
|
|
// CHECK3-NEXT: [[CMP3:%.*]] = icmp eq i32 [[REM]], 0
|
|
// CHECK3-NEXT: br label [[LAND_END]]
|
|
// CHECK3: land.end:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = phi i1 [ false, [[OMP_INNER_FOR_BODY]] ], [ [[CMP3]], [[LAND_RHS]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP13]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[AND_VAR1]], ptr [[TMP15]], align 4
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP16]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i8, ptr [[TMP0]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP17]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL5]], label [[LAND_RHS6:%.*]], label [[LAND_END8:%.*]]
|
|
// CHECK3: land.rhs6:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL7:%.*]] = trunc i8 [[TMP18]] to i1
|
|
// CHECK3-NEXT: br label [[LAND_END8]]
|
|
// CHECK3: land.end8:
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LAND_RHS6]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[TMP19]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL9]], ptr [[TMP0]], align 1
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[TMP20]] to i1
|
|
// CHECK3-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[TMP0]] monotonic, align 1
|
|
// CHECK3-NEXT: br label [[ATOMIC_CONT:%.*]]
|
|
// CHECK3: atomic_cont:
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP27:%.*]], [[LAND_END17:%.*]] ]
|
|
// CHECK3-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP21]] to i1
|
|
// CHECK3-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[TOBOOL11]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL13]], ptr [[_TMP12]], align 1
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i8, ptr [[_TMP12]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[TMP22]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL14]], label [[LAND_RHS15:%.*]], label [[LAND_END17]]
|
|
// CHECK3: land.rhs15:
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i8, ptr [[AND_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[TMP23]] to i1
|
|
// CHECK3-NEXT: br label [[LAND_END17]]
|
|
// CHECK3: land.end17:
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = phi i1 [ false, [[ATOMIC_CONT]] ], [ [[TOBOOL16]], [[LAND_RHS15]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL18:%.*]] = zext i1 [[TMP24]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL18]], ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK3-NEXT: [[TMP25:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = cmpxchg ptr [[TMP0]], i8 [[TMP21]], i8 [[TMP25]] monotonic monotonic, align 1
|
|
// CHECK3-NEXT: [[TMP27]] = extractvalue { i8, i1 } [[TMP26]], 0
|
|
// CHECK3-NEXT: [[TMP28:%.*]] = extractvalue { i8, i1 } [[TMP26]], 1
|
|
// CHECK3-NEXT: br i1 [[TMP28]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
|
|
// CHECK3: atomic_exit:
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l49.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
|
|
// CHECK3: land.rhs:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP5]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP9]] to i1
|
|
// CHECK3-NEXT: br label [[LAND_END]]
|
|
// CHECK3: land.end:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL2]], [[LAND_RHS]] ]
|
|
// CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP10]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[TMP7]], align 1
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55
|
|
// CHECK3-SAME: (ptr noundef nonnull align 1 dereferenceable(1) [[OR_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[OR_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[OR_VAR]], ptr [[OR_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OR_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[OR_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[OR_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[OR_VAR1:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: [[_TMP12:%.*]] = alloca i8, align 1
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[OR_VAR]], ptr [[OR_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OR_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: store i8 0, ptr [[OR_VAR1]], align 1
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i8, ptr [[OR_VAR1]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP10]] to i1
|
|
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]]
|
|
// CHECK3: lor.rhs:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i32 0, i32 [[TMP11]]
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[REM:%.*]] = srem i32 [[TMP12]], 2
// CHECK3-NEXT: [[CMP3:%.*]] = icmp eq i32 [[REM]], 0
// CHECK3-NEXT: br label [[LOR_END]]
// CHECK3: lor.end:
// CHECK3-NEXT: [[TMP13:%.*]] = phi i1 [ true, [[OMP_INNER_FOR_BODY]] ], [ [[CMP3]], [[LOR_RHS]] ]
// CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP13]] to i8
// CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[OR_VAR1]], align 1
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: store ptr [[OR_VAR1]], ptr [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: switch i32 [[TMP16]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.reduction.case1:
// CHECK3-NEXT: [[TMP17:%.*]] = load i8, ptr [[TMP0]], align 1
// CHECK3-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP17]] to i1
// CHECK3-NEXT: br i1 [[TOBOOL5]], label [[LOR_END8:%.*]], label [[LOR_RHS6:%.*]]
// CHECK3: lor.rhs6:
// CHECK3-NEXT: [[TMP18:%.*]] = load i8, ptr [[OR_VAR1]], align 1
// CHECK3-NEXT: [[TOBOOL7:%.*]] = trunc i8 [[TMP18]] to i1
// CHECK3-NEXT: br label [[LOR_END8]]
// CHECK3: lor.end8:
// CHECK3-NEXT: [[TMP19:%.*]] = phi i1 [ true, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LOR_RHS6]] ]
// CHECK3-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[TMP19]] to i8
// CHECK3-NEXT: store i8 [[FROMBOOL9]], ptr [[TMP0]], align 1
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.case2:
// CHECK3-NEXT: [[TMP20:%.*]] = load i8, ptr [[OR_VAR1]], align 1
// CHECK3-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[TMP20]] to i1
// CHECK3-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[TMP0]] monotonic, align 1
// CHECK3-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK3: atomic_cont:
// CHECK3-NEXT: [[TMP21:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP27:%.*]], [[LOR_END17:%.*]] ]
// CHECK3-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP21]] to i1
// CHECK3-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[TOBOOL11]] to i8
// CHECK3-NEXT: store i8 [[FROMBOOL13]], ptr [[_TMP12]], align 1
// CHECK3-NEXT: [[TMP22:%.*]] = load i8, ptr [[_TMP12]], align 1
// CHECK3-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[TMP22]] to i1
// CHECK3-NEXT: br i1 [[TOBOOL14]], label [[LOR_END17]], label [[LOR_RHS15:%.*]]
// CHECK3: lor.rhs15:
// CHECK3-NEXT: [[TMP23:%.*]] = load i8, ptr [[OR_VAR1]], align 1
// CHECK3-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[TMP23]] to i1
// CHECK3-NEXT: br label [[LOR_END17]]
// CHECK3: lor.end17:
// CHECK3-NEXT: [[TMP24:%.*]] = phi i1 [ true, [[ATOMIC_CONT]] ], [ [[TOBOOL16]], [[LOR_RHS15]] ]
// CHECK3-NEXT: [[FROMBOOL18:%.*]] = zext i1 [[TMP24]] to i8
// CHECK3-NEXT: store i8 [[FROMBOOL18]], ptr [[ATOMIC_TEMP]], align 1
// CHECK3-NEXT: [[TMP25:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
// CHECK3-NEXT: [[TMP26:%.*]] = cmpxchg ptr [[TMP0]], i8 [[TMP21]], i8 [[TMP25]] monotonic monotonic, align 1
// CHECK3-NEXT: [[TMP27]] = extractvalue { i8, i1 } [[TMP26]], 0
// CHECK3-NEXT: [[TMP28:%.*]] = extractvalue { i8, i1 } [[TMP26]], 1
// CHECK3-NEXT: br i1 [[TMP28]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK3: atomic_exit:
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.default:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l55.omp_outlined.omp.reduction.reduction_func
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
// CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
// CHECK3-NEXT: br i1 [[TOBOOL]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]]
// CHECK3: lor.rhs:
// CHECK3-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP5]], align 1
// CHECK3-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK3-NEXT: br label [[LOR_END]]
// CHECK3: lor.end:
// CHECK3-NEXT: [[TMP10:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[TOBOOL2]], [[LOR_RHS]] ]
// CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP10]] to i8
// CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[TMP7]], align 1
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 -1, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i32 0, i32 [[TMP11]]
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[AND:%.*]] = and i32 [[TMP10]], [[TMP12]]
|
|
// CHECK3-NEXT: store i32 [[AND]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP14]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[AND4:%.*]] = and i32 [[TMP16]], [[TMP17]]
|
|
// CHECK3-NEXT: store i32 [[AND4]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw and ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l61.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[AND:%.*]] = and i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[AND]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i32 0, i32 [[TMP11]]
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[OR:%.*]] = or i32 [[TMP10]], [[TMP12]]
|
|
// CHECK3-NEXT: store i32 [[OR]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP14]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[OR4:%.*]] = or i32 [[TMP16]], [[TMP17]]
|
|
// CHECK3-NEXT: store i32 [[OR4]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw or ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l66.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[OR:%.*]] = or i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[OR]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i32 0, i32 [[TMP11]]
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[XOR:%.*]] = xor i32 [[TMP10]], [[TMP12]]
|
|
// CHECK3-NEXT: store i32 [[XOR]], ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP14]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[XOR4:%.*]] = xor i32 [[TMP16]], [[TMP17]]
|
|
// CHECK3-NEXT: store i32 [[XOR4]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = atomicrmw xor ptr [[TMP0]], i32 [[TMP18]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l71.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[XOR:%.*]] = xor i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: store i32 [[XOR]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 -2147483648, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i32 0, i32 [[TMP11]]
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[CMP3:%.*]] = icmp sge i32 [[TMP10]], [[TMP12]]
|
|
// CHECK3-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
|
|
// CHECK3: cond.true4:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END7:%.*]]
|
|
// CHECK3: cond.false5:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i32 0, i32 [[TMP14]]
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END7]]
|
|
// CHECK3: cond.end7:
|
|
// CHECK3-NEXT: [[COND8:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE4]] ], [ [[TMP15]], [[COND_FALSE5]] ]
|
|
// CHECK3-NEXT: store i32 [[COND8]], ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR1]], ptr [[TMP17]], align 4
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP19]], [[TMP20]]
|
|
// CHECK3-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
|
|
// CHECK3: cond.true11:
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END13:%.*]]
|
|
// CHECK3: cond.false12:
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END13]]
|
|
// CHECK3: cond.end13:
|
|
// CHECK3-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP21]], [[COND_TRUE11]] ], [ [[TMP22]], [[COND_FALSE12]] ]
|
|
// CHECK3-NEXT: store i32 [[COND14]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = atomicrmw max ptr [[TMP0]], i32 [[TMP23]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l77.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83
|
|
// CHECK3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83.omp_outlined, ptr [[TMP0]], ptr [[TMP1]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83.omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], ptr noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[T_VAR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[VEC_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[T_VAR]], ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[VEC]], ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[T_VAR_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[VEC_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 2147483647, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP3]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i32 0, i32 [[TMP11]]
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP10]], [[TMP12]]
|
|
// CHECK3-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
|
|
// CHECK3: cond.true4:
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END7:%.*]]
|
|
// CHECK3: cond.false5:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [2 x i32], ptr [[TMP1]], i32 0, i32 [[TMP14]]
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END7]]
|
|
// CHECK3: cond.end7:
|
|
// CHECK3-NEXT: [[COND8:%.*]] = phi i32 [ [[TMP13]], [[COND_TRUE4]] ], [ [[TMP15]], [[COND_FALSE5]] ]
|
|
// CHECK3-NEXT: store i32 [[COND8]], ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP16]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP3]])
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
|
|
// CHECK3-NEXT: store ptr [[T_VAR1]], ptr [[TMP17]], align 4
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP3]], i32 1, i32 4, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
|
|
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
|
|
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
|
|
// CHECK3-NEXT: ]
|
|
// CHECK3: .omp.reduction.case1:
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[CMP10:%.*]] = icmp slt i32 [[TMP19]], [[TMP20]]
|
|
// CHECK3-NEXT: br i1 [[CMP10]], label [[COND_TRUE11:%.*]], label [[COND_FALSE12:%.*]]
|
|
// CHECK3: cond.true11:
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END13:%.*]]
|
|
// CHECK3: cond.false12:
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END13]]
|
|
// CHECK3: cond.end13:
|
|
// CHECK3-NEXT: [[COND14:%.*]] = phi i32 [ [[TMP21]], [[COND_TRUE11]] ], [ [[TMP22]], [[COND_FALSE12]] ]
|
|
// CHECK3-NEXT: store i32 [[COND14]], ptr [[TMP0]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.case2:
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[T_VAR1]], align 4
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = atomicrmw min ptr [[TMP0]], i32 [[TMP23]] monotonic, align 4
|
|
// CHECK3-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP3]], ptr @.gomp_critical_user_.reduction.var)
|
|
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
|
|
// CHECK3: .omp.reduction.default:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiET_v_l83.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK3-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR3]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK9-LABEL: define {{[^@]+}}@main
|
|
// CHECK9-SAME: () #[[ATTR0:[0-9]+]] {
|
|
// CHECK9-NEXT: entry:
|
|
// CHECK9-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
|
|
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
|
|
// CHECK9-NEXT: [[REF_TMP1:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 1
|
|
// CHECK9-NEXT: [[REF_TMP2:%.*]] = alloca [[CLASS_ANON_2:%.*]], align 1
|
|
// CHECK9-NEXT: [[REF_TMP3:%.*]] = alloca [[CLASS_ANON_4:%.*]], align 1
|
|
// CHECK9-NEXT: [[REF_TMP4:%.*]] = alloca [[CLASS_ANON_6:%.*]], align 1
|
|
// CHECK9-NEXT: [[REF_TMP5:%.*]] = alloca [[CLASS_ANON_8:%.*]], align 1
|
|
// CHECK9-NEXT: [[REF_TMP6:%.*]] = alloca [[CLASS_ANON_10:%.*]], align 1
|
|
// CHECK9-NEXT: [[REF_TMP7:%.*]] = alloca [[CLASS_ANON_12:%.*]], align 1
|
|
// CHECK9-NEXT: [[REF_TMP8:%.*]] = alloca [[CLASS_ANON_14:%.*]], align 1
|
|
// CHECK9-NEXT: [[REF_TMP9:%.*]] = alloca [[CLASS_ANON_16:%.*]], align 1
|
|
// CHECK9-NEXT: store i32 0, ptr [[RETVAL]], align 4
|
|
// CHECK9-NEXT: call void @"_ZZ4mainENK3$_0clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
|
|
// CHECK9-NEXT: call void @"_ZZ4mainENK3$_1clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP1]])
|
|
// CHECK9-NEXT: call void @"_ZZ4mainENK3$_2clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP2]])
|
|
// CHECK9-NEXT: call void @"_ZZ4mainENK3$_3clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP3]])
|
|
// CHECK9-NEXT: call void @"_ZZ4mainENK3$_4clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP4]])
|
|
// CHECK9-NEXT: call void @"_ZZ4mainENK3$_5clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP5]])
|
|
// CHECK9-NEXT: call void @"_ZZ4mainENK3$_6clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP6]])
|
|
// CHECK9-NEXT: call void @"_ZZ4mainENK3$_7clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP7]])
|
|
// CHECK9-NEXT: call void @"_ZZ4mainENK3$_8clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP8]])
|
|
// CHECK9-NEXT: call void @"_ZZ4mainENK3$_9clEv"(ptr noundef nonnull align 1 dereferenceable(1) [[REF_TMP9]])
|
|
// CHECK9-NEXT: ret i32 0
|
|
//
|
|
//
|
|
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l96
|
|
// CHECK9-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2:[0-9]+]] {
|
|
// CHECK9-NEXT: entry:
|
|
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK9-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
|
|
// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3:[0-9]+]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l96.omp_outlined, ptr [[TMP0]])
|
|
// CHECK9-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l96.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_18:%.*]], align 8
// CHECK9-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: store i32 0, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], [[TMP9]]
// CHECK9-NEXT: store i32 [[ADD3]], ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_18]], ptr [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP11]], align 8
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP13]], align 8
// CHECK9-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l96.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT: ]
// CHECK9: .omp.reduction.case1:
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK9-NEXT: store i32 [[ADD5]], ptr [[TMP0]], align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.case2:
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[TMP18:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP17]] monotonic, align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.default:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l96.omp_outlined.omp.reduction.reduction_func
// CHECK9-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l112
// CHECK9-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l112.omp_outlined, ptr [[TMP0]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l112.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_19:%.*]], align 8
// CHECK9-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: store i32 0, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP10]], [[TMP9]]
// CHECK9-NEXT: store i32 [[SUB]], ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_19]], ptr [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP11]], align 8
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_1clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP13]], align 8
// CHECK9-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l112.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT: ]
// CHECK9: .omp.reduction.case1:
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// CHECK9-NEXT: store i32 [[ADD4]], ptr [[TMP0]], align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.case2:
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[TMP18:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP17]] monotonic, align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.default:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l112.omp_outlined.omp.reduction.reduction_func
// CHECK9-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP9]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[TMP7]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l122
// CHECK9-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l122.omp_outlined, ptr [[TMP0]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l122.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_20:%.*]], align 8
// CHECK9-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK9-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[_TMP6:%.*]] = alloca i32, align 4
// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: store i32 1, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[MUL3:%.*]] = mul nsw i32 [[TMP10]], [[TMP9]]
// CHECK9-NEXT: store i32 [[MUL3]], ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_20]], ptr [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP11]], align 8
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_2clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT: store ptr [[SIVAR1]], ptr [[TMP13]], align 8
// CHECK9-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l122.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT: ]
// CHECK9: .omp.reduction.case1:
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[MUL5:%.*]] = mul nsw i32 [[TMP15]], [[TMP16]]
// CHECK9-NEXT: store i32 [[MUL5]], ptr [[TMP0]], align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.case2:
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i32, ptr [[TMP0]] monotonic, align 4
// CHECK9-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK9: atomic_cont:
// CHECK9-NEXT: [[TMP18:%.*]] = phi i32 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP23:%.*]], [[ATOMIC_CONT]] ]
// CHECK9-NEXT: store i32 [[TMP18]], ptr [[_TMP6]], align 4
// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[_TMP6]], align 4
// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[SIVAR1]], align 4
// CHECK9-NEXT: [[MUL7:%.*]] = mul nsw i32 [[TMP19]], [[TMP20]]
// CHECK9-NEXT: store i32 [[MUL7]], ptr [[ATOMIC_TEMP]], align 4
// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK9-NEXT: [[TMP22:%.*]] = cmpxchg ptr [[TMP0]], i32 [[TMP18]], i32 [[TMP21]] monotonic monotonic, align 4
// CHECK9-NEXT: [[TMP23]] = extractvalue { i32, i1 } [[TMP22]], 0
// CHECK9-NEXT: [[TMP24:%.*]] = extractvalue { i32, i1 } [[TMP22]], 1
// CHECK9-NEXT: br i1 [[TMP24]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK9: atomic_exit:
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.default:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l122.omp_outlined.omp.reduction.reduction_func
// CHECK9-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], [[TMP9]]
// CHECK9-NEXT: store i32 [[MUL]], ptr [[TMP7]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l133
// CHECK9-SAME: (ptr noundef nonnull align 1 dereferenceable(1) [[AND_VAR:%.*]], i64 noundef [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[AND_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[SIVAR_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store ptr [[AND_VAR]], ptr [[AND_VAR_ADDR]], align 8
// CHECK9-NEXT: store i64 [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AND_VAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIVAR_ADDR]], align 4
// CHECK9-NEXT: store i32 [[TMP1]], ptr [[SIVAR_CASTED]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[SIVAR_CASTED]], align 8
// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l133.omp_outlined, ptr [[TMP0]], i64 [[TMP2]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l133.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[AND_VAR:%.*]], i64 noundef [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[AND_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[AND_VAR1:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_21:%.*]], align 8
// CHECK9-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK9-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[_TMP12:%.*]] = alloca i8, align 1
// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[AND_VAR]], ptr [[AND_VAR_ADDR]], align 8
// CHECK9-NEXT: store i64 [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AND_VAR_ADDR]], align 8
// CHECK9-NEXT: store i8 1, ptr [[AND_VAR1]], align 1
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i8, ptr [[AND_VAR1]], align 1
// CHECK9-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK9-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK9: land.rhs:
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[CMP3:%.*]] = icmp eq i32 [[TMP10]], 0
// CHECK9-NEXT: br label [[LAND_END]]
// CHECK9: land.end:
// CHECK9-NEXT: [[TMP11:%.*]] = phi i1 [ false, [[OMP_INNER_FOR_BODY]] ], [ [[CMP3]], [[LAND_RHS]] ]
// CHECK9-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP11]] to i8
// CHECK9-NEXT: store i8 [[FROMBOOL]], ptr [[AND_VAR1]], align 1
// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_21]], ptr [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store ptr [[SIVAR_ADDR]], ptr [[TMP12]], align 8
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_3clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT: store ptr [[AND_VAR1]], ptr [[TMP14]], align 8
// CHECK9-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l133.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT: ]
// CHECK9: .omp.reduction.case1:
// CHECK9-NEXT: [[TMP16:%.*]] = load i8, ptr [[TMP0]], align 1
// CHECK9-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP16]] to i1
// CHECK9-NEXT: br i1 [[TOBOOL5]], label [[LAND_RHS6:%.*]], label [[LAND_END8:%.*]]
// CHECK9: land.rhs6:
// CHECK9-NEXT: [[TMP17:%.*]] = load i8, ptr [[AND_VAR1]], align 1
// CHECK9-NEXT: [[TOBOOL7:%.*]] = trunc i8 [[TMP17]] to i1
// CHECK9-NEXT: br label [[LAND_END8]]
// CHECK9: land.end8:
// CHECK9-NEXT: [[TMP18:%.*]] = phi i1 [ false, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LAND_RHS6]] ]
// CHECK9-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[TMP18]] to i8
// CHECK9-NEXT: store i8 [[FROMBOOL9]], ptr [[TMP0]], align 1
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.case2:
// CHECK9-NEXT: [[TMP19:%.*]] = load i8, ptr [[AND_VAR1]], align 1
// CHECK9-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[TMP19]] to i1
// CHECK9-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[TMP0]] monotonic, align 1
// CHECK9-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK9: atomic_cont:
// CHECK9-NEXT: [[TMP20:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP26:%.*]], [[LAND_END17:%.*]] ]
// CHECK9-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP20]] to i1
// CHECK9-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[TOBOOL11]] to i8
// CHECK9-NEXT: store i8 [[FROMBOOL13]], ptr [[_TMP12]], align 1
// CHECK9-NEXT: [[TMP21:%.*]] = load i8, ptr [[_TMP12]], align 1
// CHECK9-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[TMP21]] to i1
// CHECK9-NEXT: br i1 [[TOBOOL14]], label [[LAND_RHS15:%.*]], label [[LAND_END17]]
// CHECK9: land.rhs15:
// CHECK9-NEXT: [[TMP22:%.*]] = load i8, ptr [[AND_VAR1]], align 1
// CHECK9-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[TMP22]] to i1
// CHECK9-NEXT: br label [[LAND_END17]]
// CHECK9: land.end17:
// CHECK9-NEXT: [[TMP23:%.*]] = phi i1 [ false, [[ATOMIC_CONT]] ], [ [[TOBOOL16]], [[LAND_RHS15]] ]
// CHECK9-NEXT: [[FROMBOOL18:%.*]] = zext i1 [[TMP23]] to i8
// CHECK9-NEXT: store i8 [[FROMBOOL18]], ptr [[ATOMIC_TEMP]], align 1
// CHECK9-NEXT: [[TMP24:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
// CHECK9-NEXT: [[TMP25:%.*]] = cmpxchg ptr [[TMP0]], i8 [[TMP20]], i8 [[TMP24]] monotonic monotonic, align 1
// CHECK9-NEXT: [[TMP26]] = extractvalue { i8, i1 } [[TMP25]], 0
// CHECK9-NEXT: [[TMP27:%.*]] = extractvalue { i8, i1 } [[TMP25]], 1
// CHECK9-NEXT: br i1 [[TMP27]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK9: atomic_exit:
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.default:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l133.omp_outlined.omp.reduction.reduction_func
// CHECK9-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
// CHECK9-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
// CHECK9-NEXT: br i1 [[TOBOOL]], label [[LAND_RHS:%.*]], label [[LAND_END:%.*]]
// CHECK9: land.rhs:
// CHECK9-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP5]], align 1
// CHECK9-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK9-NEXT: br label [[LAND_END]]
// CHECK9: land.end:
// CHECK9-NEXT: [[TMP10:%.*]] = phi i1 [ false, [[ENTRY:%.*]] ], [ [[TOBOOL2]], [[LAND_RHS]] ]
// CHECK9-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP10]] to i8
// CHECK9-NEXT: store i8 [[FROMBOOL]], ptr [[TMP7]], align 1
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l144
// CHECK9-SAME: (ptr noundef nonnull align 1 dereferenceable(1) [[OR_VAR:%.*]], i64 noundef [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[OR_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[SIVAR_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store ptr [[OR_VAR]], ptr [[OR_VAR_ADDR]], align 8
// CHECK9-NEXT: store i64 [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OR_VAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIVAR_ADDR]], align 4
// CHECK9-NEXT: store i32 [[TMP1]], ptr [[SIVAR_CASTED]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[SIVAR_CASTED]], align 8
// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l144.omp_outlined, ptr [[TMP0]], i64 [[TMP2]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l144.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[OR_VAR:%.*]], i64 noundef [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[OR_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[OR_VAR1:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_22:%.*]], align 8
// CHECK9-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK9-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
// CHECK9-NEXT: [[_TMP12:%.*]] = alloca i8, align 1
// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[OR_VAR]], ptr [[OR_VAR_ADDR]], align 8
// CHECK9-NEXT: store i64 [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[OR_VAR_ADDR]], align 8
// CHECK9-NEXT: store i8 0, ptr [[OR_VAR1]], align 1
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i8, ptr [[OR_VAR1]], align 1
// CHECK9-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK9-NEXT: br i1 [[TOBOOL]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]]
// CHECK9: lor.rhs:
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[CMP3:%.*]] = icmp eq i32 [[TMP10]], 0
// CHECK9-NEXT: br label [[LOR_END]]
// CHECK9: lor.end:
// CHECK9-NEXT: [[TMP11:%.*]] = phi i1 [ true, [[OMP_INNER_FOR_BODY]] ], [ [[CMP3]], [[LOR_RHS]] ]
// CHECK9-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP11]] to i8
// CHECK9-NEXT: store i8 [[FROMBOOL]], ptr [[OR_VAR1]], align 1
// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_22]], ptr [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store ptr [[SIVAR_ADDR]], ptr [[TMP12]], align 8
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_4clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK9-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT: store ptr [[OR_VAR1]], ptr [[TMP14]], align 8
// CHECK9-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l144.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT: ]
// CHECK9: .omp.reduction.case1:
// CHECK9-NEXT: [[TMP16:%.*]] = load i8, ptr [[TMP0]], align 1
// CHECK9-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP16]] to i1
// CHECK9-NEXT: br i1 [[TOBOOL5]], label [[LOR_END8:%.*]], label [[LOR_RHS6:%.*]]
// CHECK9: lor.rhs6:
// CHECK9-NEXT: [[TMP17:%.*]] = load i8, ptr [[OR_VAR1]], align 1
// CHECK9-NEXT: [[TOBOOL7:%.*]] = trunc i8 [[TMP17]] to i1
// CHECK9-NEXT: br label [[LOR_END8]]
// CHECK9: lor.end8:
// CHECK9-NEXT: [[TMP18:%.*]] = phi i1 [ true, [[DOTOMP_REDUCTION_CASE1]] ], [ [[TOBOOL7]], [[LOR_RHS6]] ]
// CHECK9-NEXT: [[FROMBOOL9:%.*]] = zext i1 [[TMP18]] to i8
// CHECK9-NEXT: store i8 [[FROMBOOL9]], ptr [[TMP0]], align 1
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.case2:
// CHECK9-NEXT: [[TMP19:%.*]] = load i8, ptr [[OR_VAR1]], align 1
// CHECK9-NEXT: [[TOBOOL10:%.*]] = trunc i8 [[TMP19]] to i1
// CHECK9-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i8, ptr [[TMP0]] monotonic, align 1
// CHECK9-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK9: atomic_cont:
// CHECK9-NEXT: [[TMP20:%.*]] = phi i8 [ [[ATOMIC_LOAD]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[TMP26:%.*]], [[LOR_END17:%.*]] ]
// CHECK9-NEXT: [[TOBOOL11:%.*]] = trunc i8 [[TMP20]] to i1
// CHECK9-NEXT: [[FROMBOOL13:%.*]] = zext i1 [[TOBOOL11]] to i8
// CHECK9-NEXT: store i8 [[FROMBOOL13]], ptr [[_TMP12]], align 1
// CHECK9-NEXT: [[TMP21:%.*]] = load i8, ptr [[_TMP12]], align 1
// CHECK9-NEXT: [[TOBOOL14:%.*]] = trunc i8 [[TMP21]] to i1
// CHECK9-NEXT: br i1 [[TOBOOL14]], label [[LOR_END17]], label [[LOR_RHS15:%.*]]
// CHECK9: lor.rhs15:
// CHECK9-NEXT: [[TMP22:%.*]] = load i8, ptr [[OR_VAR1]], align 1
// CHECK9-NEXT: [[TOBOOL16:%.*]] = trunc i8 [[TMP22]] to i1
// CHECK9-NEXT: br label [[LOR_END17]]
// CHECK9: lor.end17:
// CHECK9-NEXT: [[TMP23:%.*]] = phi i1 [ true, [[ATOMIC_CONT]] ], [ [[TOBOOL16]], [[LOR_RHS15]] ]
// CHECK9-NEXT: [[FROMBOOL18:%.*]] = zext i1 [[TMP23]] to i8
// CHECK9-NEXT: store i8 [[FROMBOOL18]], ptr [[ATOMIC_TEMP]], align 1
// CHECK9-NEXT: [[TMP24:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
// CHECK9-NEXT: [[TMP25:%.*]] = cmpxchg ptr [[TMP0]], i8 [[TMP20]], i8 [[TMP24]] monotonic monotonic, align 1
// CHECK9-NEXT: [[TMP26]] = extractvalue { i8, i1 } [[TMP25]], 0
// CHECK9-NEXT: [[TMP27:%.*]] = extractvalue { i8, i1 } [[TMP25]], 1
// CHECK9-NEXT: br i1 [[TMP27]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK9: atomic_exit:
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.default:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l144.omp_outlined.omp.reduction.reduction_func
// CHECK9-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP7]], align 1
// CHECK9-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP8]] to i1
// CHECK9-NEXT: br i1 [[TOBOOL]], label [[LOR_END:%.*]], label [[LOR_RHS:%.*]]
// CHECK9: lor.rhs:
// CHECK9-NEXT: [[TMP9:%.*]] = load i8, ptr [[TMP5]], align 1
// CHECK9-NEXT: [[TOBOOL2:%.*]] = trunc i8 [[TMP9]] to i1
// CHECK9-NEXT: br label [[LOR_END]]
// CHECK9: lor.end:
// CHECK9-NEXT: [[TMP10:%.*]] = phi i1 [ true, [[ENTRY:%.*]] ], [ [[TOBOOL2]], [[LOR_RHS]] ]
// CHECK9-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TMP10]] to i8
// CHECK9-NEXT: store i8 [[FROMBOOL]], ptr [[TMP7]], align 1
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155
// CHECK9-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], i64 noundef [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[SIVAR_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: store i64 [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIVAR_ADDR]], align 4
// CHECK9-NEXT: store i32 [[TMP1]], ptr [[SIVAR_CASTED]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[SIVAR_CASTED]], align 8
// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.omp_outlined, ptr [[TMP0]], i64 [[TMP2]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], i64 noundef [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_23:%.*]], align 8
// CHECK9-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: store i64 [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: store i32 -1, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[AND:%.*]] = and i32 [[TMP9]], [[TMP10]]
// CHECK9-NEXT: store i32 [[AND]], ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_23]], ptr [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store ptr [[SIVAR_ADDR]], ptr [[TMP11]], align 8
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_5clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP13]], align 8
// CHECK9-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT: ]
// CHECK9: .omp.reduction.case1:
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[AND4:%.*]] = and i32 [[TMP15]], [[TMP16]]
// CHECK9-NEXT: store i32 [[AND4]], ptr [[TMP0]], align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.case2:
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[TMP18:%.*]] = atomicrmw and ptr [[TMP0]], i32 [[TMP17]] monotonic, align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.default:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.omp_outlined.omp.reduction.reduction_func
|
|
// CHECK9-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4]] {
|
|
// CHECK9-NEXT: entry:
|
|
// CHECK9-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK9-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
|
|
// CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
|
|
// CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
|
|
// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
|
|
// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
|
|
// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
|
|
// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
|
|
// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
|
|
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
|
|
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK9-NEXT: [[AND:%.*]] = and i32 [[TMP8]], [[TMP9]]
// CHECK9-NEXT: store i32 [[AND]], ptr [[TMP7]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166
// CHECK9-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], i64 noundef [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[SIVAR_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: store i64 [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIVAR_ADDR]], align 4
// CHECK9-NEXT: store i32 [[TMP1]], ptr [[SIVAR_CASTED]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[SIVAR_CASTED]], align 8
// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.omp_outlined, ptr [[TMP0]], i64 [[TMP2]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], i64 noundef [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_24:%.*]], align 8
// CHECK9-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: store i64 [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: store i32 0, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[OR:%.*]] = or i32 [[TMP9]], [[TMP10]]
// CHECK9-NEXT: store i32 [[OR]], ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_24]], ptr [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store ptr [[SIVAR_ADDR]], ptr [[TMP11]], align 8
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_6clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP13]], align 8
// CHECK9-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT: ]
// CHECK9: .omp.reduction.case1:
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[OR4:%.*]] = or i32 [[TMP15]], [[TMP16]]
// CHECK9-NEXT: store i32 [[OR4]], ptr [[TMP0]], align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.case2:
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[TMP18:%.*]] = atomicrmw or ptr [[TMP0]], i32 [[TMP17]] monotonic, align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.default:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.omp_outlined.omp.reduction.reduction_func
// CHECK9-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK9-NEXT: [[OR:%.*]] = or i32 [[TMP8]], [[TMP9]]
// CHECK9-NEXT: store i32 [[OR]], ptr [[TMP7]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l177
// CHECK9-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], i64 noundef [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[SIVAR_CASTED:%.*]] = alloca i64, align 8
// CHECK9-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: store i64 [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP1:%.*]] = load i32, ptr [[SIVAR_ADDR]], align 4
// CHECK9-NEXT: store i32 [[TMP1]], ptr [[SIVAR_CASTED]], align 4
// CHECK9-NEXT: [[TMP2:%.*]] = load i64, ptr [[SIVAR_CASTED]], align 8
// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l177.omp_outlined, ptr [[TMP0]], i64 [[TMP2]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l177.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[BIT_VAR:%.*]], i64 noundef [[SIVAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[BIT_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[SIVAR_ADDR:%.*]] = alloca i64, align 8
// CHECK9-NEXT: [[BIT_VAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_25:%.*]], align 8
// CHECK9-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[BIT_VAR]], ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: store i64 [[SIVAR]], ptr [[SIVAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[BIT_VAR_ADDR]], align 8
// CHECK9-NEXT: store i32 0, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[XOR:%.*]] = xor i32 [[TMP9]], [[TMP10]]
// CHECK9-NEXT: store i32 [[XOR]], ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[CLASS_ANON_25]], ptr [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store ptr [[SIVAR_ADDR]], ptr [[TMP11]], align 8
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_7clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK9-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT: store ptr [[BIT_VAR1]], ptr [[TMP13]], align 8
// CHECK9-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l177.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: switch i32 [[TMP14]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT: ]
// CHECK9: .omp.reduction.case1:
// CHECK9-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK9-NEXT: [[TMP16:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[XOR4:%.*]] = xor i32 [[TMP15]], [[TMP16]]
// CHECK9-NEXT: store i32 [[XOR4]], ptr [[TMP0]], align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.case2:
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[BIT_VAR1]], align 4
// CHECK9-NEXT: [[TMP18:%.*]] = atomicrmw xor ptr [[TMP0]], i32 [[TMP17]] monotonic, align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.default:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l177.omp_outlined.omp.reduction.reduction_func
// CHECK9-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK9-NEXT: [[XOR:%.*]] = xor i32 [[TMP8]], [[TMP9]]
// CHECK9-NEXT: store i32 [[XOR]], ptr [[TMP7]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l188
// CHECK9-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[MAX_VAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[MAX_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[MAX_VAR]], ptr [[MAX_VAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[MAX_VAR_ADDR]], align 8
// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l188.omp_outlined, ptr [[TMP0]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l188.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[MAX_VAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[MAX_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[MAX_VAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_26:%.*]], align 8
// CHECK9-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[MAX_VAR]], ptr [[MAX_VAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[MAX_VAR_ADDR]], align 8
// CHECK9-NEXT: store i32 -2147483648, ptr [[MAX_VAR1]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[MAX_VAR1]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[CMP3:%.*]] = icmp sge i32 [[TMP9]], [[TMP10]]
// CHECK9-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
// CHECK9: cond.true4:
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[MAX_VAR1]], align 4
// CHECK9-NEXT: br label [[COND_END6:%.*]]
// CHECK9: cond.false5:
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: br label [[COND_END6]]
// CHECK9: cond.end6:
// CHECK9-NEXT: [[COND7:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE4]] ], [ [[TMP12]], [[COND_FALSE5]] ]
// CHECK9-NEXT: store i32 [[COND7]], ptr [[MAX_VAR1]], align 4
// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_26]], ptr [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store ptr [[MAX_VAR1]], ptr [[TMP13]], align 8
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_8clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK9-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT: store ptr [[MAX_VAR1]], ptr [[TMP15]], align 8
// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l188.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: switch i32 [[TMP16]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT: ]
// CHECK9: .omp.reduction.case1:
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[MAX_VAR1]], align 4
// CHECK9-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP17]], [[TMP18]]
// CHECK9-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
// CHECK9: cond.true10:
// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK9-NEXT: br label [[COND_END12:%.*]]
// CHECK9: cond.false11:
// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[MAX_VAR1]], align 4
// CHECK9-NEXT: br label [[COND_END12]]
// CHECK9: cond.end12:
// CHECK9-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP19]], [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ]
// CHECK9-NEXT: store i32 [[COND13]], ptr [[TMP0]], align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.case2:
// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[MAX_VAR1]], align 4
// CHECK9-NEXT: [[TMP22:%.*]] = atomicrmw max ptr [[TMP0]], i32 [[TMP21]] monotonic, align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.default:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l188.omp_outlined.omp.reduction.reduction_func
// CHECK9-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[TMP7]], align 4
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l199
// CHECK9-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[MIN_VAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[MIN_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[MIN_VAR]], ptr [[MIN_VAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[MIN_VAR_ADDR]], align 8
// CHECK9-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l199.omp_outlined, ptr [[TMP0]])
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l199.omp_outlined
// CHECK9-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[MIN_VAR:%.*]]) #[[ATTR2]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[MIN_VAR_ADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[MIN_VAR1:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK9-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_27:%.*]], align 8
// CHECK9-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK9-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK9-NEXT: store ptr [[MIN_VAR]], ptr [[MIN_VAR_ADDR]], align 8
// CHECK9-NEXT: [[TMP0:%.*]] = load ptr, ptr [[MIN_VAR_ADDR]], align 8
// CHECK9-NEXT: store i32 2147483647, ptr [[MIN_VAR1]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK9-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK9-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK9-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP2]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK9-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 1
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK9-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK9: omp.inner.for.cond:
// CHECK9-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK9-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// CHECK9-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK9: omp.inner.for.body:
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK9-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[MIN_VAR1]], align 4
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK9-NEXT: br i1 [[CMP3]], label [[COND_TRUE4:%.*]], label [[COND_FALSE5:%.*]]
// CHECK9: cond.true4:
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[MIN_VAR1]], align 4
// CHECK9-NEXT: br label [[COND_END6:%.*]]
// CHECK9: cond.false5:
// CHECK9-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
// CHECK9-NEXT: br label [[COND_END6]]
// CHECK9: cond.end6:
// CHECK9-NEXT: [[COND7:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE4]] ], [ [[TMP12]], [[COND_FALSE5]] ]
// CHECK9-NEXT: store i32 [[COND7]], ptr [[MIN_VAR1]], align 4
// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_27]], ptr [[REF_TMP]], i32 0, i32 0
// CHECK9-NEXT: store ptr [[MIN_VAR1]], ptr [[TMP13]], align 8
// CHECK9-NEXT: call void @"_ZZZ4mainENK3$_9clEvENKUlvE_clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK9-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK9: omp.body.continue:
// CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK9: omp.inner.for.inc:
// CHECK9-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK9-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4
// CHECK9-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK9: omp.inner.for.end:
// CHECK9-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK9: omp.loop.exit:
// CHECK9-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK9-NEXT: store ptr [[MIN_VAR1]], ptr [[TMP15]], align 8
// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce(ptr @[[GLOB2]], i32 [[TMP2]], i32 1, i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l199.omp_outlined.omp.reduction.reduction_func, ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: switch i32 [[TMP16]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK9-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK9-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK9-NEXT: ]
// CHECK9: .omp.reduction.case1:
// CHECK9-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK9-NEXT: [[TMP18:%.*]] = load i32, ptr [[MIN_VAR1]], align 4
// CHECK9-NEXT: [[CMP9:%.*]] = icmp slt i32 [[TMP17]], [[TMP18]]
// CHECK9-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
// CHECK9: cond.true10:
// CHECK9-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK9-NEXT: br label [[COND_END12:%.*]]
// CHECK9: cond.false11:
// CHECK9-NEXT: [[TMP20:%.*]] = load i32, ptr [[MIN_VAR1]], align 4
// CHECK9-NEXT: br label [[COND_END12]]
// CHECK9: cond.end12:
// CHECK9-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP19]], [[COND_TRUE10]] ], [ [[TMP20]], [[COND_FALSE11]] ]
// CHECK9-NEXT: store i32 [[COND13]], ptr [[TMP0]], align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.case2:
// CHECK9-NEXT: [[TMP21:%.*]] = load i32, ptr [[MIN_VAR1]], align 4
// CHECK9-NEXT: [[TMP22:%.*]] = atomicrmw min ptr [[TMP0]], i32 [[TMP21]] monotonic, align 4
// CHECK9-NEXT: call void @__kmpc_end_reduce(ptr @[[GLOB2]], i32 [[TMP2]], ptr @.gomp_critical_user_.reduction.var)
// CHECK9-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK9: .omp.reduction.default:
// CHECK9-NEXT: ret void
//
//
// CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l199.omp_outlined.omp.reduction.reduction_func
// CHECK9-SAME: (ptr noundef [[TMP0:%.*]], ptr noundef [[TMP1:%.*]]) #[[ATTR4]] {
// CHECK9-NEXT: entry:
// CHECK9-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: [[DOTADDR1:%.*]] = alloca ptr, align 8
// CHECK9-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK9-NEXT: store ptr [[TMP1]], ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK9-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTADDR1]], align 8
// CHECK9-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP3]], i64 0, i64 0
// CHECK9-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP4]], align 8
// CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP2]], i64 0, i64 0
// CHECK9-NEXT: [[TMP7:%.*]] = load ptr, ptr [[TMP6]], align 8
// CHECK9-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK9-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK9-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
// CHECK9-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK9: cond.true:
// CHECK9-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK9-NEXT: br label [[COND_END:%.*]]
// CHECK9: cond.false:
// CHECK9-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK9-NEXT: br label [[COND_END]]
// CHECK9: cond.end:
// CHECK9-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK9-NEXT: store i32 [[COND]], ptr [[TMP7]], align 4
// CHECK9-NEXT: ret void
//