A kernel implicit parameter (dyn_ptr) was introduced some time back. This patch increments the kernel-arguments version emitted by compilers that support dyn_ptr. The runtime uses this version to determine whether the implicit parameter was generated by the compiler; the versioning is required to support use cases where code generated by an older compiler is linked against a newer runtime. If approved, this patch should be backported to release 18.
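As a rough illustration of how a runtime can use the bumped version (a minimal sketch with hypothetical names and a made-up threshold, not the actual libomptarget API; the real version value lives in the kernel-arguments struct that appears as [[STRUCT___TGT_KERNEL_ARGUMENTS]] in the autogenerated checks below):

```cpp
// Sketch only: KernelArguments, FirstVersionWithDynPtr, and launchKernel are
// hypothetical stand-ins, not the real libomptarget interfaces.
#include <cstdint>
#include <cstdio>

struct KernelArguments {
  uint32_t Version;  // written by the compiler; bumped when dyn_ptr is emitted
  uint32_t NumArgs;  // number of explicit kernel arguments
  void **ArgPtrs;    // pointers to the explicit arguments
};

// Assumed cutoff: kernels produced at or above this version take the implicit
// trailing dyn_ptr parameter.
constexpr uint32_t FirstVersionWithDynPtr = 3;

void launchKernel(const KernelArguments &Args, void *DynPtr) {
  if (Args.Version >= FirstVersionWithDynPtr) {
    // New compiler: the outlined kernel expects dyn_ptr, so append it.
    std::printf("launching with %u explicit args + implicit dyn_ptr %p\n",
                Args.NumArgs, DynPtr);
  } else {
    // Old compiler: the kernel signature has no dyn_ptr; passing one would
    // misalign the argument list, so it is simply dropped.
    std::printf("launching with %u explicit args (no dyn_ptr)\n", Args.NumArgs);
  }
}

int main() {
  void *Arg = nullptr;
  launchKernel({/*Version=*/2, /*NumArgs=*/1, &Arg}, /*DynPtr=*/nullptr);
  launchKernel({/*Version=*/3, /*NumArgs=*/1, &Arg}, /*DynPtr=*/&Arg);
}
```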
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// Test host codegen.
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK4
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK4
// RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// Test target codegen - host bc file has to be created first. (no significant differences with host version of target region)
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK10
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK10
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -DCK1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK12
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK12
// RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -DCK1 -verify -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -DCK1 -fopenmp-simd -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
#ifdef CK1
int target_teams_fun(int *g){
int n = 1000;
int a[1000];
int te = n / 128;
int th = 128;
// discard n_addr
// discard capture expressions for te and th
#pragma omp target teams loop num_teams(te), thread_limit(th)
for(int i = 0; i < n; i++) {
a[i] = 0;
}
{{{
#pragma omp target teams loop is_device_ptr(g)
for(int i = 0; i < n; i++) {
a[i] = g[0];
}
}}}
// outlined target regions
return a[0];
}
#endif // CK1
#endif // HEADER
|
|
// CHECK1-LABEL: define {{[^@]+}}@_Z16target_teams_funPi
|
|
// CHECK1-SAME: (ptr noundef [[G:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[A:%.*]] = alloca [1000 x i32], align 4
|
|
// CHECK1-NEXT: [[TE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TH:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[N_CASTED3:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 1000, ptr [[N]], align 4
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP0]], 128
|
|
// CHECK1-NEXT: store i32 [[DIV]], ptr [[TE]], align 4
|
|
// CHECK1-NEXT: store i32 128, ptr [[TH]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[TE]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TH]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR__CASTED2]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED2]], align 8
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51(i64 [[TMP4]], ptr [[A]], i64 [[TMP6]], i64 [[TMP8]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP9]], ptr [[N_CASTED3]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[N_CASTED3]], align 8
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load ptr, ptr [[G_ADDR]], align 8
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57(i64 [[TMP10]], ptr [[A]], ptr [[TMP11]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[A]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: ret i32 [[TMP12]]
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51
|
|
// CHECK1-SAME: (i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]])
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB3]], i32 [[TMP0]], i32 [[TMP2]], i32 [[TMP3]])
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined, i64 [[TMP5]], ptr [[TMP1]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP18]], ptr [[N_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined.omp_outlined, i64 [[TMP15]], i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]])
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
|
|
// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP20]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57
|
|
// CHECK1-SAME: (i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[N_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[G_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined, i64 [[TMP2]], ptr [[TMP0]], ptr [[TMP3]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP18]], ptr [[N_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[G_ADDR]], align 8
|
|
// CHECK1-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined.omp_outlined, i64 [[TMP15]], i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], ptr [[TMP20]])
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP24]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined.omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
|
|
// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP8]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
|
|
// CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[G_ADDR]], align 8
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i64 0
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: store i32 [[TMP18]], ptr [[ARRAYIDX7]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@_Z16target_teams_funPi
|
|
// CHECK2-SAME: (ptr noundef [[G:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[A:%.*]] = alloca [1000 x i32], align 4
|
|
// CHECK2-NEXT: [[TE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TH:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 8
|
|
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 8
|
|
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 8
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK2-NEXT: [[N_CASTED7:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [3 x ptr], align 8
|
|
// CHECK2-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [3 x ptr], align 8
|
|
// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [3 x ptr], align 8
|
|
// CHECK2-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_13:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[KERNEL_ARGS18:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK2-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 8
|
|
// CHECK2-NEXT: store i32 1000, ptr [[N]], align 4
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP0]], 128
|
|
// CHECK2-NEXT: store i32 [[DIV]], ptr [[TE]], align 4
|
|
// CHECK2-NEXT: store i32 128, ptr [[TH]], align 4
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[TE]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TH]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR__CASTED2]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED2]], align 8
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK2-NEXT: store i64 [[TMP4]], ptr [[TMP9]], align 8
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK2-NEXT: store i64 [[TMP4]], ptr [[TMP10]], align 8
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
|
|
// CHECK2-NEXT: store ptr null, ptr [[TMP11]], align 8
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[TMP12]], align 8
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[TMP13]], align 8
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
|
|
// CHECK2-NEXT: store ptr null, ptr [[TMP14]], align 8
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
|
|
// CHECK2-NEXT: store i64 [[TMP6]], ptr [[TMP15]], align 8
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
|
|
// CHECK2-NEXT: store i64 [[TMP6]], ptr [[TMP16]], align 8
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
|
|
// CHECK2-NEXT: store ptr null, ptr [[TMP17]], align 8
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
|
|
// CHECK2-NEXT: store i64 [[TMP8]], ptr [[TMP18]], align 8
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
|
|
// CHECK2-NEXT: store i64 [[TMP8]], ptr [[TMP19]], align 8
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3
|
|
// CHECK2-NEXT: store ptr null, ptr [[TMP20]], align 8
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP24]], ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP25]], 0
|
|
// CHECK2-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[SUB6:%.*]] = sub nsw i32 [[DIV5]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB6]], ptr [[DOTCAPTURE_EXPR_4]], align 4
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP26]], 1
|
|
// CHECK2-NEXT: [[TMP27:%.*]] = zext i32 [[ADD]] to i64
|
|
// CHECK2-NEXT: [[TMP28:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP23]], 0
|
|
// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK2-NEXT: store i32 3, ptr [[TMP29]], align 4
|
|
// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK2-NEXT: store i32 4, ptr [[TMP30]], align 4
|
|
// CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK2-NEXT: store ptr [[TMP21]], ptr [[TMP31]], align 8
|
|
// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK2-NEXT: store ptr [[TMP22]], ptr [[TMP32]], align 8
|
|
// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK2-NEXT: store ptr @.offload_sizes, ptr [[TMP33]], align 8
|
|
// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK2-NEXT: store ptr @.offload_maptypes, ptr [[TMP34]], align 8
|
|
// CHECK2-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK2-NEXT: store ptr null, ptr [[TMP35]], align 8
|
|
// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK2-NEXT: store ptr null, ptr [[TMP36]], align 8
|
|
// CHECK2-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK2-NEXT: store i64 [[TMP27]], ptr [[TMP37]], align 8
|
|
// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
|
|
// CHECK2-NEXT: store i64 0, ptr [[TMP38]], align 8
|
|
// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
|
|
// CHECK2-NEXT: store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP39]], align 4
|
|
// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
|
|
// CHECK2-NEXT: store [3 x i32] [[TMP28]], ptr [[TMP40]], align 4
|
|
// CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
|
|
// CHECK2-NEXT: store i32 0, ptr [[TMP41]], align 4
|
|
// CHECK2-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 -1, i32 [[TMP23]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.region_id, ptr [[KERNEL_ARGS]])
|
|
// CHECK2-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
|
|
// CHECK2-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK2: omp_offload.failed:
|
|
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51(i64 [[TMP4]], ptr [[A]], i64 [[TMP6]], i64 [[TMP8]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK2: omp_offload.cont:
|
|
// CHECK2-NEXT: [[TMP44:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP44]], ptr [[N_CASTED7]], align 4
|
|
// CHECK2-NEXT: [[TMP45:%.*]] = load i64, ptr [[N_CASTED7]], align 8
|
|
// CHECK2-NEXT: [[TMP46:%.*]] = load ptr, ptr [[G_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK2-NEXT: store i64 [[TMP45]], ptr [[TMP47]], align 8
|
|
// CHECK2-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK2-NEXT: store i64 [[TMP45]], ptr [[TMP48]], align 8
|
|
// CHECK2-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 0
|
|
// CHECK2-NEXT: store ptr null, ptr [[TMP49]], align 8
|
|
// CHECK2-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[TMP50]], align 8
|
|
// CHECK2-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[TMP51]], align 8
|
|
// CHECK2-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 1
|
|
// CHECK2-NEXT: store ptr null, ptr [[TMP52]], align 8
|
|
// CHECK2-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 2
|
|
// CHECK2-NEXT: store ptr [[TMP46]], ptr [[TMP53]], align 8
|
|
// CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 2
|
|
// CHECK2-NEXT: store ptr [[TMP46]], ptr [[TMP54]], align 8
|
|
// CHECK2-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i64 0, i64 2
|
|
// CHECK2-NEXT: store ptr null, ptr [[TMP55]], align 8
|
|
// CHECK2-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK2-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK2-NEXT: [[TMP58:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP58]], ptr [[DOTCAPTURE_EXPR_12]], align 4
|
|
// CHECK2-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_12]], align 4
|
|
// CHECK2-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP59]], 0
|
|
// CHECK2-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
|
|
// CHECK2-NEXT: [[SUB16:%.*]] = sub nsw i32 [[DIV15]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB16]], ptr [[DOTCAPTURE_EXPR_13]], align 4
|
|
// CHECK2-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_13]], align 4
|
|
// CHECK2-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP60]], 1
|
|
// CHECK2-NEXT: [[TMP61:%.*]] = zext i32 [[ADD17]] to i64
|
|
// CHECK2-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 0
|
|
// CHECK2-NEXT: store i32 3, ptr [[TMP62]], align 4
|
|
// CHECK2-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 1
|
|
// CHECK2-NEXT: store i32 3, ptr [[TMP63]], align 4
|
|
// CHECK2-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 2
|
|
// CHECK2-NEXT: store ptr [[TMP56]], ptr [[TMP64]], align 8
|
|
// CHECK2-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 3
|
|
// CHECK2-NEXT: store ptr [[TMP57]], ptr [[TMP65]], align 8
|
|
// CHECK2-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 4
|
|
// CHECK2-NEXT: store ptr @.offload_sizes.1, ptr [[TMP66]], align 8
|
|
// CHECK2-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 5
|
|
// CHECK2-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP67]], align 8
|
|
// CHECK2-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 6
|
|
// CHECK2-NEXT: store ptr null, ptr [[TMP68]], align 8
|
|
// CHECK2-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 7
|
|
// CHECK2-NEXT: store ptr null, ptr [[TMP69]], align 8
|
|
// CHECK2-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 8
|
|
// CHECK2-NEXT: store i64 [[TMP61]], ptr [[TMP70]], align 8
|
|
// CHECK2-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 9
|
|
// CHECK2-NEXT: store i64 0, ptr [[TMP71]], align 8
|
|
// CHECK2-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 10
|
|
// CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP72]], align 4
|
|
// CHECK2-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 11
|
|
// CHECK2-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP73]], align 4
|
|
// CHECK2-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 12
|
|
// CHECK2-NEXT: store i32 0, ptr [[TMP74]], align 4
|
|
// CHECK2-NEXT: [[TMP75:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.region_id, ptr [[KERNEL_ARGS18]])
|
|
// CHECK2-NEXT: [[TMP76:%.*]] = icmp ne i32 [[TMP75]], 0
|
|
// CHECK2-NEXT: br i1 [[TMP76]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]]
|
|
// CHECK2: omp_offload.failed19:
|
|
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57(i64 [[TMP45]], ptr [[A]], ptr [[TMP46]]) #[[ATTR2]]
|
|
// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT20]]
|
|
// CHECK2: omp_offload.cont20:
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[A]], i64 0, i64 0
|
|
// CHECK2-NEXT: [[TMP77:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK2-NEXT: ret i32 [[TMP77]]
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51
|
|
// CHECK2-SAME: (i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3]])
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
// CHECK2-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB3]], i32 [[TMP0]], i32 [[TMP2]], i32 [[TMP3]])
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, ptr [[N_CASTED]], align 8
// CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined, i64 [[TMP5]], ptr [[TMP1]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK2-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK2-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP18]], ptr [[N_CASTED]], align 4
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined.omp_outlined, i64 [[TMP15]], i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]])
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined.omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I4:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
|
|
// CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
|
|
// CHECK2-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK2-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK2-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], 1
|
|
// CHECK2-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP20]])
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57
|
|
// CHECK2-SAME: (i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[N_CASTED]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[G_ADDR]], align 8
|
|
// CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined, i64 [[TMP2]], ptr [[TMP0]], ptr [[TMP3]])
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK2-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK2-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP18]], ptr [[N_CASTED]], align 4
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load ptr, ptr [[G_ADDR]], align 8
|
|
// CHECK2-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined.omp_outlined, i64 [[TMP15]], i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], ptr [[TMP20]])
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
|
|
// CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP24]])
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined.omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I4:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
|
|
// CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP8]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
|
|
// CHECK2-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK2-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK2-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load ptr, ptr [[G_ADDR]], align 8
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i64 0
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: store i32 [[TMP18]], ptr [[ARRAYIDX7]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], 1
|
|
// CHECK2-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@_Z16target_teams_funPi
|
|
// CHECK4-SAME: (ptr noundef [[G:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[N:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[A:%.*]] = alloca [1000 x i32], align 4
|
|
// CHECK4-NEXT: [[TE:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[TH:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__CASTED2:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x ptr], align 4
|
|
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x ptr], align 4
|
|
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x ptr], align 4
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
|
|
// CHECK4-NEXT: [[N_CASTED7:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS8:%.*]] = alloca [3 x ptr], align 4
|
|
// CHECK4-NEXT: [[DOTOFFLOAD_PTRS9:%.*]] = alloca [3 x ptr], align 4
|
|
// CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS10:%.*]] = alloca [3 x ptr], align 4
|
|
// CHECK4-NEXT: [[_TMP11:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_13:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[KERNEL_ARGS18:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
|
|
// CHECK4-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 1000, ptr [[N]], align 4
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP0]], 128
|
|
// CHECK4-NEXT: store i32 [[DIV]], ptr [[TE]], align 4
|
|
// CHECK4-NEXT: store i32 128, ptr [[TH]], align 4
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, ptr [[TE]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[TH]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP5]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP7]], ptr [[DOTCAPTURE_EXPR__CASTED2]], align 4
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED2]], align 4
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK4-NEXT: store i32 [[TMP4]], ptr [[TMP9]], align 4
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK4-NEXT: store i32 [[TMP4]], ptr [[TMP10]], align 4
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
|
|
// CHECK4-NEXT: store ptr null, ptr [[TMP11]], align 4
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
|
|
// CHECK4-NEXT: store ptr [[A]], ptr [[TMP12]], align 4
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 1
|
|
// CHECK4-NEXT: store ptr [[A]], ptr [[TMP13]], align 4
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
|
|
// CHECK4-NEXT: store ptr null, ptr [[TMP14]], align 4
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
|
|
// CHECK4-NEXT: store i32 [[TMP6]], ptr [[TMP15]], align 4
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 2
|
|
// CHECK4-NEXT: store i32 [[TMP6]], ptr [[TMP16]], align 4
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
|
|
// CHECK4-NEXT: store ptr null, ptr [[TMP17]], align 4
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
|
|
// CHECK4-NEXT: store i32 [[TMP8]], ptr [[TMP18]], align 4
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 3
|
|
// CHECK4-NEXT: store i32 [[TMP8]], ptr [[TMP19]], align 4
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
|
|
// CHECK4-NEXT: store ptr null, ptr [[TMP20]], align 4
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
|
|
// CHECK4-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: [[TMP24:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP24]], ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK4-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP25]], 0
|
|
// CHECK4-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK4-NEXT: [[SUB6:%.*]] = sub nsw i32 [[DIV5]], 1
|
|
// CHECK4-NEXT: store i32 [[SUB6]], ptr [[DOTCAPTURE_EXPR_4]], align 4
|
|
// CHECK4-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_4]], align 4
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP26]], 1
|
|
// CHECK4-NEXT: [[TMP27:%.*]] = zext i32 [[ADD]] to i64
|
|
// CHECK4-NEXT: [[TMP28:%.*]] = insertvalue [3 x i32] zeroinitializer, i32 [[TMP23]], 0
|
|
// CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
|
|
// CHECK4-NEXT: store i32 3, ptr [[TMP29]], align 4
|
|
// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
|
|
// CHECK4-NEXT: store i32 4, ptr [[TMP30]], align 4
|
|
// CHECK4-NEXT: [[TMP31:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
|
|
// CHECK4-NEXT: store ptr [[TMP21]], ptr [[TMP31]], align 4
|
|
// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
|
|
// CHECK4-NEXT: store ptr [[TMP22]], ptr [[TMP32]], align 4
|
|
// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
|
|
// CHECK4-NEXT: store ptr @.offload_sizes, ptr [[TMP33]], align 4
|
|
// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
|
|
// CHECK4-NEXT: store ptr @.offload_maptypes, ptr [[TMP34]], align 4
|
|
// CHECK4-NEXT: [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
|
|
// CHECK4-NEXT: store ptr null, ptr [[TMP35]], align 4
|
|
// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
|
|
// CHECK4-NEXT: store ptr null, ptr [[TMP36]], align 4
|
|
// CHECK4-NEXT: [[TMP37:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
|
|
// CHECK4-NEXT: store i64 [[TMP27]], ptr [[TMP37]], align 8
|
|
// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
|
|
// CHECK4-NEXT: store i64 0, ptr [[TMP38]], align 8
|
|
// CHECK4-NEXT: [[TMP39:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
|
|
// CHECK4-NEXT: store [3 x i32] [i32 -1, i32 0, i32 0], ptr [[TMP39]], align 4
|
|
// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
|
|
// CHECK4-NEXT: store [3 x i32] [[TMP28]], ptr [[TMP40]], align 4
|
|
// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
|
|
// CHECK4-NEXT: store i32 0, ptr [[TMP41]], align 4
|
|
// CHECK4-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3:[0-9]+]], i64 -1, i32 -1, i32 [[TMP23]], ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.region_id, ptr [[KERNEL_ARGS]])
|
|
// CHECK4-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0
|
|
// CHECK4-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
|
|
// CHECK4: omp_offload.failed:
|
|
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51(i32 [[TMP4]], ptr [[A]], i32 [[TMP6]], i32 [[TMP8]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]]
|
|
// CHECK4: omp_offload.cont:
|
|
// CHECK4-NEXT: [[TMP44:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP44]], ptr [[N_CASTED7]], align 4
|
|
// CHECK4-NEXT: [[TMP45:%.*]] = load i32, ptr [[N_CASTED7]], align 4
|
|
// CHECK4-NEXT: [[TMP46:%.*]] = load ptr, ptr [[G_ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK4-NEXT: store i32 [[TMP45]], ptr [[TMP47]], align 4
|
|
// CHECK4-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK4-NEXT: store i32 [[TMP45]], ptr [[TMP48]], align 4
|
|
// CHECK4-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 0
|
|
// CHECK4-NEXT: store ptr null, ptr [[TMP49]], align 4
|
|
// CHECK4-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 1
|
|
// CHECK4-NEXT: store ptr [[A]], ptr [[TMP50]], align 4
|
|
// CHECK4-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 1
|
|
// CHECK4-NEXT: store ptr [[A]], ptr [[TMP51]], align 4
|
|
// CHECK4-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 1
|
|
// CHECK4-NEXT: store ptr null, ptr [[TMP52]], align 4
|
|
// CHECK4-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 2
|
|
// CHECK4-NEXT: store ptr [[TMP46]], ptr [[TMP53]], align 4
|
|
// CHECK4-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 2
|
|
// CHECK4-NEXT: store ptr [[TMP46]], ptr [[TMP54]], align 4
|
|
// CHECK4-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_MAPPERS10]], i32 0, i32 2
|
|
// CHECK4-NEXT: store ptr null, ptr [[TMP55]], align 4
|
|
// CHECK4-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_BASEPTRS8]], i32 0, i32 0
|
|
// CHECK4-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x ptr], ptr [[DOTOFFLOAD_PTRS9]], i32 0, i32 0
|
|
// CHECK4-NEXT: [[TMP58:%.*]] = load i32, ptr [[N]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP58]], ptr [[DOTCAPTURE_EXPR_12]], align 4
|
|
// CHECK4-NEXT: [[TMP59:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_12]], align 4
|
|
// CHECK4-NEXT: [[SUB14:%.*]] = sub nsw i32 [[TMP59]], 0
|
|
// CHECK4-NEXT: [[DIV15:%.*]] = sdiv i32 [[SUB14]], 1
|
|
// CHECK4-NEXT: [[SUB16:%.*]] = sub nsw i32 [[DIV15]], 1
|
|
// CHECK4-NEXT: store i32 [[SUB16]], ptr [[DOTCAPTURE_EXPR_13]], align 4
|
|
// CHECK4-NEXT: [[TMP60:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_13]], align 4
|
|
// CHECK4-NEXT: [[ADD17:%.*]] = add nsw i32 [[TMP60]], 1
|
|
// CHECK4-NEXT: [[TMP61:%.*]] = zext i32 [[ADD17]] to i64
|
|
// CHECK4-NEXT: [[TMP62:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 0
|
|
// CHECK4-NEXT: store i32 3, ptr [[TMP62]], align 4
|
|
// CHECK4-NEXT: [[TMP63:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 1
|
|
// CHECK4-NEXT: store i32 3, ptr [[TMP63]], align 4
|
|
// CHECK4-NEXT: [[TMP64:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 2
|
|
// CHECK4-NEXT: store ptr [[TMP56]], ptr [[TMP64]], align 4
|
|
// CHECK4-NEXT: [[TMP65:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 3
|
|
// CHECK4-NEXT: store ptr [[TMP57]], ptr [[TMP65]], align 4
|
|
// CHECK4-NEXT: [[TMP66:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 4
|
|
// CHECK4-NEXT: store ptr @.offload_sizes.1, ptr [[TMP66]], align 4
|
|
// CHECK4-NEXT: [[TMP67:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 5
|
|
// CHECK4-NEXT: store ptr @.offload_maptypes.2, ptr [[TMP67]], align 4
|
|
// CHECK4-NEXT: [[TMP68:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 6
|
|
// CHECK4-NEXT: store ptr null, ptr [[TMP68]], align 4
|
|
// CHECK4-NEXT: [[TMP69:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 7
|
|
// CHECK4-NEXT: store ptr null, ptr [[TMP69]], align 4
|
|
// CHECK4-NEXT: [[TMP70:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 8
|
|
// CHECK4-NEXT: store i64 [[TMP61]], ptr [[TMP70]], align 8
|
|
// CHECK4-NEXT: [[TMP71:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 9
|
|
// CHECK4-NEXT: store i64 0, ptr [[TMP71]], align 8
|
|
// CHECK4-NEXT: [[TMP72:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 10
|
|
// CHECK4-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP72]], align 4
|
|
// CHECK4-NEXT: [[TMP73:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 11
|
|
// CHECK4-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP73]], align 4
|
|
// CHECK4-NEXT: [[TMP74:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS18]], i32 0, i32 12
|
|
// CHECK4-NEXT: store i32 0, ptr [[TMP74]], align 4
|
|
// CHECK4-NEXT: [[TMP75:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB3]], i64 -1, i32 0, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.region_id, ptr [[KERNEL_ARGS18]])
|
|
// CHECK4-NEXT: [[TMP76:%.*]] = icmp ne i32 [[TMP75]], 0
|
|
// CHECK4-NEXT: br i1 [[TMP76]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]]
|
|
// CHECK4: omp_offload.failed19:
|
|
// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57(i32 [[TMP45]], ptr [[A]], ptr [[TMP46]]) #[[ATTR2]]
|
|
// CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT20]]
|
|
// CHECK4: omp_offload.cont20:
|
|
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[A]], i32 0, i32 0
|
|
// CHECK4-NEXT: [[TMP77:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK4-NEXT: ret i32 [[TMP77]]
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51
|
|
// CHECK4-SAME: (i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], i32 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3]])
|
|
// CHECK4-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB3]], i32 [[TMP0]], i32 [[TMP2]], i32 [[TMP3]])
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK4-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined, i32 [[TMP5]], ptr [[TMP1]])
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined
|
|
// CHECK4-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR1]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK4-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK4-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK4: omp.precond.then:
|
|
// CHECK4-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK4-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK4: cond.true:
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK4: cond.false:
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: br label [[COND_END]]
|
|
// CHECK4: cond.end:
|
|
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK4-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK4-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP16]], ptr [[N_CASTED]], align 4
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK4-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined.omp_outlined, i32 [[TMP14]], i32 [[TMP15]], i32 [[TMP17]], ptr [[TMP0]])
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
|
|
// CHECK4-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK4: omp.loop.exit:
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
|
|
// CHECK4-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK4: omp.precond.end:
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined.omp_outlined
|
|
// CHECK4-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR1]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I3:%.*]] = alloca i32, align 4
|
// CHECK4-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK4-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK4-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK4-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK4-NEXT: store i32 0, ptr [[I]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK4-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK4: omp.precond.then:
// CHECK4-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK4-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK4-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK4-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK4-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4: cond.true:
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK4-NEXT: br label [[COND_END:%.*]]
// CHECK4: cond.false:
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK4-NEXT: br label [[COND_END]]
// CHECK4: cond.end:
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK4-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK4-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4: omp.inner.for.cond:
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK4-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK4-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4: omp.inner.for.body:
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT: store i32 [[ADD]], ptr [[I3]], align 4
// CHECK4-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP17]]
// CHECK4-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4: omp.body.continue:
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4: omp.inner.for.inc:
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK4-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK4: omp.inner.for.end:
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4: omp.loop.exit:
// CHECK4-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK4-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
// CHECK4-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP20]])
// CHECK4-NEXT: br label [[OMP_PRECOND_END]]
// CHECK4: omp.precond.end:
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57
// CHECK4-SAME: (i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR1]] {
// CHECK4-NEXT: entry:
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK4-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 4
// CHECK4-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK4-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK4-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK4-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 4
// CHECK4-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK4-NEXT: store i32 [[TMP1]], ptr [[N_CASTED]], align 4
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_CASTED]], align 4
// CHECK4-NEXT: [[TMP3:%.*]] = load ptr, ptr [[G_ADDR]], align 4
// CHECK4-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined, i32 [[TMP2]], ptr [[TMP0]], ptr [[TMP3]])
// CHECK4-NEXT: ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined
|
|
// CHECK4-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR1]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK4-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK4-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK4-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK4: omp.precond.then:
|
|
// CHECK4-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK4-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK4: cond.true:
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK4: cond.false:
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: br label [[COND_END]]
|
|
// CHECK4: cond.end:
|
|
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK4-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK4-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP16]], ptr [[N_CASTED]], align 4
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load ptr, ptr [[G_ADDR]], align 4
|
|
// CHECK4-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined.omp_outlined, i32 [[TMP14]], i32 [[TMP15]], i32 [[TMP17]], ptr [[TMP0]], ptr [[TMP18]])
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
|
|
// CHECK4-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK4: omp.loop.exit:
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
|
|
// CHECK4-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK4: omp.precond.end:
|
|
// CHECK4-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined.omp_outlined
|
|
// CHECK4-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR1]] {
|
|
// CHECK4-NEXT: entry:
|
|
// CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK4-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK4-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK4-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK4-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK4-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK4-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK4-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK4-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK4-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK4-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK4: omp.precond.then:
|
|
// CHECK4-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK4-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK4-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP8]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK4-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
|
|
// CHECK4-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK4: cond.true:
|
|
// CHECK4-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK4-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK4: cond.false:
|
|
// CHECK4-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: br label [[COND_END]]
|
|
// CHECK4: cond.end:
|
|
// CHECK4-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
|
|
// CHECK4-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK4-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK4: omp.inner.for.cond:
|
|
// CHECK4-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK4-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK4-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK4: omp.inner.for.body:
|
|
// CHECK4-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK4-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK4-NEXT: store i32 [[ADD]], ptr [[I3]], align 4
|
|
// CHECK4-NEXT: [[TMP17:%.*]] = load ptr, ptr [[G_ADDR]], align 4
|
|
// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i32 0
|
|
// CHECK4-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK4-NEXT: [[TMP19:%.*]] = load i32, ptr [[I3]], align 4
|
|
// CHECK4-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP19]]
|
|
// CHECK4-NEXT: store i32 [[TMP18]], ptr [[ARRAYIDX6]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK4: omp.body.continue:
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK4: omp.inner.for.inc:
|
|
// CHECK4-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
|
|
// CHECK4-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK4: omp.inner.for.end:
|
|
// CHECK4-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK4: omp.loop.exit:
|
|
// CHECK4-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK4-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
|
|
// CHECK4-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
|
|
// CHECK4-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK4: omp.precond.end:
|
|
// CHECK4-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51
// CHECK10-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]], i64 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]])
// CHECK10-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK10-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
// CHECK10-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
// CHECK10-NEXT: store i64 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK10-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK10-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
// CHECK10-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB3]], i32 [[TMP0]], i32 [[TMP2]], i32 [[TMP3]])
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK10-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4
// CHECK10-NEXT: [[TMP5:%.*]] = load i64, ptr [[N_CASTED]], align 8
// CHECK10-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined, i64 [[TMP5]], ptr [[TMP1]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined
|
|
// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK10-NEXT: entry:
|
|
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK10-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK10-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK10-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK10-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK10: omp.precond.then:
|
|
// CHECK10-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK10-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK10-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK10-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK10-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK10-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK10: cond.true:
|
|
// CHECK10-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK10: cond.false:
|
|
// CHECK10-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: br label [[COND_END]]
|
|
// CHECK10: cond.end:
|
|
// CHECK10-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK10-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK10: omp.inner.for.cond:
|
|
// CHECK10-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK10-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK10: omp.inner.for.body:
|
|
// CHECK10-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK10-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
|
|
// CHECK10-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
|
|
// CHECK10-NEXT: [[TMP18:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP18]], ptr [[N_CASTED]], align 4
|
|
// CHECK10-NEXT: [[TMP19:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK10-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined.omp_outlined, i64 [[TMP15]], i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]])
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK10: omp.inner.for.inc:
|
|
// CHECK10-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK10-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK10: omp.inner.for.end:
|
|
// CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK10: omp.loop.exit:
|
|
// CHECK10-NEXT: [[TMP22:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4
|
|
// CHECK10-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP23]])
|
|
// CHECK10-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK10: omp.precond.end:
|
|
// CHECK10-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined.omp_outlined
|
|
// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK10-NEXT: entry:
|
|
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK10-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[I4:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK10-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK10-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK10-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK10-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK10-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK10-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK10: omp.precond.then:
|
|
// CHECK10-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK10-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
|
|
// CHECK10-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK10-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
|
|
// CHECK10-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK10-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK10-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK10-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK10-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK10-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
|
|
// CHECK10-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK10: cond.true:
|
|
// CHECK10-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK10: cond.false:
|
|
// CHECK10-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: br label [[COND_END]]
|
|
// CHECK10: cond.end:
|
|
// CHECK10-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
|
|
// CHECK10-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK10: omp.inner.for.cond:
|
|
// CHECK10-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK10-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK10: omp.inner.for.body:
|
|
// CHECK10-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK10-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
|
|
// CHECK10-NEXT: [[TMP17:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK10-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64
|
|
// CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK10-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4
|
|
// CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK10: omp.body.continue:
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK10: omp.inner.for.inc:
|
|
// CHECK10-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP18]], 1
|
|
// CHECK10-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK10: omp.inner.for.end:
|
|
// CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK10: omp.loop.exit:
|
|
// CHECK10-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
|
|
// CHECK10-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP20]])
|
|
// CHECK10-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK10: omp.precond.end:
|
|
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57
// CHECK10-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR0]] {
// CHECK10-NEXT: entry:
// CHECK10-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK10-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 8
// CHECK10-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
// CHECK10-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK10-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
// CHECK10-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK10-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 8
// CHECK10-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK10-NEXT: store i32 [[TMP1]], ptr [[N_CASTED]], align 4
// CHECK10-NEXT: [[TMP2:%.*]] = load i64, ptr [[N_CASTED]], align 8
// CHECK10-NEXT: [[TMP3:%.*]] = load ptr, ptr [[G_ADDR]], align 8
// CHECK10-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined, i64 [[TMP2]], ptr [[TMP0]], ptr [[TMP3]])
// CHECK10-NEXT: ret void
//
//
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined
|
|
// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR0]] {
|
|
// CHECK10-NEXT: entry:
|
|
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK10-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK10-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK10-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK10-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK10-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK10: omp.precond.then:
|
|
// CHECK10-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK10-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK10-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK10-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK10-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK10-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK10: cond.true:
|
|
// CHECK10-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK10: cond.false:
|
|
// CHECK10-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: br label [[COND_END]]
|
|
// CHECK10: cond.end:
|
|
// CHECK10-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK10-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK10: omp.inner.for.cond:
|
|
// CHECK10-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK10-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK10: omp.inner.for.body:
|
|
// CHECK10-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK10-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
|
|
// CHECK10-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
|
|
// CHECK10-NEXT: [[TMP18:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP18]], ptr [[N_CASTED]], align 4
|
|
// CHECK10-NEXT: [[TMP19:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK10-NEXT: [[TMP20:%.*]] = load ptr, ptr [[G_ADDR]], align 8
|
|
// CHECK10-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined.omp_outlined, i64 [[TMP15]], i64 [[TMP17]], i64 [[TMP19]], ptr [[TMP0]], ptr [[TMP20]])
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK10: omp.inner.for.inc:
|
|
// CHECK10-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
|
|
// CHECK10-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK10: omp.inner.for.end:
|
|
// CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK10: omp.loop.exit:
|
|
// CHECK10-NEXT: [[TMP23:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4
|
|
// CHECK10-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP24]])
|
|
// CHECK10-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK10: omp.precond.end:
|
|
// CHECK10-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined.omp_outlined
|
|
// CHECK10-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR0]] {
|
|
// CHECK10-NEXT: entry:
|
|
// CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK10-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK10-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK10-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK10-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: [[I4:%.*]] = alloca i32, align 4
|
|
// CHECK10-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK10-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK10-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK10-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK10-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK10-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK10-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK10-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK10-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK10: omp.precond.then:
|
|
// CHECK10-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK10-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK10-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
|
|
// CHECK10-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK10-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
|
|
// CHECK10-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK10-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK10-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK10-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK10-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP8]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK10-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
|
|
// CHECK10-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK10: cond.true:
|
|
// CHECK10-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK10-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK10: cond.false:
|
|
// CHECK10-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: br label [[COND_END]]
|
|
// CHECK10: cond.end:
|
|
// CHECK10-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
|
|
// CHECK10-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK10-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK10: omp.inner.for.cond:
|
|
// CHECK10-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK10-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK10-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK10: omp.inner.for.body:
|
|
// CHECK10-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK10-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
|
|
// CHECK10-NEXT: [[TMP17:%.*]] = load ptr, ptr [[G_ADDR]], align 8
|
|
// CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i64 0
|
|
// CHECK10-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK10-NEXT: [[TMP19:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK10-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP19]] to i64
|
|
// CHECK10-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK10-NEXT: store i32 [[TMP18]], ptr [[ARRAYIDX7]], align 4
|
|
// CHECK10-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK10: omp.body.continue:
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK10: omp.inner.for.inc:
|
|
// CHECK10-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], 1
|
|
// CHECK10-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK10-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK10: omp.inner.for.end:
|
|
// CHECK10-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK10: omp.loop.exit:
|
|
// CHECK10-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK10-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
|
|
// CHECK10-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
|
|
// CHECK10-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK10: omp.precond.end:
|
|
// CHECK10-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51
// CHECK12-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]], i32 noundef [[DOTCAPTURE_EXPR_1:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTCAPTURE_EXPR__ADDR2:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]])
// CHECK12-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK12-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK12-NEXT: store i32 [[DOTCAPTURE_EXPR_1]], ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ADDR2]], align 4
// CHECK12-NEXT: call void @__kmpc_push_num_teams(ptr @[[GLOB3]], i32 [[TMP0]], i32 [[TMP2]], i32 [[TMP3]])
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP4]], ptr [[N_CASTED]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, ptr [[N_CASTED]], align 4
// CHECK12-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined, i32 [[TMP5]], ptr [[TMP1]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined
|
|
// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0]] {
|
|
// CHECK12-NEXT: entry:
|
|
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK12-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK12-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK12-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK12-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK12-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK12-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK12-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK12: omp.precond.then:
|
|
// CHECK12-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK12-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK12-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK12-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK12-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK12-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK12-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK12-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK12-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK12-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK12-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK12: cond.true:
|
|
// CHECK12-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK12-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK12: cond.false:
|
|
// CHECK12-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK12-NEXT: br label [[COND_END]]
|
|
// CHECK12: cond.end:
|
|
// CHECK12-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK12-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK12-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK12-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK12: omp.inner.for.cond:
|
|
// CHECK12-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK12-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK12-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK12-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK12: omp.inner.for.body:
|
|
// CHECK12-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK12-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK12-NEXT: [[TMP16:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK12-NEXT: store i32 [[TMP16]], ptr [[N_CASTED]], align 4
|
|
// CHECK12-NEXT: [[TMP17:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK12-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 4, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined.omp_outlined, i32 [[TMP14]], i32 [[TMP15]], i32 [[TMP17]], ptr [[TMP0]])
|
|
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK12: omp.inner.for.inc:
|
|
// CHECK12-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK12-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
|
|
// CHECK12-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK12: omp.inner.for.end:
|
|
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK12: omp.loop.exit:
|
|
// CHECK12-NEXT: [[TMP20:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK12-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4
|
|
// CHECK12-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP21]])
|
|
// CHECK12-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK12: omp.precond.end:
|
|
// CHECK12-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l51.omp_outlined.omp_outlined
// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK12-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK12-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK12-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: store i32 0, ptr [[I]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK12-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK12-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK12: omp.precond.then:
// CHECK12-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK12-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK12-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP8]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK12-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK12-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK12-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK12-NEXT: store i32 [[ADD]], ptr [[I3]], align 4
// CHECK12-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4
// CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP17]]
// CHECK12-NEXT: store i32 0, ptr [[ARRAYIDX]], align 4
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK12-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP20]])
// CHECK12-NEXT: br label [[OMP_PRECOND_END]]
// CHECK12: omp.precond.end:
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57
// CHECK12-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK12-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK12-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP1]], ptr [[N_CASTED]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_CASTED]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load ptr, ptr [[G_ADDR]], align 4
// CHECK12-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_teams(ptr @[[GLOB3]], i32 3, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined, i32 [[TMP2]], ptr [[TMP0]], ptr [[TMP3]])
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined
// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK12-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK12-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK12-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: store i32 0, ptr [[I]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK12-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK12-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK12: omp.precond.then:
// CHECK12-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK12-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK12-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP6]], i32 92, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK12-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK12-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
// CHECK12-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK12-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK12-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK12-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
// CHECK12-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK12-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK12-NEXT: [[TMP16:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP16]], ptr [[N_CASTED]], align 4
// CHECK12-NEXT: [[TMP17:%.*]] = load i32, ptr [[N_CASTED]], align 4
// CHECK12-NEXT: [[TMP18:%.*]] = load ptr, ptr [[G_ADDR]], align 4
// CHECK12-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB3]], i32 5, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined.omp_outlined, i32 [[TMP14]], i32 [[TMP15]], i32 [[TMP17]], ptr [[TMP0]], ptr [[TMP18]])
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP19]], [[TMP20]]
// CHECK12-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP22]])
// CHECK12-NEXT: br label [[OMP_PRECOND_END]]
// CHECK12: omp.precond.end:
// CHECK12-NEXT: ret void
//
//
// CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16target_teams_funPi_l57.omp_outlined.omp_outlined
// CHECK12-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[G:%.*]]) #[[ATTR0]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[G_ADDR:%.*]] = alloca ptr, align 4
// CHECK12-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK12-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK12-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK12-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK12-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK12-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK12-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK12-NEXT: store ptr [[G]], ptr [[G_ADDR]], align 4
// CHECK12-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK12-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK12-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK12-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: store i32 0, ptr [[I]], align 4
// CHECK12-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK12-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK12-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK12: omp.precond.then:
// CHECK12-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK12-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK12-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK12-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK12-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK12-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB2]], i32 [[TMP8]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK12-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK12-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK12: cond.true:
// CHECK12-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK12-NEXT: br label [[COND_END:%.*]]
// CHECK12: cond.false:
// CHECK12-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: br label [[COND_END]]
// CHECK12: cond.end:
// CHECK12-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK12-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK12-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK12: omp.inner.for.cond:
// CHECK12-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK12-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK12-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK12: omp.inner.for.body:
// CHECK12-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK12-NEXT: store i32 [[ADD]], ptr [[I3]], align 4
// CHECK12-NEXT: [[TMP17:%.*]] = load ptr, ptr [[G_ADDR]], align 4
// CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i32 0
// CHECK12-NEXT: [[TMP18:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
// CHECK12-NEXT: [[TMP19:%.*]] = load i32, ptr [[I3]], align 4
// CHECK12-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP19]]
// CHECK12-NEXT: store i32 [[TMP18]], ptr [[ARRAYIDX6]], align 4
// CHECK12-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK12: omp.body.continue:
// CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK12: omp.inner.for.inc:
// CHECK12-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], 1
// CHECK12-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4
// CHECK12-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK12: omp.inner.for.end:
// CHECK12-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK12: omp.loop.exit:
// CHECK12-NEXT: [[TMP21:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK12-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP21]], align 4
// CHECK12-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB2]], i32 [[TMP22]])
// CHECK12-NEXT: br label [[OMP_PRECOND_END]]
// CHECK12: omp.precond.end:
// CHECK12-NEXT: ret void
//