IR for 'target teams loop' is now dependent on suitability of associated loop-nest. If a loop-nest: - does not contain a function call, or - the -fopenmp-assume-no-nested-parallelism has been specified, - or the call is to an OpenMP API AND - does not contain nested loop bind(parallel) directives then it can be emitted as 'target teams distribute parallel for', which is the current default. Otherwise, it is emitted as 'target teams distribute'. Added debug output indicating how 'target teams loop' was emitted. Flag is -mllvm -debug-only=target-teams-loop-codegen Added LIT tests explicitly verifying 'target teams loop' emitted as a parallel loop and a distribute loop. Updated other 'loop' related tests as needed to reflect change in IR. - These updates account for most of the changed files and additions/deletions.
3807 lines
251 KiB
C++
3807 lines
251 KiB
C++
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
|
|
// Test target codegen - host bc file has to be created first.
|
|
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
|
|
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK1
|
|
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -fopenmp-optimistic-collapse -o - | FileCheck %s --check-prefix=CHECK2
|
|
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
|
|
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK3
|
|
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK3
|
|
|
|
// expected-no-diagnostics
|
|
#ifndef HEADER
|
|
#define HEADER
|
|
|
|
#define N 1000
|
|
#define M 10
|
|
|
|
template<typename tx>
|
|
tx ftemplate(int n) {
|
|
tx a[N];
|
|
short aa[N];
|
|
tx b[10];
|
|
tx c[M][M];
|
|
tx f = n;
|
|
tx l;
|
|
int k;
|
|
tx *v;
|
|
|
|
#pragma omp target teams loop map(tofrom: aa) num_teams(M) thread_limit(64)
|
|
for(int i = 0; i < n; i++) {
|
|
aa[i] += 1;
|
|
}
|
|
|
|
#pragma omp target teams loop map(tofrom:a, aa, b) if(target: n>40)
|
|
for(int i = 0; i < 10; i++) {
|
|
b[i] += 1;
|
|
}
|
|
|
|
#pragma omp target teams loop collapse(2) firstprivate(f) private(k)
|
|
for(int i = 0; i < M; i++) {
|
|
for(int j = 0; j < M; j++) {
|
|
k = M;
|
|
c[i][j] = i + j * f + k;
|
|
}
|
|
}
|
|
|
|
#pragma omp target teams loop collapse(2)
|
|
for(int i = 0; i < n; i++) {
|
|
for(int j = 0; j < n; j++) {
|
|
c[i][j] = i + j;
|
|
}
|
|
}
|
|
|
|
#pragma omp target teams loop map(a, v[:N])
|
|
for(int i = 0; i < n; i++)
|
|
a[i] = v[i];
|
|
return a[0];
|
|
}
|
|
|
|
int bar(int n){
|
|
int a = 0;
|
|
|
|
a += ftemplate<int>(n);
|
|
|
|
return a;
|
|
}
|
|
|
|
#endif
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28
|
|
// CHECK1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK1: user_code.entry:
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i64 [[TMP4]], ptr [[TMP0]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK1-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK1-NEXT: ret void
|
|
// CHECK1: worker.exit:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK1-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
|
|
// CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP18]], ptr [[N_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP15]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP21]], ptr [[TMP20]], align 8
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP17]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP23]], ptr [[TMP22]], align 8
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP19]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP25]], ptr [[TMP24]], align 8
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP26]], align 8
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP28]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 4)
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
|
|
// CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
|
|
// CHECK1-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
|
|
// CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
|
|
// CHECK1-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
|
|
// CHECK1: cond.true10:
|
|
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END12:%.*]]
|
|
// CHECK1: cond.false11:
|
|
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END12]]
|
|
// CHECK1: cond.end12:
|
|
// CHECK1-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
|
|
// CHECK1-NEXT: store i32 [[COND13]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP39]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: [[TMP40:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP40]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP41]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined_omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
|
|
// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3:[0-9]+]], i32 [[TMP8]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CMP6:%.*]] = icmp ule i64 [[CONV5]], [[TMP11]]
|
|
// CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
|
|
// CHECK1-NEXT: [[CONV7:%.*]] = sext i16 [[TMP14]] to i32
|
|
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[CONV7]], 1
|
|
// CHECK1-NEXT: [[CONV9:%.*]] = trunc i32 [[ADD8]] to i16
|
|
// CHECK1-NEXT: store i16 [[CONV9]], ptr [[ARRAYIDX]], align 2
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
|
|
// CHECK1-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP18]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33
|
|
// CHECK1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK1: user_code.entry:
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1
|
|
// CHECK1-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP3]] to i1
|
|
// CHECK1-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
|
|
// CHECK1-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i64 [[TMP4]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK1-NEXT: ret void
|
|
// CHECK1: worker.exit:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
|
|
// CHECK1-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP8]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP12]], ptr [[TMP11]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP10]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP14]], ptr [[TMP13]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP15]], align 8
|
|
// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 3)
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
|
|
// CHECK1-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
|
|
// CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
|
|
// CHECK1: cond.true5:
|
|
// CHECK1-NEXT: br label [[COND_END7:%.*]]
|
|
// CHECK1: cond.false6:
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END7]]
|
|
// CHECK1: cond.end7:
|
|
// CHECK1-NEXT: [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
|
|
// CHECK1-NEXT: store i32 [[COND8]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined_omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
|
|
// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[CONV2:%.*]] = sext i32 [[TMP6]] to i64
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP4]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38
|
|
// CHECK1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i64 noundef [[F:%.*]]) #[[ATTR4]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[F_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[F_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[F]], ptr [[F_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK1: user_code.entry:
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[F_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[F_CASTED]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i64 [[TMP4]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK1-NEXT: ret void
|
|
// CHECK1: worker.exit:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i64 noundef [[F:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[F_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[F_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[F]], ptr [[F_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
|
|
// CHECK1-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP11]], ptr [[F_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[F_CASTED]], align 8
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP8]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP14]], ptr [[TMP13]], align 8
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP10]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP16]], ptr [[TMP15]], align 8
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP17]], align 8
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP12]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP19]], ptr [[TMP18]], align 8
|
|
// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 4)
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK1-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
|
|
// CHECK1-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
|
|
// CHECK1-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
|
|
// CHECK1: cond.true6:
|
|
// CHECK1-NEXT: br label [[COND_END8:%.*]]
|
|
// CHECK1: cond.false7:
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END8]]
|
|
// CHECK1: cond.end8:
|
|
// CHECK1-NEXT: [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
|
|
// CHECK1-NEXT: store i32 [[COND9]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP28]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined_omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i64 noundef [[F:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[F_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[K:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[F]], ptr [[F_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
|
|
// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[CONV2]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[CONV3:%.*]] = sext i32 [[TMP6]] to i64
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV3]], [[TMP7]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP10]], 10
|
|
// CHECK1-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 10
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL5]]
|
|
// CHECK1-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]]
|
|
// CHECK1-NEXT: store i32 [[ADD7]], ptr [[J]], align 4
|
|
// CHECK1-NEXT: store i32 10, ptr [[K]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK1-NEXT: [[MUL8:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
|
|
// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP11]], [[MUL8]]
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[K]], align 4
|
|
// CHECK1-NEXT: [[ADD10:%.*]] = add nsw i32 [[ADD9]], [[TMP14]]
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM11]]
|
|
// CHECK1-NEXT: store i32 [[ADD10]], ptr [[ARRAYIDX12]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
|
|
// CHECK1-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP4]])
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46
|
|
// CHECK1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR4]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK1: user_code.entry:
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i64 [[TMP4]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK1-NEXT: ret void
|
|
// CHECK1: worker.exit:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I9:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[J10:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK1-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
|
|
// CHECK1-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
|
|
// CHECK1-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
|
|
// CHECK1-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
|
|
// CHECK1-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[J]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: land.lhs.true:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK1-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
|
|
// CHECK1-NEXT: br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_COMB_LB]], align 8
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP7]], ptr [[DOTOMP_COMB_UB]], align 8
|
|
// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK1-NEXT: [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_8(ptr @[[GLOB2]], i32 [[TMP9]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK1-NEXT: [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
|
|
// CHECK1-NEXT: br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i64 [[COND]], ptr [[DOTOMP_COMB_UB]], align 8
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP14]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
|
|
// CHECK1-NEXT: [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
|
|
// CHECK1-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP19]], ptr [[N_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP17]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP22]], ptr [[TMP21]], align 8
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP18]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP23]], align 8
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP20]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP26]], ptr [[TMP25]], align 8
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP27]], align 8
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP29]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 4)
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP30:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP31:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP30]], [[TMP31]]
|
|
// CHECK1-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8
|
|
// CHECK1-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: [[ADD15:%.*]] = add nsw i64 [[TMP32]], [[TMP33]]
|
|
// CHECK1-NEXT: store i64 [[ADD15]], ptr [[DOTOMP_COMB_LB]], align 8
|
|
// CHECK1-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
|
|
// CHECK1-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: [[ADD16:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
|
|
// CHECK1-NEXT: store i64 [[ADD16]], ptr [[DOTOMP_COMB_UB]], align 8
|
|
// CHECK1-NEXT: [[TMP36:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
|
|
// CHECK1-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK1-NEXT: [[CMP17:%.*]] = icmp sgt i64 [[TMP36]], [[TMP37]]
|
|
// CHECK1-NEXT: br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
|
|
// CHECK1: cond.true18:
|
|
// CHECK1-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK1-NEXT: br label [[COND_END20:%.*]]
|
|
// CHECK1: cond.false19:
|
|
// CHECK1-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
|
|
// CHECK1-NEXT: br label [[COND_END20]]
|
|
// CHECK1: cond.end20:
|
|
// CHECK1-NEXT: [[COND21:%.*]] = phi i64 [ [[TMP38]], [[COND_TRUE18]] ], [ [[TMP39]], [[COND_FALSE19]] ]
|
|
// CHECK1-NEXT: store i64 [[COND21]], ptr [[DOTOMP_COMB_UB]], align 8
|
|
// CHECK1-NEXT: [[TMP40:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP40]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: [[TMP41:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP42:%.*]] = load i32, ptr [[TMP41]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP42]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined_omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I9:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[J10:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK1-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
|
|
// CHECK1-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
|
|
// CHECK1-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
|
|
// CHECK1-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
|
|
// CHECK1-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[J]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: land.lhs.true:
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK1-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
|
|
// CHECK1-NEXT: br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP7]], ptr [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP8]], ptr [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP9]], ptr [[DOTOMP_UB]], align 8
|
|
// CHECK1-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB3]], i32 [[TMP11]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1)
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
|
|
// CHECK1-NEXT: store i64 [[TMP12]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CMP11:%.*]] = icmp ule i64 [[TMP13]], [[TMP14]]
|
|
// CHECK1-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK1-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP16]], 0
|
|
// CHECK1-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
|
|
// CHECK1-NEXT: [[MUL14:%.*]] = mul nsw i32 1, [[DIV13]]
|
|
// CHECK1-NEXT: [[CONV15:%.*]] = sext i32 [[MUL14]] to i64
|
|
// CHECK1-NEXT: [[DIV16:%.*]] = sdiv i64 [[TMP15]], [[CONV15]]
|
|
// CHECK1-NEXT: [[MUL17:%.*]] = mul nsw i64 [[DIV16]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL17]]
|
|
// CHECK1-NEXT: [[CONV18:%.*]] = trunc i64 [[ADD]] to i32
|
|
// CHECK1-NEXT: store i32 [[CONV18]], ptr [[I9]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK1-NEXT: [[SUB19:%.*]] = sub nsw i32 [[TMP19]], 0
|
|
// CHECK1-NEXT: [[DIV20:%.*]] = sdiv i32 [[SUB19]], 1
|
|
// CHECK1-NEXT: [[MUL21:%.*]] = mul nsw i32 1, [[DIV20]]
|
|
// CHECK1-NEXT: [[CONV22:%.*]] = sext i32 [[MUL21]] to i64
|
|
// CHECK1-NEXT: [[DIV23:%.*]] = sdiv i64 [[TMP18]], [[CONV22]]
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK1-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP20]], 0
|
|
// CHECK1-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1
|
|
// CHECK1-NEXT: [[MUL26:%.*]] = mul nsw i32 1, [[DIV25]]
|
|
// CHECK1-NEXT: [[CONV27:%.*]] = sext i32 [[MUL26]] to i64
|
|
// CHECK1-NEXT: [[MUL28:%.*]] = mul nsw i64 [[DIV23]], [[CONV27]]
|
|
// CHECK1-NEXT: [[SUB29:%.*]] = sub nsw i64 [[TMP17]], [[MUL28]]
|
|
// CHECK1-NEXT: [[MUL30:%.*]] = mul nsw i64 [[SUB29]], 1
|
|
// CHECK1-NEXT: [[ADD31:%.*]] = add nsw i64 0, [[MUL30]]
|
|
// CHECK1-NEXT: [[CONV32:%.*]] = trunc i64 [[ADD31]] to i32
|
|
// CHECK1-NEXT: store i32 [[CONV32]], ptr [[J10]], align 4
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, ptr [[I9]], align 4
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = load i32, ptr [[J10]], align 4
|
|
// CHECK1-NEXT: [[ADD33:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = load i32, ptr [[I9]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = load i32, ptr [[J10]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM34:%.*]] = sext i32 [[TMP24]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX35:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM34]]
|
|
// CHECK1-NEXT: store i32 [[ADD33]], ptr [[ARRAYIDX35]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8
|
|
// CHECK1-NEXT: [[ADD36:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
|
|
// CHECK1-NEXT: store i64 [[ADD36]], ptr [[DOTOMP_IV]], align 8
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP28]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53
|
|
// CHECK1-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[V:%.*]]) #[[ATTR4]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[V_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[V]], ptr [[V_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK1-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK1-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK1: user_code.entry:
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[V_ADDR]], align 8
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i64 [[TMP4]], ptr [[TMP0]], ptr [[TMP5]]) #[[ATTR2]]
|
|
// CHECK1-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK1-NEXT: ret void
|
|
// CHECK1: worker.exit:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[V:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[V_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x ptr], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[V]], ptr [[V_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK1-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK1: cond.true:
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK1: cond.false:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END]]
|
|
// CHECK1: cond.end:
|
|
// CHECK1-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK1-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK1-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
|
|
// CHECK1-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP18]], ptr [[N_CASTED]], align 4
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load ptr, ptr [[V_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
|
|
// CHECK1-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP15]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP22]], ptr [[TMP21]], align 8
|
|
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
|
|
// CHECK1-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP17]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP24]], ptr [[TMP23]], align 8
|
|
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
|
|
// CHECK1-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP19]] to ptr
|
|
// CHECK1-NEXT: store ptr [[TMP26]], ptr [[TMP25]], align 8
|
|
// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
|
|
// CHECK1-NEXT: store ptr [[TMP0]], ptr [[TMP27]], align 8
|
|
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
|
|
// CHECK1-NEXT: store ptr [[TMP20]], ptr [[TMP28]], align 8
|
|
// CHECK1-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 5)
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
|
|
// CHECK1-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
|
|
// CHECK1-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
|
|
// CHECK1-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP37]], [[TMP38]]
|
|
// CHECK1-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
|
|
// CHECK1: cond.true10:
|
|
// CHECK1-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END12:%.*]]
|
|
// CHECK1: cond.false11:
|
|
// CHECK1-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: br label [[COND_END12]]
|
|
// CHECK1: cond.end12:
|
|
// CHECK1-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE10]] ], [ [[TMP40]], [[COND_FALSE11]] ]
|
|
// CHECK1-NEXT: store i32 [[COND13]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP41]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: [[TMP42:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP42]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP43]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined_omp_outlined
|
|
// CHECK1-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[V:%.*]]) #[[ATTR1]] {
|
|
// CHECK1-NEXT: entry:
|
|
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK1-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[V_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: [[I4:%.*]] = alloca i32, align 4
|
|
// CHECK1-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: store ptr [[V]], ptr [[V_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK1-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK1-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK1-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK1-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK1-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK1: omp.precond.then:
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
|
|
// CHECK1-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
|
|
// CHECK1-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK1-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK1-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP8]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK1-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK1-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK1: omp.inner.for.cond:
|
|
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
|
|
// CHECK1-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK1-NEXT: [[CMP6:%.*]] = icmp ule i64 [[CONV5]], [[TMP11]]
|
|
// CHECK1-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK1: omp.inner.for.body:
|
|
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
|
|
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK1-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
|
|
// CHECK1-NEXT: [[TMP13:%.*]] = load ptr, ptr [[V_ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[IDXPROM]]
|
|
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK1-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM7]]
|
|
// CHECK1-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX8]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK1: omp.body.continue:
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK1: omp.inner.for.inc:
|
|
// CHECK1-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
|
|
// CHECK1-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK1: omp.inner.for.end:
|
|
// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK1: omp.loop.exit:
|
|
// CHECK1-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
|
|
// CHECK1-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP20]])
|
|
// CHECK1-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK1: omp.precond.end:
|
|
// CHECK1-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28
|
|
// CHECK2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK2: user_code.entry:
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i64 [[TMP4]], ptr [[TMP0]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK2-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK2-NEXT: ret void
|
|
// CHECK2: worker.exit:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x ptr], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK2-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK2-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
|
|
// CHECK2-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP18]], ptr [[N_CASTED]], align 4
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = inttoptr i64 [[TMP15]] to ptr
|
|
// CHECK2-NEXT: store ptr [[TMP21]], ptr [[TMP20]], align 8
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = inttoptr i64 [[TMP17]] to ptr
|
|
// CHECK2-NEXT: store ptr [[TMP23]], ptr [[TMP22]], align 8
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = inttoptr i64 [[TMP19]] to ptr
|
|
// CHECK2-NEXT: store ptr [[TMP25]], ptr [[TMP24]], align 8
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
|
|
// CHECK2-NEXT: store ptr [[TMP0]], ptr [[TMP26]], align 8
|
|
// CHECK2-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP28]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 4)
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
|
|
// CHECK2-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
|
|
// CHECK2-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
|
|
// CHECK2-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
|
|
// CHECK2-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
|
|
// CHECK2: cond.true10:
|
|
// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END12:%.*]]
|
|
// CHECK2: cond.false11:
|
|
// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END12]]
|
|
// CHECK2: cond.end12:
|
|
// CHECK2-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
|
|
// CHECK2-NEXT: store i32 [[COND13]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP39]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: [[TMP40:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP40]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP41]])
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined_omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I4:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
|
|
// CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3:[0-9]+]], i32 [[TMP8]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CMP6:%.*]] = icmp ule i64 [[CONV5]], [[TMP11]]
|
|
// CHECK2-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK2-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
|
|
// CHECK2-NEXT: [[CONV7:%.*]] = sext i16 [[TMP14]] to i32
|
|
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[CONV7]], 1
|
|
// CHECK2-NEXT: [[CONV9:%.*]] = trunc i32 [[ADD8]] to i16
|
|
// CHECK2-NEXT: store i16 [[CONV9]], ptr [[ARRAYIDX]], align 2
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
|
|
// CHECK2-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP18]])
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33
|
|
// CHECK2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK2: user_code.entry:
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1
|
|
// CHECK2-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP3]] to i1
|
|
// CHECK2-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
|
|
// CHECK2-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR__CASTED]], align 8
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i64 [[TMP4]]) #[[ATTR2]]
|
|
// CHECK2-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK2-NEXT: ret void
|
|
// CHECK2: worker.exit:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
|
|
// CHECK2-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = inttoptr i64 [[TMP8]] to ptr
|
|
// CHECK2-NEXT: store ptr [[TMP12]], ptr [[TMP11]], align 8
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK2-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP10]] to ptr
// CHECK2-NEXT: store ptr [[TMP14]], ptr [[TMP13]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
// CHECK2-NEXT: store ptr [[TMP0]], ptr [[TMP15]], align 8
// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 3)
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
// CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK2-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK2-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP22]], 9
// CHECK2-NEXT: br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
// CHECK2: cond.true5:
// CHECK2-NEXT: br label [[COND_END7:%.*]]
// CHECK2: cond.false6:
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: br label [[COND_END7]]
// CHECK2: cond.end7:
// CHECK2-NEXT: [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP23]], [[COND_FALSE6]] ]
// CHECK2-NEXT: store i32 [[COND8]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP24]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined_omp_outlined
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: [[CONV1:%.*]] = trunc i64 [[TMP2]] to i32
// CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[CONV1]], ptr [[DOTOMP_UB]], align 4
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[CONV2:%.*]] = sext i32 [[TMP6]] to i64
// CHECK2-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
// CHECK2-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV2]], [[TMP7]]
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP9]] to i64
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP10]], 1
// CHECK2-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK2: omp.body.continue:
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// CHECK2-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP4]])
// CHECK2-NEXT: ret void
//
//
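// NOTE: The _l38 entry point checked below corresponds to the 'collapse(2) firstprivate(f) private(k)'
// construct (source line 38). Like the other CHECK2 kernels in this file, it is expected in the combined
// parallel-loop form: __kmpc_distribute_static_init_4 in the teams-level outlined function, with each
// chunk dispatched through __kmpc_parallel_51 to the nested outlined body.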
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38
// CHECK2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i64 noundef [[F:%.*]]) #[[ATTR4]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[F_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[F_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK2-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK2-NEXT: store i64 [[F]], ptr [[F_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_kernel_environment, ptr [[DYN_PTR]])
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK2: user_code.entry:
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[F_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP3]], ptr [[F_CASTED]], align 4
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, ptr [[F_CASTED]], align 8
// CHECK2-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i64 [[TMP4]]) #[[ATTR2]]
// CHECK2-NEXT: call void @__kmpc_target_deinit()
// CHECK2-NEXT: ret void
// CHECK2: worker.exit:
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i64 noundef [[F:%.*]]) #[[ATTR1]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK2-NEXT: [[F_ADDR:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[F_CASTED:%.*]] = alloca i64, align 8
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x ptr], align 8
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK2-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK2-NEXT: store i64 [[F]], ptr [[F_ADDR]], align 8
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK2-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
// CHECK2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK2-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
// CHECK2-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK2: cond.true:
// CHECK2-NEXT: br label [[COND_END:%.*]]
// CHECK2: cond.false:
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: br label [[COND_END]]
// CHECK2: cond.end:
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK2: omp.inner.for.cond:
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
// CHECK2-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK2: omp.inner.for.body:
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP8:%.*]] = zext i32 [[TMP7]] to i64
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[F_ADDR]], align 4
// CHECK2-NEXT: store i32 [[TMP11]], ptr [[F_CASTED]], align 4
// CHECK2-NEXT: [[TMP12:%.*]] = load i64, ptr [[F_CASTED]], align 8
// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK2-NEXT: [[TMP14:%.*]] = inttoptr i64 [[TMP8]] to ptr
// CHECK2-NEXT: store ptr [[TMP14]], ptr [[TMP13]], align 8
// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK2-NEXT: [[TMP16:%.*]] = inttoptr i64 [[TMP10]] to ptr
// CHECK2-NEXT: store ptr [[TMP16]], ptr [[TMP15]], align 8
// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
// CHECK2-NEXT: store ptr [[TMP0]], ptr [[TMP17]], align 8
// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
// CHECK2-NEXT: [[TMP19:%.*]] = inttoptr i64 [[TMP12]] to ptr
// CHECK2-NEXT: store ptr [[TMP19]], ptr [[TMP18]], align 8
// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 4)
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK2: omp.inner.for.inc:
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK2-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
// CHECK2-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK2-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP26]], 99
// CHECK2-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
// CHECK2: cond.true6:
// CHECK2-NEXT: br label [[COND_END8:%.*]]
// CHECK2: cond.false7:
// CHECK2-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: br label [[COND_END8]]
// CHECK2: cond.end8:
// CHECK2-NEXT: [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP27]], [[COND_FALSE7]] ]
// CHECK2-NEXT: store i32 [[COND9]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK2-NEXT: store i32 [[TMP28]], ptr [[DOTOMP_IV]], align 4
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK2: omp.inner.for.end:
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK2: omp.loop.exit:
// CHECK2-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined_omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i64 noundef [[F:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[F_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[K:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[F]], ptr [[F_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
|
|
// CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[CONV2]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[CONV3:%.*]] = sext i32 [[TMP6]] to i64
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp ule i64 [[CONV3]], [[TMP7]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK2-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP10]], 10
|
|
// CHECK2-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 10
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL5]]
|
|
// CHECK2-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]]
|
|
// CHECK2-NEXT: store i32 [[ADD7]], ptr [[J]], align 4
|
|
// CHECK2-NEXT: store i32 10, ptr [[K]], align 4
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK2-NEXT: [[MUL8:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
|
|
// CHECK2-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP11]], [[MUL8]]
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[K]], align 4
|
|
// CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[ADD9]], [[TMP14]]
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP15]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM11]]
|
|
// CHECK2-NEXT: store i32 [[ADD10]], ptr [[ARRAYIDX12]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
|
|
// CHECK2-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP4]])
|
|
// CHECK2-NEXT: ret void
//
//
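// NOTE: The _l46 entry point checked below corresponds to the plain 'collapse(2)' target teams loop
// (source line 46). Its trip count depends on the runtime value of 'n', so the checks also cover the
// omp.precond.then/omp.precond.end guards around the distribute loop.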
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46
|
|
// CHECK2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR4]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK2: user_code.entry:
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i64 [[TMP4]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK2-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK2-NEXT: ret void
|
|
// CHECK2: worker.exit:
|
|
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I8:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[J9:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x ptr], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK2-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
|
|
// CHECK2-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], [[DIV5]]
|
|
// CHECK2-NEXT: [[SUB6:%.*]] = sub nsw i32 [[MUL]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB6]], ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[J]], align 4
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: land.lhs.true:
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK2-NEXT: [[CMP7:%.*]] = icmp slt i32 0, [[TMP6]]
|
|
// CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP9]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK2-NEXT: [[CMP10:%.*]] = icmp sgt i32 [[TMP10]], [[TMP11]]
|
|
// CHECK2-NEXT: br i1 [[CMP10]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP14]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP16]], 1
|
|
// CHECK2-NEXT: [[CMP11:%.*]] = icmp slt i32 [[TMP15]], [[ADD]]
|
|
// CHECK2-NEXT: br i1 [[CMP11]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = zext i32 [[TMP19]] to i64
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP21]], ptr [[N_CASTED]], align 4
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP18]] to ptr
|
|
// CHECK2-NEXT: store ptr [[TMP24]], ptr [[TMP23]], align 8
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP20]] to ptr
|
|
// CHECK2-NEXT: store ptr [[TMP26]], ptr [[TMP25]], align 8
|
|
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
|
|
// CHECK2-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP22]] to ptr
|
|
// CHECK2-NEXT: store ptr [[TMP28]], ptr [[TMP27]], align 8
|
|
// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
|
|
// CHECK2-NEXT: store ptr [[TMP0]], ptr [[TMP29]], align 8
|
|
// CHECK2-NEXT: [[TMP30:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP30]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP31]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 4)
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
|
|
// CHECK2-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
|
|
// CHECK2-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
|
|
// CHECK2-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK2-NEXT: [[CMP15:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
|
|
// CHECK2-NEXT: br i1 [[CMP15]], label [[COND_TRUE16:%.*]], label [[COND_FALSE17:%.*]]
|
|
// CHECK2: cond.true16:
|
|
// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END18:%.*]]
|
|
// CHECK2: cond.false17:
|
|
// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END18]]
|
|
// CHECK2: cond.end18:
|
|
// CHECK2-NEXT: [[COND19:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE16]] ], [ [[TMP41]], [[COND_FALSE17]] ]
|
|
// CHECK2-NEXT: store i32 [[COND19]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP42]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: [[TMP43:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP43]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP44]])
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined_omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I9:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[J10:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK2-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
|
|
// CHECK2-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], [[DIV5]]
|
|
// CHECK2-NEXT: [[SUB6:%.*]] = sub nsw i32 [[MUL]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB6]], ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[J]], align 4
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: land.lhs.true:
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK2-NEXT: [[CMP7:%.*]] = icmp slt i32 0, [[TMP6]]
|
|
// CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_3]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP7]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP8]] to i32
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV8:%.*]] = trunc i64 [[TMP9]] to i32
|
|
// CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[CONV8]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP11]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[CONV11:%.*]] = sext i32 [[TMP13]] to i64
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CMP12:%.*]] = icmp ule i64 [[CONV11]], [[TMP14]]
|
|
// CHECK2-NEXT: br i1 [[CMP12]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK2-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP16]], 0
|
|
// CHECK2-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1
|
|
// CHECK2-NEXT: [[MUL15:%.*]] = mul nsw i32 1, [[DIV14]]
|
|
// CHECK2-NEXT: [[DIV16:%.*]] = sdiv i32 [[TMP15]], [[MUL15]]
|
|
// CHECK2-NEXT: [[MUL17:%.*]] = mul nsw i32 [[DIV16]], 1
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL17]]
|
|
// CHECK2-NEXT: store i32 [[ADD]], ptr [[I9]], align 4
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK2-NEXT: [[SUB18:%.*]] = sub nsw i32 [[TMP19]], 0
|
|
// CHECK2-NEXT: [[DIV19:%.*]] = sdiv i32 [[SUB18]], 1
|
|
// CHECK2-NEXT: [[MUL20:%.*]] = mul nsw i32 1, [[DIV19]]
|
|
// CHECK2-NEXT: [[DIV21:%.*]] = sdiv i32 [[TMP18]], [[MUL20]]
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
|
|
// CHECK2-NEXT: [[SUB22:%.*]] = sub nsw i32 [[TMP20]], 0
|
|
// CHECK2-NEXT: [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
|
|
// CHECK2-NEXT: [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
|
|
// CHECK2-NEXT: [[MUL25:%.*]] = mul nsw i32 [[DIV21]], [[MUL24]]
|
|
// CHECK2-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP17]], [[MUL25]]
|
|
// CHECK2-NEXT: [[MUL27:%.*]] = mul nsw i32 [[SUB26]], 1
|
|
// CHECK2-NEXT: [[ADD28:%.*]] = add nsw i32 0, [[MUL27]]
|
|
// CHECK2-NEXT: store i32 [[ADD28]], ptr [[J10]], align 4
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = load i32, ptr [[I9]], align 4
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = load i32, ptr [[J10]], align 4
|
|
// CHECK2-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = load i32, ptr [[I9]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = load i32, ptr [[J10]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM30:%.*]] = sext i32 [[TMP24]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM30]]
|
|
// CHECK2-NEXT: store i32 [[ADD29]], ptr [[ARRAYIDX31]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP25]], [[TMP26]]
|
|
// CHECK2-NEXT: store i32 [[ADD32]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP28]])
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
//
//
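// NOTE: The _l53 entry point checked below corresponds to the 'map(a, v[:N])' target teams loop
// (source line 53); 'v' is forwarded as a plain pointer, so the nested outlined function receives
// five captured values (the two loop bounds, n, a, and v).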
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53
|
|
// CHECK2-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[V:%.*]]) #[[ATTR4]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[V_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[V]], ptr [[V_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK2-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK2-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK2: user_code.entry:
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[V_ADDR]], align 8
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i64 [[TMP4]], ptr [[TMP0]], ptr [[TMP5]]) #[[ATTR2]]
|
|
// CHECK2-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK2-NEXT: ret void
|
|
// CHECK2: worker.exit:
|
|
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[V:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[V_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x ptr], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[V]], ptr [[V_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK2-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK2: cond.true:
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK2: cond.false:
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END]]
|
|
// CHECK2: cond.end:
|
|
// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK2-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK2-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
|
|
// CHECK2-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP18]], ptr [[N_CASTED]], align 4
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load ptr, ptr [[V_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
|
|
// CHECK2-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP15]] to ptr
|
|
// CHECK2-NEXT: store ptr [[TMP22]], ptr [[TMP21]], align 8
|
|
// CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
|
|
// CHECK2-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP17]] to ptr
|
|
// CHECK2-NEXT: store ptr [[TMP24]], ptr [[TMP23]], align 8
|
|
// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 2
|
|
// CHECK2-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP19]] to ptr
|
|
// CHECK2-NEXT: store ptr [[TMP26]], ptr [[TMP25]], align 8
|
|
// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 3
|
|
// CHECK2-NEXT: store ptr [[TMP0]], ptr [[TMP27]], align 8
|
|
// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 4
|
|
// CHECK2-NEXT: store ptr [[TMP20]], ptr [[TMP28]], align 8
|
|
// CHECK2-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP30]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 5)
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
|
|
// CHECK2-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
|
|
// CHECK2-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP35]], [[TMP36]]
|
|
// CHECK2-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP37]], [[TMP38]]
|
|
// CHECK2-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
|
|
// CHECK2: cond.true10:
|
|
// CHECK2-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END12:%.*]]
|
|
// CHECK2: cond.false11:
|
|
// CHECK2-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: br label [[COND_END12]]
|
|
// CHECK2: cond.end12:
|
|
// CHECK2-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP39]], [[COND_TRUE10]] ], [ [[TMP40]], [[COND_FALSE11]] ]
|
|
// CHECK2-NEXT: store i32 [[COND13]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP41]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: [[TMP42:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP42]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP43]])
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
//
//
// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined_omp_outlined
|
|
// CHECK2-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[V:%.*]]) #[[ATTR1]] {
|
|
// CHECK2-NEXT: entry:
|
|
// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[V_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: [[I4:%.*]] = alloca i32, align 4
|
|
// CHECK2-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: store ptr [[V]], ptr [[V_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK2-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK2-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK2: omp.precond.then:
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: [[TMP5:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV:%.*]] = trunc i64 [[TMP5]] to i32
|
|
// CHECK2-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP6]] to i32
|
|
// CHECK2-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK2-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK2-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP8]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK2-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK2-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK2: omp.inner.for.cond:
|
|
// CHECK2-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[CONV5:%.*]] = sext i32 [[TMP10]] to i64
|
|
// CHECK2-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR]], align 8
|
|
// CHECK2-NEXT: [[CMP6:%.*]] = icmp ule i64 [[CONV5]], [[TMP11]]
|
|
// CHECK2-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK2: omp.inner.for.body:
|
|
// CHECK2-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
|
|
// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK2-NEXT: store i32 [[ADD]], ptr [[I4]], align 4
|
|
// CHECK2-NEXT: [[TMP13:%.*]] = load ptr, ptr [[V_ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP14:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP14]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i64 [[IDXPROM]]
|
|
// CHECK2-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK2-NEXT: [[TMP16:%.*]] = load i32, ptr [[I4]], align 4
|
|
// CHECK2-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP16]] to i64
|
|
// CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM7]]
|
|
// CHECK2-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX8]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK2: omp.body.continue:
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK2: omp.inner.for.inc:
|
|
// CHECK2-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK2-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
|
|
// CHECK2-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK2: omp.inner.for.end:
|
|
// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK2: omp.loop.exit:
|
|
// CHECK2-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK2-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
|
|
// CHECK2-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP20]])
|
|
// CHECK2-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK2: omp.precond.end:
|
|
// CHECK2-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28
|
|
// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK3: user_code.entry:
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i32 [[TMP4]], ptr [[TMP0]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK3-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK3-NEXT: ret void
|
|
// CHECK3: worker.exit:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK3-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK3: omp.precond.then:
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK3-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
|
|
// CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP16]], ptr [[N_CASTED]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = inttoptr i32 [[TMP14]] to ptr
|
|
// CHECK3-NEXT: store ptr [[TMP19]], ptr [[TMP18]], align 4
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = inttoptr i32 [[TMP15]] to ptr
|
|
// CHECK3-NEXT: store ptr [[TMP21]], ptr [[TMP20]], align 4
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = inttoptr i32 [[TMP17]] to ptr
|
|
// CHECK3-NEXT: store ptr [[TMP23]], ptr [[TMP22]], align 4
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP24]], align 4
|
|
// CHECK3-NEXT: [[TMP25:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[TMP25]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP26]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 4)
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP27:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP27]], [[TMP28]]
|
|
// CHECK3-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
|
|
// CHECK3-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
|
|
// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP33]], [[TMP34]]
|
|
// CHECK3-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
|
|
// CHECK3: cond.true10:
|
|
// CHECK3-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END12:%.*]]
|
|
// CHECK3: cond.false11:
|
|
// CHECK3-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END12]]
|
|
// CHECK3: cond.end12:
|
|
// CHECK3-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP35]], [[COND_TRUE10]] ], [ [[TMP36]], [[COND_FALSE11]] ]
|
|
// CHECK3-NEXT: store i32 [[COND13]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP37]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: [[TMP38:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP38]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP39]])
|
|
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK3: omp.precond.end:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l28_omp_outlined_omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK3-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK3: omp.precond.then:
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3:[0-9]+]], i32 [[TMP8]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
|
|
// CHECK3-NEXT: [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
|
|
// CHECK3-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I3]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[I3]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP13]]
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i16, ptr [[ARRAYIDX]], align 2
|
|
// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP14]] to i32
|
|
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 [[CONV]], 1
|
|
// CHECK3-NEXT: [[CONV6:%.*]] = trunc i32 [[ADD5]] to i16
|
|
// CHECK3-NEXT: store i16 [[CONV6]], ptr [[ARRAYIDX]], align 2
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
|
|
// CHECK3-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP18]])
|
|
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK3: omp.precond.end:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33
|
|
// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR4:[0-9]+]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK3: user_code.entry:
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i8, ptr [[DOTCAPTURE_EXPR__ADDR]], align 1
|
|
// CHECK3-NEXT: [[TOBOOL:%.*]] = trunc i8 [[TMP3]] to i1
|
|
// CHECK3-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8
|
|
// CHECK3-NEXT: store i8 [[FROMBOOL]], ptr [[DOTCAPTURE_EXPR__CASTED]], align 1
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__CASTED]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i32 [[TMP4]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK3-NEXT: ret void
|
|
// CHECK3: worker.exit:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTCAPTURE_EXPR__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[DOTCAPTURE_EXPR_]], ptr [[DOTCAPTURE_EXPR__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: store i32 9, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[CMP1:%.*]] = icmp slt i32 [[TMP6]], 10
|
|
// CHECK3-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = inttoptr i32 [[TMP7]] to ptr
|
|
// CHECK3-NEXT: store ptr [[TMP10]], ptr [[TMP9]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = inttoptr i32 [[TMP8]] to ptr
|
|
// CHECK3-NEXT: store ptr [[TMP12]], ptr [[TMP11]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP13]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 3)
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP20]], 9
|
|
// CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE5:%.*]], label [[COND_FALSE6:%.*]]
|
|
// CHECK3: cond.true5:
|
|
// CHECK3-NEXT: br label [[COND_END7:%.*]]
|
|
// CHECK3: cond.false6:
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END7]]
|
|
// CHECK3: cond.end7:
|
|
// CHECK3-NEXT: [[COND8:%.*]] = phi i32 [ 9, [[COND_TRUE5]] ], [ [[TMP21]], [[COND_FALSE6]] ]
|
|
// CHECK3-NEXT: store i32 [[COND8]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP22]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l33_omp_outlined_omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP8]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP9]]
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP10]], 1
|
|
// CHECK3-NEXT: store i32 [[ADD1]], ptr [[ARRAYIDX]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
|
|
// CHECK3-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP4]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38
|
|
// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[F:%.*]]) #[[ATTR4]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[F_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[F_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[F]], ptr [[F_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK3: user_code.entry:
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP3]], ptr [[F_CASTED]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[F_CASTED]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i32 [[TMP4]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK3-NEXT: ret void
|
|
// CHECK3: worker.exit:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[F:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[F_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[K:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[F_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x ptr], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[F]], ptr [[F_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK3: cond.true:
|
|
// CHECK3-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK3: cond.false:
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END]]
|
|
// CHECK3: cond.end:
|
|
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP6]], 100
|
|
// CHECK3-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP9]], ptr [[F_CASTED]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[F_CASTED]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = inttoptr i32 [[TMP7]] to ptr
|
|
// CHECK3-NEXT: store ptr [[TMP12]], ptr [[TMP11]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = inttoptr i32 [[TMP8]] to ptr
|
|
// CHECK3-NEXT: store ptr [[TMP14]], ptr [[TMP13]], align 4
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
|
|
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP15]], align 4
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = inttoptr i32 [[TMP10]] to ptr
|
|
// CHECK3-NEXT: store ptr [[TMP17]], ptr [[TMP16]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 4)
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK3-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK3-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP24]], 99
|
|
// CHECK3-NEXT: br i1 [[CMP5]], label [[COND_TRUE6:%.*]], label [[COND_FALSE7:%.*]]
|
|
// CHECK3: cond.true6:
|
|
// CHECK3-NEXT: br label [[COND_END8:%.*]]
|
|
// CHECK3: cond.false7:
|
|
// CHECK3-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: br label [[COND_END8]]
|
|
// CHECK3: cond.end8:
|
|
// CHECK3-NEXT: [[COND9:%.*]] = phi i32 [ 99, [[COND_TRUE6]] ], [ [[TMP25]], [[COND_FALSE7]] ]
|
|
// CHECK3-NEXT: store i32 [[COND9]], ptr [[DOTOMP_COMB_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP26]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l38_omp_outlined_omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]], i32 noundef [[F:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[F_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[K:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[F]], ptr [[F_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP4]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
|
|
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK3: omp.inner.for.cond:
|
|
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
|
|
// CHECK3-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP6]], [[TMP7]]
|
|
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK3: omp.inner.for.body:
|
|
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP8]], 10
|
|
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[DIV2:%.*]] = sdiv i32 [[TMP10]], 10
|
|
// CHECK3-NEXT: [[MUL3:%.*]] = mul nsw i32 [[DIV2]], 10
|
|
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP9]], [[MUL3]]
|
|
// CHECK3-NEXT: [[MUL4:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK3-NEXT: [[ADD5:%.*]] = add nsw i32 0, [[MUL4]]
|
|
// CHECK3-NEXT: store i32 [[ADD5]], ptr [[J]], align 4
|
|
// CHECK3-NEXT: store i32 10, ptr [[K]], align 4
|
|
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[J]], align 4
|
|
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK3-NEXT: [[MUL6:%.*]] = mul nsw i32 [[TMP12]], [[TMP13]]
|
|
// CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP11]], [[MUL6]]
|
|
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[K]], align 4
|
|
// CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[ADD7]], [[TMP14]]
|
|
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[I]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i32 0, i32 [[TMP15]]
|
|
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[J]], align 4
|
|
// CHECK3-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP16]]
|
|
// CHECK3-NEXT: store i32 [[ADD8]], ptr [[ARRAYIDX9]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK3: omp.body.continue:
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK3: omp.inner.for.inc:
|
|
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
|
|
// CHECK3-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
|
|
// CHECK3: omp.inner.for.end:
|
|
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
|
|
// CHECK3: omp.loop.exit:
|
|
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP4]])
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46
|
|
// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR4]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK3-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK3: user_code.entry:
|
|
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK3-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i32 [[TMP4]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK3-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK3-NEXT: ret void
|
|
// CHECK3: worker.exit:
|
|
// CHECK3-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined
|
|
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR1]] {
|
|
// CHECK3-NEXT: entry:
|
|
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I9:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[J10:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [4 x ptr], align 4
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK3-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
// CHECK3-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
// CHECK3-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
// CHECK3-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK3-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK3-NEXT: store i32 0, ptr [[I]], align 4
// CHECK3-NEXT: store i32 0, ptr [[J]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
// CHECK3-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK3: land.lhs.true:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
// CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.then:
// CHECK3-NEXT: store i64 0, ptr [[DOTOMP_COMB_LB]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK3-NEXT: store i64 [[TMP7]], ptr [[DOTOMP_COMB_UB]], align 8
// CHECK3-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
// CHECK3-NEXT: [[CONV11:%.*]] = zext i32 [[NVPTX_NUM_THREADS]] to i64
// CHECK3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
// CHECK3-NEXT: call void @__kmpc_distribute_static_init_8(ptr @[[GLOB2]], i32 [[TMP9]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 [[CONV11]])
// CHECK3-NEXT: [[TMP10:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
// CHECK3-NEXT: [[TMP11:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK3-NEXT: [[CMP12:%.*]] = icmp sgt i64 [[TMP10]], [[TMP11]]
// CHECK3-NEXT: br i1 [[CMP12]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i64 [ [[TMP12]], [[COND_TRUE]] ], [ [[TMP13]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i64 [[COND]], ptr [[DOTOMP_COMB_UB]], align 8
// CHECK3-NEXT: [[TMP14:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP14]], ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP16]], 1
// CHECK3-NEXT: [[CMP13:%.*]] = icmp slt i64 [[TMP15]], [[ADD]]
// CHECK3-NEXT: br i1 [[CMP13]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8
// CHECK3-NEXT: [[TMP18:%.*]] = trunc i64 [[TMP17]] to i32
// CHECK3-NEXT: [[TMP19:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
// CHECK3-NEXT: [[TMP20:%.*]] = trunc i64 [[TMP19]] to i32
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP21]], ptr [[N_CASTED]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[N_CASTED]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP24:%.*]] = inttoptr i32 [[TMP18]] to ptr
// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP23]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP26:%.*]] = inttoptr i32 [[TMP20]] to ptr
// CHECK3-NEXT: store ptr [[TMP26]], ptr [[TMP25]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP28:%.*]] = inttoptr i32 [[TMP22]] to ptr
// CHECK3-NEXT: store ptr [[TMP28]], ptr [[TMP27]], align 4
// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP29]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP30]], align 4
// CHECK3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP31]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 4)
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP32:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP33:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: [[ADD14:%.*]] = add nsw i64 [[TMP32]], [[TMP33]]
// CHECK3-NEXT: store i64 [[ADD14]], ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP34:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8
// CHECK3-NEXT: [[TMP35:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: [[ADD15:%.*]] = add nsw i64 [[TMP34]], [[TMP35]]
// CHECK3-NEXT: store i64 [[ADD15]], ptr [[DOTOMP_COMB_LB]], align 8
// CHECK3-NEXT: [[TMP36:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
// CHECK3-NEXT: [[TMP37:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: [[ADD16:%.*]] = add nsw i64 [[TMP36]], [[TMP37]]
// CHECK3-NEXT: store i64 [[ADD16]], ptr [[DOTOMP_COMB_UB]], align 8
// CHECK3-NEXT: [[TMP38:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
// CHECK3-NEXT: [[TMP39:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK3-NEXT: [[CMP17:%.*]] = icmp sgt i64 [[TMP38]], [[TMP39]]
// CHECK3-NEXT: br i1 [[CMP17]], label [[COND_TRUE18:%.*]], label [[COND_FALSE19:%.*]]
// CHECK3: cond.true18:
// CHECK3-NEXT: [[TMP40:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK3-NEXT: br label [[COND_END20:%.*]]
// CHECK3: cond.false19:
// CHECK3-NEXT: [[TMP41:%.*]] = load i64, ptr [[DOTOMP_COMB_UB]], align 8
// CHECK3-NEXT: br label [[COND_END20]]
// CHECK3: cond.end20:
// CHECK3-NEXT: [[COND21:%.*]] = phi i64 [ [[TMP40]], [[COND_TRUE18]] ], [ [[TMP41]], [[COND_FALSE19]] ]
// CHECK3-NEXT: store i64 [[COND21]], ptr [[DOTOMP_COMB_UB]], align 8
// CHECK3-NEXT: [[TMP42:%.*]] = load i64, ptr [[DOTOMP_COMB_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP42]], ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[TMP43:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP43]], align 4
// CHECK3-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP44]])
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l46_omp_outlined_omp_outlined
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(400) [[C:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[J:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I11:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[J12:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK3-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP4]], 0
// CHECK3-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1
// CHECK3-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]]
// CHECK3-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1
// CHECK3-NEXT: store i64 [[SUB7]], ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK3-NEXT: store i32 0, ptr [[I]], align 4
// CHECK3-NEXT: store i32 0, ptr [[J]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
// CHECK3-NEXT: br i1 [[CMP]], label [[LAND_LHS_TRUE:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK3: land.lhs.true:
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-NEXT: [[CMP8:%.*]] = icmp slt i32 0, [[TMP6]]
// CHECK3-NEXT: br i1 [[CMP8]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.then:
// CHECK3-NEXT: store i64 0, ptr [[DOTOMP_LB]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTCAPTURE_EXPR_3]], align 8
// CHECK3-NEXT: store i64 [[TMP7]], ptr [[DOTOMP_UB]], align 8
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: [[CONV9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: [[CONV10:%.*]] = zext i32 [[TMP9]] to i64
// CHECK3-NEXT: store i64 [[CONV9]], ptr [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[CONV10]], ptr [[DOTOMP_UB]], align 8
// CHECK3-NEXT: store i64 1, ptr [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[TMP10]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_8(ptr @[[GLOB3]], i32 [[TMP11]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i64 1, i64 1)
// CHECK3-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTOMP_LB]], align 8
// CHECK3-NEXT: store i64 [[TMP12]], ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP13:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: [[CONV13:%.*]] = zext i32 [[TMP14]] to i64
// CHECK3-NEXT: [[CMP14:%.*]] = icmp sle i64 [[TMP13]], [[CONV13]]
// CHECK3-NEXT: br i1 [[CMP14]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP15:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-NEXT: [[SUB15:%.*]] = sub nsw i32 [[TMP16]], 0
// CHECK3-NEXT: [[DIV16:%.*]] = sdiv i32 [[SUB15]], 1
// CHECK3-NEXT: [[MUL17:%.*]] = mul nsw i32 1, [[DIV16]]
// CHECK3-NEXT: [[CONV18:%.*]] = sext i32 [[MUL17]] to i64
// CHECK3-NEXT: [[DIV19:%.*]] = sdiv i64 [[TMP15]], [[CONV18]]
// CHECK3-NEXT: [[MUL20:%.*]] = mul nsw i64 [[DIV19]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL20]]
// CHECK3-NEXT: [[CONV21:%.*]] = trunc i64 [[ADD]] to i32
// CHECK3-NEXT: store i32 [[CONV21]], ptr [[I11]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP18:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-NEXT: [[SUB22:%.*]] = sub nsw i32 [[TMP19]], 0
// CHECK3-NEXT: [[DIV23:%.*]] = sdiv i32 [[SUB22]], 1
// CHECK3-NEXT: [[MUL24:%.*]] = mul nsw i32 1, [[DIV23]]
// CHECK3-NEXT: [[CONV25:%.*]] = sext i32 [[MUL24]] to i64
// CHECK3-NEXT: [[DIV26:%.*]] = sdiv i64 [[TMP18]], [[CONV25]]
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2]], align 4
// CHECK3-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP20]], 0
// CHECK3-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1
// CHECK3-NEXT: [[MUL29:%.*]] = mul nsw i32 1, [[DIV28]]
// CHECK3-NEXT: [[CONV30:%.*]] = sext i32 [[MUL29]] to i64
// CHECK3-NEXT: [[MUL31:%.*]] = mul nsw i64 [[DIV26]], [[CONV30]]
// CHECK3-NEXT: [[SUB32:%.*]] = sub nsw i64 [[TMP17]], [[MUL31]]
// CHECK3-NEXT: [[MUL33:%.*]] = mul nsw i64 [[SUB32]], 1
// CHECK3-NEXT: [[ADD34:%.*]] = add nsw i64 0, [[MUL33]]
// CHECK3-NEXT: [[CONV35:%.*]] = trunc i64 [[ADD34]] to i32
// CHECK3-NEXT: store i32 [[CONV35]], ptr [[J12]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, ptr [[I11]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = load i32, ptr [[J12]], align 4
// CHECK3-NEXT: [[ADD36:%.*]] = add nsw i32 [[TMP21]], [[TMP22]]
// CHECK3-NEXT: [[TMP23:%.*]] = load i32, ptr [[I11]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i32 0, i32 [[TMP23]]
// CHECK3-NEXT: [[TMP24:%.*]] = load i32, ptr [[J12]], align 4
// CHECK3-NEXT: [[ARRAYIDX37:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP24]]
// CHECK3-NEXT: store i32 [[ADD36]], ptr [[ARRAYIDX37]], align 4
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP25:%.*]] = load i64, ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: [[TMP26:%.*]] = load i64, ptr [[DOTOMP_STRIDE]], align 8
// CHECK3-NEXT: [[ADD38:%.*]] = add nsw i64 [[TMP25]], [[TMP26]]
// CHECK3-NEXT: store i64 [[ADD38]], ptr [[DOTOMP_IV]], align 8
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP28]])
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53
// CHECK3-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[V:%.*]]) #[[ATTR4]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[V_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[V]], ptr [[V_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_kernel_environment, ptr [[DYN_PTR]])
// CHECK3-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK3-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK3: user_code.entry:
// CHECK3-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[V_ADDR]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i32 [[TMP4]], ptr [[TMP0]], ptr [[TMP5]]) #[[ATTR2]]
// CHECK3-NEXT: call void @__kmpc_target_deinit()
// CHECK3-NEXT: ret void
// CHECK3: worker.exit:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[V:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[V_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x ptr], align 4
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[V]], ptr [[V_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK3-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: store i32 0, ptr [[I]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK3: omp.precond.then:
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
// CHECK3-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK3-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_COMB_LB]], ptr [[DOTOMP_COMB_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
// CHECK3-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3: cond.true:
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: br label [[COND_END:%.*]]
// CHECK3: cond.false:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: br label [[COND_END]]
// CHECK3: cond.end:
// CHECK3-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
// CHECK3-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], 1
// CHECK3-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP12]], [[ADD]]
// CHECK3-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP16]], ptr [[N_CASTED]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[N_CASTED]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = load ptr, ptr [[V_ADDR]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP20:%.*]] = inttoptr i32 [[TMP14]] to ptr
// CHECK3-NEXT: store ptr [[TMP20]], ptr [[TMP19]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP22:%.*]] = inttoptr i32 [[TMP15]] to ptr
// CHECK3-NEXT: store ptr [[TMP22]], ptr [[TMP21]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP24:%.*]] = inttoptr i32 [[TMP17]] to ptr
// CHECK3-NEXT: store ptr [[TMP24]], ptr [[TMP23]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 3
// CHECK3-NEXT: store ptr [[TMP0]], ptr [[TMP25]], align 4
// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 4
// CHECK3-NEXT: store ptr [[TMP18]], ptr [[TMP26]], align 4
// CHECK3-NEXT: [[TMP27:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4
// CHECK3-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP28]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 5)
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP29]], [[TMP30]]
// CHECK3-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP31:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP31]], [[TMP32]]
// CHECK3-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP33]], [[TMP34]]
// CHECK3-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP35]], [[TMP36]]
// CHECK3-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
// CHECK3: cond.true10:
// CHECK3-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: br label [[COND_END12:%.*]]
// CHECK3: cond.false11:
// CHECK3-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: br label [[COND_END12]]
// CHECK3: cond.end12:
// CHECK3-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP37]], [[COND_TRUE10]] ], [ [[TMP38]], [[COND_FALSE11]] ]
// CHECK3-NEXT: store i32 [[COND13]], ptr [[DOTOMP_COMB_UB]], align 4
// CHECK3-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTOMP_COMB_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP39]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[TMP40:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP40]], align 4
// CHECK3-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP41]])
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l53_omp_outlined_omp_outlined
// CHECK3-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], ptr noundef nonnull align 4 dereferenceable(4000) [[A:%.*]], ptr noundef [[V:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[V_ADDR:%.*]] = alloca ptr, align 4
// CHECK3-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[I3:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK3-NEXT: store ptr [[V]], ptr [[V_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
// CHECK3-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK3-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK3-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: store i32 0, ptr [[I]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
// CHECK3-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
// CHECK3-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK3: omp.precond.then:
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTPREVIOUS_LB__ADDR]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB]], align 4
// CHECK3-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB3]], i32 [[TMP8]], i32 33, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK3-NEXT: store i32 [[TMP9]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3: omp.inner.for.cond:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTPREVIOUS_UB__ADDR]], align 4
// CHECK3-NEXT: [[CMP4:%.*]] = icmp ule i32 [[TMP10]], [[TMP11]]
// CHECK3-NEXT: br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3: omp.inner.for.body:
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP12]], 1
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT: store i32 [[ADD]], ptr [[I3]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load ptr, ptr [[V_ADDR]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, ptr [[I3]], align 4
// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP13]], i32 [[TMP14]]
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, ptr [[I3]], align 4
// CHECK3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP16]]
// CHECK3-NEXT: store i32 [[TMP15]], ptr [[ARRAYIDX5]], align 4
// CHECK3-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3: omp.body.continue:
// CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3: omp.inner.for.inc:
// CHECK3-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// CHECK3-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV]], align 4
// CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK3: omp.inner.for.end:
// CHECK3-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3: omp.loop.exit:
// CHECK3-NEXT: [[TMP19:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP19]], align 4
// CHECK3-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB3]], i32 [[TMP20]])
// CHECK3-NEXT: br label [[OMP_PRECOND_END]]
// CHECK3: omp.precond.end:
// CHECK3-NEXT: ret void
//