The KernelEnvironment is for compile-time information about a kernel. It allows the compiler to feed information to the runtime. The KernelLaunchEnvironment is for dynamic information *per* kernel launch. It allows the runtime to feed information to the kernel that is not shared with other invocations of the kernel. The first use case is to replace the globals that synchronize teams reductions with per-launch versions. This allows concurrent teams reductions. More use cases will follow, e.g., per-launch memory pools. Fixes: https://github.com/llvm/llvm-project/issues/70249
3583 lines
248 KiB
C++
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
|
|
// Test target codegen - host bc file has to be created first.
|
|
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
|
|
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK45-64
|
|
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
|
|
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK45-32
|
|
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-version=45 -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK45-32-EX
|
|
|
|
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
|
|
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK-64
|
|
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
|
|
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32
|
|
// RUN: %clang_cc1 -no-enable-noundef-analysis -verify -fopenmp -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32-EX
|
|
|
|
// expected-no-diagnostics
|
|
#ifndef HEADER
|
|
#define HEADER
|
|
|
|
// Check that the execution mode of all 2 target regions on the gpu is set to NonSPMD Mode.
|
|
|
|
#define N 1000
|
|
#define M 10
|
|
|
|
template<typename tx>
|
|
tx ftemplate(int n) {
|
|
tx a[N];
|
|
short aa[N];
|
|
tx b[10];
|
|
tx c[M][M];
|
|
tx f = n;
|
|
tx l;
|
|
int k;
|
|
|
|
#pragma omp target teams distribute simd lastprivate(l) dist_schedule(static,128)
|
|
for(int i = 0; i < n; i++) {
|
|
a[i] = 1;
|
|
l = i;
|
|
}
|
|
|
|
#pragma omp target teams distribute simd map(tofrom: aa) num_teams(M) thread_limit(64)
|
|
for(int i = 0; i < n; i++) {
|
|
aa[i] += 1;
|
|
}
|
|
|
|
#pragma omp target teams distribute simd map(tofrom:a, aa, b) if(target: n>40)
|
|
for(int i = 0; i < 10; i++) {
|
|
b[i] += 1;
|
|
}
|
|
|
|
#pragma omp target teams distribute simd collapse(2) firstprivate(f) private(k)
|
|
for(int i = 0; i < M; i++) {
|
|
for(int j = 0; j < M; j++) {
|
|
k = M;
|
|
c[i][j] = i + j * f + k;
|
|
}
|
|
}
|
|
|
|
return a[0];
|
|
}
|
|
|
|
int bar(int n){
|
|
int a = 0;
|
|
|
|
a += ftemplate<int>(n);
|
|
|
|
return a;
|
|
}
|
|
|
|
#endif
|
|
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
|
|
// CHECK45-64-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK45-64-NEXT: entry:
|
|
// CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[L_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[L_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store i64 [[L]], ptr [[L_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-64: user_code.entry:
|
|
// CHECK45-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
|
|
// CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK45-64-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK45-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP5]], ptr [[L_CASTED]], align 4
|
|
// CHECK45-64-NEXT: [[TMP6:%.*]] = load i64, ptr [[L_CASTED]], align 8
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i64 [[TMP4]], ptr [[TMP0]], i64 [[TMP6]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK45-64-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-64-NEXT: ret void
|
|
// CHECK45-64: worker.exit:
|
|
// CHECK45-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined
|
|
// CHECK45-64-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK45-64-NEXT: entry:
|
|
// CHECK45-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[L_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store i64 [[L]], ptr [[L_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK45-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK45-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK45-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK45-64: omp.precond.then:
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-64-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK45-64-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 128)
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-64: omp.dispatch.cond:
|
|
// CHECK45-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-64-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-64: cond.true:
|
|
// CHECK45-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-64-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-64: cond.false:
|
|
// CHECK45-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: br label [[COND_END]]
|
|
// CHECK45-64: cond.end:
|
|
// CHECK45-64-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK45-64-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-64-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-64: omp.dispatch.body:
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-64: omp.inner.for.cond:
|
|
// CHECK45-64-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
|
|
// CHECK45-64-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-64-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-64: omp.inner.for.body:
|
|
// CHECK45-64-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-64-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64
|
|
// CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK45-64-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-64-NEXT: [[TMP18:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-64-NEXT: store i32 [[TMP18]], ptr [[L_ADDR]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-64: omp.body.continue:
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-64: omp.inner.for.inc:
|
|
// CHECK45-64-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-64-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK45-64-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
|
|
// CHECK45-64: omp.inner.for.end:
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-64: omp.dispatch.inc:
|
|
// CHECK45-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-64: omp.dispatch.end:
|
|
// CHECK45-64-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK45-64-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK45-64-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-64-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK45-64-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-64: .omp.final.then:
|
|
// CHECK45-64-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-64-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK45-64-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
|
|
// CHECK45-64-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
|
|
// CHECK45-64-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD13]], ptr [[I3]], align 4
|
|
// CHECK45-64-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-64: .omp.final.done:
|
|
// CHECK45-64-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-64-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
|
|
// CHECK45-64-NEXT: br i1 [[TMP30]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
|
|
// CHECK45-64: .omp.lastprivate.then:
|
|
// CHECK45-64-NEXT: [[TMP31:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP31]], ptr [[L_ADDR]], align 4
|
|
// CHECK45-64-NEXT: br label [[DOTOMP_LASTPRIVATE_DONE]]
|
|
// CHECK45-64: .omp.lastprivate.done:
|
|
// CHECK45-64-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK45-64: omp.precond.end:
|
|
// CHECK45-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40
|
|
// CHECK45-64-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
// CHECK45-64-NEXT: entry:
|
|
// CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-64: user_code.entry:
|
|
// CHECK45-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK45-64-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i64 [[TMP4]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK45-64-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-64-NEXT: ret void
|
|
// CHECK45-64: worker.exit:
|
|
// CHECK45-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined
|
|
// CHECK45-64-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1]] {
|
|
// CHECK45-64-NEXT: entry:
|
|
// CHECK45-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK45-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK45-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK45-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK45-64: omp.precond.then:
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-64-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK45-64-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK45-64-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-64: omp.dispatch.cond:
|
|
// CHECK45-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-64-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-64: cond.true:
|
|
// CHECK45-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-64-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-64: cond.false:
|
|
// CHECK45-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: br label [[COND_END]]
|
|
// CHECK45-64: cond.end:
|
|
// CHECK45-64-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK45-64-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-64-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-64: omp.dispatch.body:
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-64: omp.inner.for.cond:
|
|
// CHECK45-64-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
|
|
// CHECK45-64-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-64-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-64: omp.inner.for.body:
|
|
// CHECK45-64-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-64-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64
|
|
// CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK45-64-NEXT: [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-64-NEXT: [[CONV:%.*]] = sext i16 [[TMP18]] to i32
|
|
// CHECK45-64-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV]], 1
|
|
// CHECK45-64-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i16
|
|
// CHECK45-64-NEXT: store i16 [[CONV8]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-64: omp.body.continue:
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-64: omp.inner.for.inc:
|
|
// CHECK45-64-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-64-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK45-64-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
|
|
// CHECK45-64: omp.inner.for.end:
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-64: omp.dispatch.inc:
|
|
// CHECK45-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-64: omp.dispatch.end:
|
|
// CHECK45-64-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK45-64-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK45-64-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-64-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK45-64-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-64: .omp.final.then:
|
|
// CHECK45-64-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-64-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK45-64-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
|
|
// CHECK45-64-NEXT: [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
|
|
// CHECK45-64-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD15]], ptr [[I3]], align 4
|
|
// CHECK45-64-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-64: .omp.final.done:
|
|
// CHECK45-64-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK45-64: omp.precond.end:
|
|
// CHECK45-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45
|
|
// CHECK45-64-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK45-64-NEXT: entry:
|
|
// CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-64: user_code.entry:
|
|
// CHECK45-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK45-64-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-64-NEXT: ret void
|
|
// CHECK45-64: worker.exit:
|
|
// CHECK45-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined
|
|
// CHECK45-64-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
|
|
// CHECK45-64-NEXT: entry:
|
|
// CHECK45-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-64-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK45-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK45-64-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-64: omp.dispatch.cond:
|
|
// CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
|
|
// CHECK45-64-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-64: cond.true:
|
|
// CHECK45-64-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-64: cond.false:
|
|
// CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: br label [[COND_END]]
|
|
// CHECK45-64: cond.end:
|
|
// CHECK45-64-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK45-64-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-64: omp.dispatch.body:
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-64: omp.inner.for.cond:
|
|
// CHECK45-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
|
|
// CHECK45-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-64-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-64: omp.inner.for.body:
|
|
// CHECK45-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
|
|
// CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
|
|
// CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK45-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-64-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
|
|
// CHECK45-64-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-64: omp.body.continue:
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-64: omp.inner.for.inc:
|
|
// CHECK45-64-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-64-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK45-64-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
|
|
// CHECK45-64: omp.inner.for.end:
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-64: omp.dispatch.inc:
|
|
// CHECK45-64-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-64: omp.dispatch.end:
|
|
// CHECK45-64-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK45-64-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-64-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK45-64-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-64: .omp.final.then:
|
|
// CHECK45-64-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK45-64-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-64: .omp.final.done:
|
|
// CHECK45-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50
|
|
// CHECK45-64-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
|
|
// CHECK45-64-NEXT: entry:
|
|
// CHECK45-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[F_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[F_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store i64 [[F]], ptr [[F_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-64: user_code.entry:
|
|
// CHECK45-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP3]], ptr [[F_CASTED]], align 4
|
|
// CHECK45-64-NEXT: [[TMP4:%.*]] = load i64, ptr [[F_CASTED]], align 8
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i64 [[TMP4]]) #[[ATTR2]]
|
|
// CHECK45-64-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-64-NEXT: ret void
|
|
// CHECK45-64: worker.exit:
|
|
// CHECK45-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined
|
|
// CHECK45-64-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR1]] {
|
|
// CHECK45-64-NEXT: entry:
|
|
// CHECK45-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK45-64-NEXT: [[F_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK45-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[K:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK45-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store i64 [[F]], ptr [[F_ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-64-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK45-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK45-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK45-64-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-64: omp.dispatch.cond:
|
|
// CHECK45-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
|
|
// CHECK45-64-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-64: cond.true:
|
|
// CHECK45-64-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-64: cond.false:
|
|
// CHECK45-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: br label [[COND_END]]
|
|
// CHECK45-64: cond.end:
|
|
// CHECK45-64-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK45-64-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-64: omp.dispatch.body:
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-64: omp.inner.for.cond:
|
|
// CHECK45-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
|
|
// CHECK45-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK45-64-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-64: omp.inner.for.body:
|
|
// CHECK45-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 10
|
|
// CHECK45-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK45-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP12]], 10
|
|
// CHECK45-64-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 10
|
|
// CHECK45-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL5]]
|
|
// CHECK45-64-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK45-64-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: store i32 10, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[TMP15:%.*]] = load i32, ptr [[F_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[MUL8:%.*]] = mul nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-64-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], [[MUL8]]
|
|
// CHECK45-64-NEXT: [[TMP16:%.*]] = load i32, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[ADD10:%.*]] = add nsw i32 [[ADD9]], [[TMP16]]
|
|
// CHECK45-64-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64
|
|
// CHECK45-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK45-64-NEXT: [[TMP18:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP18]] to i64
|
|
// CHECK45-64-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM11]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD10]], ptr [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-64: omp.body.continue:
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-64: omp.inner.for.inc:
|
|
// CHECK45-64-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK45-64-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
|
|
// CHECK45-64: omp.inner.for.end:
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-64: omp.dispatch.inc:
|
|
// CHECK45-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-64-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK45-64-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-64-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-64: omp.dispatch.end:
|
|
// CHECK45-64-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK45-64-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-64-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
|
|
// CHECK45-64-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-64: .omp.final.then:
|
|
// CHECK45-64-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK45-64-NEXT: store i32 10, ptr [[J]], align 4
|
|
// CHECK45-64-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-64: .omp.final.done:
|
|
// CHECK45-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
|
|
// CHECK45-32-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK45-32-NEXT: entry:
|
|
// CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[L_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[L_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[L]], ptr [[L_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-32: user_code.entry:
|
|
// CHECK45-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
|
|
// CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK45-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP5]], ptr [[L_CASTED]], align 4
|
|
// CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[L_CASTED]], align 4
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i32 [[TMP4]], ptr [[TMP0]], i32 [[TMP6]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK45-32-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-32-NEXT: ret void
|
|
// CHECK45-32: worker.exit:
|
|
// CHECK45-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined
|
|
// CHECK45-32-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK45-32-NEXT: entry:
|
|
// CHECK45-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[L_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[L]], ptr [[L_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK45-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK45-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK45-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK45-32: omp.precond.then:
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK45-32-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 128)
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-32: omp.dispatch.cond:
|
|
// CHECK45-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-32: cond.true:
|
|
// CHECK45-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-32: cond.false:
|
|
// CHECK45-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: br label [[COND_END]]
|
|
// CHECK45-32: cond.end:
|
|
// CHECK45-32-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK45-32-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-32: omp.dispatch.body:
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-32: omp.inner.for.cond:
|
|
// CHECK45-32-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
|
|
// CHECK45-32-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-32: omp.inner.for.body:
|
|
// CHECK45-32-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK45-32-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-NEXT: [[TMP18:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-NEXT: store i32 [[TMP18]], ptr [[L_ADDR]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-32: omp.body.continue:
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-32: omp.inner.for.inc:
|
|
// CHECK45-32-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK45-32-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
|
|
// CHECK45-32: omp.inner.for.end:
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-32: omp.dispatch.inc:
|
|
// CHECK45-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-32: omp.dispatch.end:
|
|
// CHECK45-32-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK45-32-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK45-32-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK45-32-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-32: .omp.final.then:
|
|
// CHECK45-32-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK45-32-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
|
|
// CHECK45-32-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
|
|
// CHECK45-32-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD13]], ptr [[I3]], align 4
|
|
// CHECK45-32-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-32: .omp.final.done:
|
|
// CHECK45-32-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
|
|
// CHECK45-32-NEXT: br i1 [[TMP30]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
|
|
// CHECK45-32: .omp.lastprivate.then:
|
|
// CHECK45-32-NEXT: [[TMP31:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP31]], ptr [[L_ADDR]], align 4
|
|
// CHECK45-32-NEXT: br label [[DOTOMP_LASTPRIVATE_DONE]]
|
|
// CHECK45-32: .omp.lastprivate.done:
|
|
// CHECK45-32-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK45-32: omp.precond.end:
|
|
// CHECK45-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40
|
|
// CHECK45-32-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
// CHECK45-32-NEXT: entry:
|
|
// CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-32: user_code.entry:
|
|
// CHECK45-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i32 [[TMP4]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK45-32-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-32-NEXT: ret void
|
|
// CHECK45-32: worker.exit:
|
|
// CHECK45-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined
|
|
// CHECK45-32-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1]] {
|
|
// CHECK45-32-NEXT: entry:
|
|
// CHECK45-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK45-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK45-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK45-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK45-32: omp.precond.then:
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK45-32-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK45-32-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-32: omp.dispatch.cond:
|
|
// CHECK45-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-32: cond.true:
|
|
// CHECK45-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-32: cond.false:
|
|
// CHECK45-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: br label [[COND_END]]
|
|
// CHECK45-32: cond.end:
|
|
// CHECK45-32-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK45-32-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-32: omp.dispatch.body:
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-32: omp.inner.for.cond:
|
|
// CHECK45-32-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
|
|
// CHECK45-32-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-32: omp.inner.for.body:
|
|
// CHECK45-32-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK45-32-NEXT: [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-NEXT: [[CONV:%.*]] = sext i16 [[TMP18]] to i32
|
|
// CHECK45-32-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV]], 1
|
|
// CHECK45-32-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i16
|
|
// CHECK45-32-NEXT: store i16 [[CONV8]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-32: omp.body.continue:
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-32: omp.inner.for.inc:
|
|
// CHECK45-32-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK45-32-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
|
|
// CHECK45-32: omp.inner.for.end:
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-32: omp.dispatch.inc:
|
|
// CHECK45-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-32: omp.dispatch.end:
|
|
// CHECK45-32-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK45-32-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK45-32-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK45-32-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-32: .omp.final.then:
|
|
// CHECK45-32-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK45-32-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
|
|
// CHECK45-32-NEXT: [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
|
|
// CHECK45-32-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD15]], ptr [[I3]], align 4
|
|
// CHECK45-32-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-32: .omp.final.done:
|
|
// CHECK45-32-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK45-32: omp.precond.end:
|
|
// CHECK45-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45
|
|
// CHECK45-32-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK45-32-NEXT: entry:
|
|
// CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-32: user_code.entry:
|
|
// CHECK45-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK45-32-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-32-NEXT: ret void
|
|
// CHECK45-32: worker.exit:
|
|
// CHECK45-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined
|
|
// CHECK45-32-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
|
|
// CHECK45-32-NEXT: entry:
|
|
// CHECK45-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK45-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK45-32-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-32: omp.dispatch.cond:
|
|
// CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
|
|
// CHECK45-32-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-32: cond.true:
|
|
// CHECK45-32-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-32: cond.false:
|
|
// CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: br label [[COND_END]]
|
|
// CHECK45-32: cond.end:
|
|
// CHECK45-32-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK45-32-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-32: omp.dispatch.body:
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-32: omp.inner.for.cond:
|
|
// CHECK45-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
|
|
// CHECK45-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-32: omp.inner.for.body:
|
|
// CHECK45-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
|
|
// CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
|
|
// CHECK45-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
|
|
// CHECK45-32-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-32: omp.body.continue:
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-32: omp.inner.for.inc:
|
|
// CHECK45-32-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK45-32-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
|
|
// CHECK45-32: omp.inner.for.end:
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-32: omp.dispatch.inc:
|
|
// CHECK45-32-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-32: omp.dispatch.end:
|
|
// CHECK45-32-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK45-32-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK45-32-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-32: .omp.final.then:
|
|
// CHECK45-32-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK45-32-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-32: .omp.final.done:
|
|
// CHECK45-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50
|
|
// CHECK45-32-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
|
|
// CHECK45-32-NEXT: entry:
|
|
// CHECK45-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[F_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[F_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[F]], ptr [[F_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-32: user_code.entry:
|
|
// CHECK45-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP3]], ptr [[F_CASTED]], align 4
|
|
// CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[F_CASTED]], align 4
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i32 [[TMP4]]) #[[ATTR2]]
|
|
// CHECK45-32-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-32-NEXT: ret void
|
|
// CHECK45-32: worker.exit:
|
|
// CHECK45-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined
|
|
// CHECK45-32-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR1]] {
|
|
// CHECK45-32-NEXT: entry:
|
|
// CHECK45-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-NEXT: [[F_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[K:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[F]], ptr [[F_ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK45-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK45-32-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-32: omp.dispatch.cond:
|
|
// CHECK45-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
|
|
// CHECK45-32-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-32: cond.true:
|
|
// CHECK45-32-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-32: cond.false:
|
|
// CHECK45-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: br label [[COND_END]]
|
|
// CHECK45-32: cond.end:
|
|
// CHECK45-32-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK45-32-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-32: omp.dispatch.body:
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-32: omp.inner.for.cond:
|
|
// CHECK45-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
|
|
// CHECK45-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK45-32-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-32: omp.inner.for.body:
|
|
// CHECK45-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 10
|
|
// CHECK45-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK45-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP12]], 10
|
|
// CHECK45-32-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 10
|
|
// CHECK45-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL5]]
|
|
// CHECK45-32-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK45-32-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: store i32 10, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[TMP15:%.*]] = load i32, ptr [[F_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[MUL8:%.*]] = mul nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-32-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], [[MUL8]]
|
|
// CHECK45-32-NEXT: [[TMP16:%.*]] = load i32, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[ADD10:%.*]] = add nsw i32 [[ADD9]], [[TMP16]]
|
|
// CHECK45-32-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK45-32-NEXT: [[TMP18:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP18]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD10]], ptr [[ARRAYIDX11]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-32: omp.body.continue:
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-32: omp.inner.for.inc:
|
|
// CHECK45-32-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK45-32-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
|
|
// CHECK45-32: omp.inner.for.end:
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-32: omp.dispatch.inc:
|
|
// CHECK45-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK45-32-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-32: omp.dispatch.end:
|
|
// CHECK45-32-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK45-32-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
|
|
// CHECK45-32-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-32: .omp.final.then:
|
|
// CHECK45-32-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK45-32-NEXT: store i32 10, ptr [[J]], align 4
|
|
// CHECK45-32-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-32: .omp.final.done:
|
|
// CHECK45-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
|
|
// CHECK45-32-EX-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK45-32-EX-NEXT: entry:
|
|
// CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[L_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[L_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[L]], ptr [[L_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-32-EX: user_code.entry:
|
|
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
|
|
// CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP5]], ptr [[L_CASTED]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[L_CASTED]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-32-EX-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i32 [[TMP4]], ptr [[TMP0]], i32 [[TMP6]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
// CHECK45-32-EX: worker.exit:
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined
|
|
// CHECK45-32-EX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK45-32-EX-NEXT: entry:
|
|
// CHECK45-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[L_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[L]], ptr [[L_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK45-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK45-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK45-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK45-32-EX: omp.precond.then:
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 128)
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.cond:
|
|
// CHECK45-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-EX-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-32-EX: cond.true:
|
|
// CHECK45-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-32-EX: cond.false:
|
|
// CHECK45-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[COND_END]]
|
|
// CHECK45-32-EX: cond.end:
|
|
// CHECK45-32-EX-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK45-32-EX-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.body:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.cond:
|
|
// CHECK45-32-EX-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
|
|
// CHECK45-32-EX-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-EX-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.body:
|
|
// CHECK45-32-EX-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-EX-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK45-32-EX-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-EX-NEXT: [[TMP18:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP18]], ptr [[L_ADDR]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-32-EX: omp.body.continue:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.inc:
|
|
// CHECK45-32-EX-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-EX-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
|
|
// CHECK45-32-EX: omp.inner.for.end:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.inc:
|
|
// CHECK45-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-32-EX: omp.dispatch.end:
|
|
// CHECK45-32-EX-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK45-32-EX-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK45-32-EX-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-32-EX: .omp.final.then:
|
|
// CHECK45-32-EX-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-EX-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK45-32-EX-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
|
|
// CHECK45-32-EX-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
|
|
// CHECK45-32-EX-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD13]], ptr [[I3]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-32-EX: .omp.final.done:
|
|
// CHECK45-32-EX-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
|
|
// CHECK45-32-EX-NEXT: br i1 [[TMP30]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
|
|
// CHECK45-32-EX: .omp.lastprivate.then:
|
|
// CHECK45-32-EX-NEXT: [[TMP31:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP31]], ptr [[L_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[DOTOMP_LASTPRIVATE_DONE]]
|
|
// CHECK45-32-EX: .omp.lastprivate.done:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK45-32-EX: omp.precond.end:
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40
|
|
// CHECK45-32-EX-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
// CHECK45-32-EX-NEXT: entry:
|
|
// CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-32-EX: user_code.entry:
|
|
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-32-EX-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i32 [[TMP4]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
// CHECK45-32-EX: worker.exit:
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined
|
|
// CHECK45-32-EX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1]] {
|
|
// CHECK45-32-EX-NEXT: entry:
|
|
// CHECK45-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK45-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK45-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK45-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK45-32-EX: omp.precond.then:
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-EX-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.cond:
|
|
// CHECK45-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-EX-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-32-EX: cond.true:
|
|
// CHECK45-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-32-EX: cond.false:
|
|
// CHECK45-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[COND_END]]
|
|
// CHECK45-32-EX: cond.end:
|
|
// CHECK45-32-EX-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK45-32-EX-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.body:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.cond:
|
|
// CHECK45-32-EX-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
|
|
// CHECK45-32-EX-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-EX-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.body:
|
|
// CHECK45-32-EX-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-EX-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK45-32-EX-NEXT: [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-EX-NEXT: [[CONV:%.*]] = sext i16 [[TMP18]] to i32
|
|
// CHECK45-32-EX-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV]], 1
|
|
// CHECK45-32-EX-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i16
|
|
// CHECK45-32-EX-NEXT: store i16 [[CONV8]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-32-EX: omp.body.continue:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.inc:
|
|
// CHECK45-32-EX-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-EX-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
|
|
// CHECK45-32-EX: omp.inner.for.end:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.inc:
|
|
// CHECK45-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-32-EX: omp.dispatch.end:
|
|
// CHECK45-32-EX-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK45-32-EX-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK45-32-EX-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-32-EX: .omp.final.then:
|
|
// CHECK45-32-EX-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK45-32-EX-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK45-32-EX-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
|
|
// CHECK45-32-EX-NEXT: [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
|
|
// CHECK45-32-EX-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD15]], ptr [[I3]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-32-EX: .omp.final.done:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK45-32-EX: omp.precond.end:
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45
|
|
// CHECK45-32-EX-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK45-32-EX-NEXT: entry:
|
|
// CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-32-EX: user_code.entry:
|
|
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-32-EX-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
// CHECK45-32-EX: worker.exit:
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined
|
|
// CHECK45-32-EX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
|
|
// CHECK45-32-EX-NEXT: entry:
|
|
// CHECK45-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-EX-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.cond:
|
|
// CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-32-EX: cond.true:
|
|
// CHECK45-32-EX-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-32-EX: cond.false:
|
|
// CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[COND_END]]
|
|
// CHECK45-32-EX: cond.end:
|
|
// CHECK45-32-EX-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK45-32-EX-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.body:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.cond:
|
|
// CHECK45-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
|
|
// CHECK45-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-EX-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.body:
|
|
// CHECK45-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
|
|
// CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
|
|
// CHECK45-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-EX-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-32-EX: omp.body.continue:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.inc:
|
|
// CHECK45-32-EX-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-EX-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
|
|
// CHECK45-32-EX: omp.inner.for.end:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.inc:
|
|
// CHECK45-32-EX-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-32-EX: omp.dispatch.end:
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK45-32-EX-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK45-32-EX-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-32-EX: .omp.final.then:
|
|
// CHECK45-32-EX-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-32-EX: .omp.final.done:
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50
|
|
// CHECK45-32-EX-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
|
|
// CHECK45-32-EX-NEXT: entry:
|
|
// CHECK45-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[F_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[F_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[F]], ptr [[F_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK45-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK45-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK45-32-EX: user_code.entry:
|
|
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP3]], ptr [[F_CASTED]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[F_CASTED]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK45-32-EX-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i32 [[TMP4]]) #[[ATTR2]]
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
// CHECK45-32-EX: worker.exit:
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK45-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined
|
|
// CHECK45-32-EX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR1]] {
|
|
// CHECK45-32-EX-NEXT: entry:
|
|
// CHECK45-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK45-32-EX-NEXT: [[F_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[K:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[F]], ptr [[F_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-EX-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK45-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.cond:
|
|
// CHECK45-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK45-32-EX: cond.true:
|
|
// CHECK45-32-EX-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK45-32-EX: cond.false:
|
|
// CHECK45-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[COND_END]]
|
|
// CHECK45-32-EX: cond.end:
|
|
// CHECK45-32-EX-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK45-32-EX-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.body:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.cond:
|
|
// CHECK45-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
|
|
// CHECK45-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK45-32-EX-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.body:
|
|
// CHECK45-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 10
|
|
// CHECK45-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK45-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP12]], 10
|
|
// CHECK45-32-EX-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 10
|
|
// CHECK45-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL5]]
|
|
// CHECK45-32-EX-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK45-32-EX-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: store i32 10, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[TMP15:%.*]] = load i32, ptr [[F_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[MUL8:%.*]] = mul nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK45-32-EX-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], [[MUL8]]
|
|
// CHECK45-32-EX-NEXT: [[TMP16:%.*]] = load i32, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[ADD10:%.*]] = add nsw i32 [[ADD9]], [[TMP16]]
|
|
// CHECK45-32-EX-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK45-32-EX-NEXT: [[TMP18:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP18]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD10]], ptr [[ARRAYIDX11]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK45-32-EX: omp.body.continue:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK45-32-EX: omp.inner.for.inc:
|
|
// CHECK45-32-EX-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
|
|
// CHECK45-32-EX: omp.inner.for.end:
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK45-32-EX: omp.dispatch.inc:
|
|
// CHECK45-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK45-32-EX-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK45-32-EX-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK45-32-EX: omp.dispatch.end:
|
|
// CHECK45-32-EX-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK45-32-EX-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK45-32-EX-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
|
|
// CHECK45-32-EX-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK45-32-EX: .omp.final.then:
|
|
// CHECK45-32-EX-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK45-32-EX-NEXT: store i32 10, ptr [[J]], align 4
|
|
// CHECK45-32-EX-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK45-32-EX: .omp.final.done:
|
|
// CHECK45-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
|
|
// CHECK-64-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK-64-NEXT: entry:
|
|
// CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[L_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[L_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK-64-NEXT: store i64 [[L]], ptr [[L_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-64: user_code.entry:
|
|
// CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
|
|
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK-64-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP5]], ptr [[L_CASTED]], align 4
|
|
// CHECK-64-NEXT: [[TMP6:%.*]] = load i64, ptr [[L_CASTED]], align 8
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i64 [[TMP4]], ptr [[TMP0]], i64 [[TMP6]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK-64-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-64-NEXT: ret void
|
|
// CHECK-64: worker.exit:
|
|
// CHECK-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined
|
|
// CHECK-64-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i64 [[L:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK-64-NEXT: entry:
|
|
// CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[L_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
|
|
// CHECK-64-NEXT: store i64 [[L]], ptr [[L_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-64-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK-64-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK-64: omp.precond.then:
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-64-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK-64-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 128)
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-64: omp.dispatch.cond:
|
|
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-64-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK-64-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-64: cond.true:
|
|
// CHECK-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-64-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-64: cond.false:
|
|
// CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: br label [[COND_END]]
|
|
// CHECK-64: cond.end:
|
|
// CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK-64-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-64-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK-64-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-64: omp.dispatch.body:
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-64: omp.inner.for.cond:
|
|
// CHECK-64-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
|
|
// CHECK-64-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-64-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-64-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-64: omp.inner.for.body:
|
|
// CHECK-64-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-64-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-64-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64
|
|
// CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK-64-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-64-NEXT: [[TMP18:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-64-NEXT: store i32 [[TMP18]], ptr [[L_ADDR]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-64: omp.body.continue:
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-64: omp.inner.for.inc:
|
|
// CHECK-64-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-64-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK-64-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
|
|
// CHECK-64: omp.inner.for.end:
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-64: omp.dispatch.inc:
|
|
// CHECK-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK-64-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK-64-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-64: omp.dispatch.end:
|
|
// CHECK-64-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK-64-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK-64-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-64-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK-64-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-64: .omp.final.then:
|
|
// CHECK-64-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-64-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK-64-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
|
|
// CHECK-64-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
|
|
// CHECK-64-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
|
|
// CHECK-64-NEXT: store i32 [[ADD13]], ptr [[I3]], align 4
|
|
// CHECK-64-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-64: .omp.final.done:
|
|
// CHECK-64-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-64-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
|
|
// CHECK-64-NEXT: br i1 [[TMP30]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
|
|
// CHECK-64: .omp.lastprivate.then:
|
|
// CHECK-64-NEXT: [[TMP31:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP31]], ptr [[L_ADDR]], align 4
|
|
// CHECK-64-NEXT: br label [[DOTOMP_LASTPRIVATE_DONE]]
|
|
// CHECK-64: .omp.lastprivate.done:
|
|
// CHECK-64-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK-64: omp.precond.end:
|
|
// CHECK-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40
|
|
// CHECK-64-SAME: (ptr noalias [[DYN_PTR:%.*]], i64 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
// CHECK-64-NEXT: entry:
|
|
// CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-64: user_code.entry:
|
|
// CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK-64-NEXT: [[TMP4:%.*]] = load i64, ptr [[N_CASTED]], align 8
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i64 [[TMP4]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK-64-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-64-NEXT: ret void
|
|
// CHECK-64: worker.exit:
|
|
// CHECK-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined
|
|
// CHECK-64-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i64 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1]] {
|
|
// CHECK-64-NEXT: entry:
|
|
// CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: store i64 [[N]], ptr [[N_ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK-64-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK-64-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK-64-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-64-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-64-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK-64-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK-64: omp.precond.then:
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-64-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK-64-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK-64-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-64: omp.dispatch.cond:
|
|
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-64-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK-64-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-64: cond.true:
|
|
// CHECK-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-64-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-64: cond.false:
|
|
// CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: br label [[COND_END]]
|
|
// CHECK-64: cond.end:
|
|
// CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK-64-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-64-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK-64-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-64: omp.dispatch.body:
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-64: omp.inner.for.cond:
|
|
// CHECK-64-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
|
|
// CHECK-64-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-64-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-64-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-64: omp.inner.for.body:
|
|
// CHECK-64-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-64-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-64-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64
|
|
// CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK-64-NEXT: [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-64-NEXT: [[CONV:%.*]] = sext i16 [[TMP18]] to i32
|
|
// CHECK-64-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV]], 1
|
|
// CHECK-64-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i16
|
|
// CHECK-64-NEXT: store i16 [[CONV8]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-64: omp.body.continue:
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-64: omp.inner.for.inc:
|
|
// CHECK-64-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-64-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK-64-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
|
|
// CHECK-64: omp.inner.for.end:
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-64: omp.dispatch.inc:
|
|
// CHECK-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK-64-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK-64-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-64: omp.dispatch.end:
|
|
// CHECK-64-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK-64-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK-64-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-64-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK-64-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-64: .omp.final.then:
|
|
// CHECK-64-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-64-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK-64-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
|
|
// CHECK-64-NEXT: [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
|
|
// CHECK-64-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
|
|
// CHECK-64-NEXT: store i32 [[ADD15]], ptr [[I3]], align 4
|
|
// CHECK-64-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-64: .omp.final.done:
|
|
// CHECK-64-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK-64: omp.precond.end:
|
|
// CHECK-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45
|
|
// CHECK-64-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-64-NEXT: entry:
|
|
// CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-64: user_code.entry:
|
|
// CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK-64-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-64-NEXT: ret void
|
|
// CHECK-64: worker.exit:
|
|
// CHECK-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined
|
|
// CHECK-64-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
|
|
// CHECK-64-NEXT: entry:
|
|
// CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 8
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-64-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK-64-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-64: omp.dispatch.cond:
|
|
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
|
|
// CHECK-64-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-64: cond.true:
|
|
// CHECK-64-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-64: cond.false:
|
|
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: br label [[COND_END]]
|
|
// CHECK-64: cond.end:
|
|
// CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK-64-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK-64-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-64: omp.dispatch.body:
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-64: omp.inner.for.cond:
|
|
// CHECK-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
|
|
// CHECK-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-64-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK-64-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-64: omp.inner.for.body:
|
|
// CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
|
|
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
|
|
// CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-64-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
|
|
// CHECK-64-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-64: omp.body.continue:
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-64: omp.inner.for.inc:
|
|
// CHECK-64-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-64-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK-64-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
|
|
// CHECK-64: omp.inner.for.end:
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-64: omp.dispatch.inc:
|
|
// CHECK-64-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-64-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK-64-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-64: omp.dispatch.end:
|
|
// CHECK-64-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK-64-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-64-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK-64-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-64: .omp.final.then:
|
|
// CHECK-64-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK-64-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-64: .omp.final.done:
|
|
// CHECK-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50
|
|
// CHECK-64-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR0]] {
|
|
// CHECK-64-NEXT: entry:
|
|
// CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[F_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[F_CASTED:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK-64-NEXT: store i64 [[F]], ptr [[F_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-64: user_code.entry:
|
|
// CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP3]], ptr [[F_CASTED]], align 4
|
|
// CHECK-64-NEXT: [[TMP4:%.*]] = load i64, ptr [[F_CASTED]], align 8
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-64-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i64 [[TMP4]]) #[[ATTR2]]
|
|
// CHECK-64-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-64-NEXT: ret void
|
|
// CHECK-64: worker.exit:
|
|
// CHECK-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined
|
|
// CHECK-64-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i64 [[F:%.*]]) #[[ATTR1]] {
|
|
// CHECK-64-NEXT: entry:
|
|
// CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
|
|
// CHECK-64-NEXT: [[F_ADDR:%.*]] = alloca i64, align 8
|
|
// CHECK-64-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[K:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
|
|
// CHECK-64-NEXT: store i64 [[F]], ptr [[F_ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-64-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
|
|
// CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK-64-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-64: omp.dispatch.cond:
|
|
// CHECK-64-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
|
|
// CHECK-64-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-64: cond.true:
|
|
// CHECK-64-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-64: cond.false:
|
|
// CHECK-64-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: br label [[COND_END]]
|
|
// CHECK-64: cond.end:
|
|
// CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK-64-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-64-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK-64-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-64: omp.dispatch.body:
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-64: omp.inner.for.cond:
|
|
// CHECK-64-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
|
|
// CHECK-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK-64-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-64: omp.inner.for.body:
|
|
// CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 10
|
|
// CHECK-64-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK-64-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-64-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP12]], 10
|
|
// CHECK-64-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 10
|
|
// CHECK-64-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL5]]
|
|
// CHECK-64-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK-64-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]]
|
|
// CHECK-64-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: store i32 10, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[TMP15:%.*]] = load i32, ptr [[F_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[MUL8:%.*]] = mul nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-64-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], [[MUL8]]
|
|
// CHECK-64-NEXT: [[TMP16:%.*]] = load i32, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[ADD10:%.*]] = add nsw i32 [[ADD9]], [[TMP16]]
|
|
// CHECK-64-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP17]] to i64
|
|
// CHECK-64-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
|
|
// CHECK-64-NEXT: [[TMP18:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[IDXPROM11:%.*]] = sext i32 [[TMP18]] to i64
|
|
// CHECK-64-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i64 0, i64 [[IDXPROM11]]
|
|
// CHECK-64-NEXT: store i32 [[ADD10]], ptr [[ARRAYIDX12]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-64: omp.body.continue:
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-64: omp.inner.for.inc:
|
|
// CHECK-64-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK-64-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-64-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
|
|
// CHECK-64: omp.inner.for.end:
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-64: omp.dispatch.inc:
|
|
// CHECK-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK-64-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-64-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-64-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK-64-NEXT: store i32 [[ADD15]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-64-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-64: omp.dispatch.end:
|
|
// CHECK-64-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK-64-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-64-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
|
|
// CHECK-64-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-64: .omp.final.then:
|
|
// CHECK-64-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK-64-NEXT: store i32 10, ptr [[J]], align 4
|
|
// CHECK-64-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-64: .omp.final.done:
|
|
// CHECK-64-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
|
|
// CHECK-32-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK-32-NEXT: entry:
|
|
// CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[L_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[L_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[L]], ptr [[L_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-32: user_code.entry:
|
|
// CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
|
|
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP5]], ptr [[L_CASTED]], align 4
|
|
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[L_CASTED]], align 4
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i32 [[TMP4]], ptr [[TMP0]], i32 [[TMP6]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK-32-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-32-NEXT: ret void
|
|
// CHECK-32: worker.exit:
|
|
// CHECK-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined
|
|
// CHECK-32-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK-32-NEXT: entry:
|
|
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[L_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[L]], ptr [[L_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK-32-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK-32: omp.precond.then:
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK-32-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 128)
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-32: omp.dispatch.cond:
|
|
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK-32-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-32: cond.true:
|
|
// CHECK-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-32: cond.false:
|
|
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: br label [[COND_END]]
|
|
// CHECK-32: cond.end:
|
|
// CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK-32-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK-32-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-32: omp.dispatch.body:
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-32: omp.inner.for.cond:
|
|
// CHECK-32-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
|
|
// CHECK-32-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-32-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-32: omp.inner.for.body:
|
|
// CHECK-32-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-32-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK-32-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-NEXT: [[TMP18:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-NEXT: store i32 [[TMP18]], ptr [[L_ADDR]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-32: omp.body.continue:
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-32: omp.inner.for.inc:
|
|
// CHECK-32-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK-32-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
|
|
// CHECK-32: omp.inner.for.end:
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-32: omp.dispatch.inc:
|
|
// CHECK-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK-32-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK-32-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-32: omp.dispatch.end:
|
|
// CHECK-32-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK-32-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK-32-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK-32-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-32: .omp.final.then:
|
|
// CHECK-32-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK-32-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
|
|
// CHECK-32-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
|
|
// CHECK-32-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
|
|
// CHECK-32-NEXT: store i32 [[ADD13]], ptr [[I3]], align 4
|
|
// CHECK-32-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-32: .omp.final.done:
|
|
// CHECK-32-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
|
|
// CHECK-32-NEXT: br i1 [[TMP30]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
|
|
// CHECK-32: .omp.lastprivate.then:
|
|
// CHECK-32-NEXT: [[TMP31:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP31]], ptr [[L_ADDR]], align 4
|
|
// CHECK-32-NEXT: br label [[DOTOMP_LASTPRIVATE_DONE]]
|
|
// CHECK-32: .omp.lastprivate.done:
|
|
// CHECK-32-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK-32: omp.precond.end:
|
|
// CHECK-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40
|
|
// CHECK-32-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
// CHECK-32-NEXT: entry:
|
|
// CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-32: user_code.entry:
|
|
// CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i32 [[TMP4]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK-32-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-32-NEXT: ret void
|
|
// CHECK-32: worker.exit:
|
|
// CHECK-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined
|
|
// CHECK-32-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1]] {
|
|
// CHECK-32-NEXT: entry:
|
|
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK-32-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK-32-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK-32-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK-32: omp.precond.then:
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK-32-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK-32-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-32: omp.dispatch.cond:
|
|
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK-32-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-32: cond.true:
|
|
// CHECK-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-32: cond.false:
|
|
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: br label [[COND_END]]
|
|
// CHECK-32: cond.end:
|
|
// CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK-32-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK-32-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-32: omp.dispatch.body:
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-32: omp.inner.for.cond:
|
|
// CHECK-32-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
|
|
// CHECK-32-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-32-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-32: omp.inner.for.body:
|
|
// CHECK-32-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-32-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK-32-NEXT: [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-NEXT: [[CONV:%.*]] = sext i16 [[TMP18]] to i32
|
|
// CHECK-32-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV]], 1
|
|
// CHECK-32-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i16
|
|
// CHECK-32-NEXT: store i16 [[CONV8]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-32: omp.body.continue:
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-32: omp.inner.for.inc:
|
|
// CHECK-32-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK-32-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
|
|
// CHECK-32: omp.inner.for.end:
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-32: omp.dispatch.inc:
|
|
// CHECK-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK-32-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK-32-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-32: omp.dispatch.end:
|
|
// CHECK-32-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK-32-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK-32-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK-32-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-32: .omp.final.then:
|
|
// CHECK-32-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK-32-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
|
|
// CHECK-32-NEXT: [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
|
|
// CHECK-32-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
|
|
// CHECK-32-NEXT: store i32 [[ADD15]], ptr [[I3]], align 4
|
|
// CHECK-32-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-32: .omp.final.done:
|
|
// CHECK-32-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK-32: omp.precond.end:
|
|
// CHECK-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45
|
|
// CHECK-32-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-32-NEXT: entry:
|
|
// CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-32: user_code.entry:
|
|
// CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK-32-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-32-NEXT: ret void
|
|
// CHECK-32: worker.exit:
|
|
// CHECK-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined
|
|
// CHECK-32-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
|
|
// CHECK-32-NEXT: entry:
|
|
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK-32-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-32: omp.dispatch.cond:
|
|
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
|
|
// CHECK-32-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-32: cond.true:
|
|
// CHECK-32-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-32: cond.false:
|
|
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: br label [[COND_END]]
|
|
// CHECK-32: cond.end:
|
|
// CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK-32-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK-32-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-32: omp.dispatch.body:
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-32: omp.inner.for.cond:
|
|
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
|
|
// CHECK-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK-32-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-32: omp.inner.for.body:
|
|
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
|
|
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
|
|
// CHECK-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
|
|
// CHECK-32-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-32: omp.body.continue:
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-32: omp.inner.for.inc:
|
|
// CHECK-32-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK-32-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
|
|
// CHECK-32: omp.inner.for.end:
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-32: omp.dispatch.inc:
|
|
// CHECK-32-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-32-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK-32-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-32: omp.dispatch.end:
|
|
// CHECK-32-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK-32-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK-32-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-32: .omp.final.then:
|
|
// CHECK-32-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK-32-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-32: .omp.final.done:
|
|
// CHECK-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50
|
|
// CHECK-32-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
|
|
// CHECK-32-NEXT: entry:
|
|
// CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[F_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[F_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[F]], ptr [[F_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-32: user_code.entry:
|
|
// CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP3]], ptr [[F_CASTED]], align 4
|
|
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[F_CASTED]], align 4
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-32-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i32 [[TMP4]]) #[[ATTR2]]
|
|
// CHECK-32-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-32-NEXT: ret void
|
|
// CHECK-32: worker.exit:
|
|
// CHECK-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined
|
|
// CHECK-32-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR1]] {
|
|
// CHECK-32-NEXT: entry:
|
|
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-NEXT: [[F_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[K:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 [[F]], ptr [[F_ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK-32-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-32: omp.dispatch.cond:
|
|
// CHECK-32-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
|
|
// CHECK-32-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-32: cond.true:
|
|
// CHECK-32-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-32: cond.false:
|
|
// CHECK-32-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: br label [[COND_END]]
|
|
// CHECK-32: cond.end:
|
|
// CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK-32-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK-32-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-32: omp.dispatch.body:
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-32: omp.inner.for.cond:
|
|
// CHECK-32-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
|
|
// CHECK-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK-32-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-32: omp.inner.for.body:
|
|
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 10
|
|
// CHECK-32-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK-32-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-32-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP12]], 10
|
|
// CHECK-32-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 10
|
|
// CHECK-32-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL5]]
|
|
// CHECK-32-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK-32-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]]
|
|
// CHECK-32-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: store i32 10, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[TMP15:%.*]] = load i32, ptr [[F_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[MUL8:%.*]] = mul nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-32-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], [[MUL8]]
|
|
// CHECK-32-NEXT: [[TMP16:%.*]] = load i32, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[ADD10:%.*]] = add nsw i32 [[ADD9]], [[TMP16]]
|
|
// CHECK-32-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK-32-NEXT: [[TMP18:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP18]]
|
|
// CHECK-32-NEXT: store i32 [[ADD10]], ptr [[ARRAYIDX11]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-32: omp.body.continue:
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-32: omp.inner.for.inc:
|
|
// CHECK-32-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK-32-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
|
|
// CHECK-32: omp.inner.for.end:
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-32: omp.dispatch.inc:
|
|
// CHECK-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK-32-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK-32-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-32: omp.dispatch.end:
|
|
// CHECK-32-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK-32-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
|
|
// CHECK-32-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-32: .omp.final.then:
|
|
// CHECK-32-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK-32-NEXT: store i32 10, ptr [[J]], align 4
|
|
// CHECK-32-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-32: .omp.final.done:
|
|
// CHECK-32-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34
|
|
// CHECK-32-EX-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR0:[0-9]+]] {
|
|
// CHECK-32-EX-NEXT: entry:
|
|
// CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[L_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[L_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[L]], ptr [[L_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-32-EX: user_code.entry:
|
|
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
|
|
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP5]], ptr [[L_CASTED]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[L_CASTED]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-32-EX-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i32 [[TMP4]], ptr [[TMP0]], i32 [[TMP6]]) #[[ATTR2:[0-9]+]]
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-32-EX-NEXT: ret void
|
|
// CHECK-32-EX: worker.exit:
|
|
// CHECK-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l34_omp_outlined
|
|
// CHECK-32-EX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], ptr nonnull align 4 dereferenceable(4000) [[A:%.*]], i32 [[L:%.*]]) #[[ATTR1:[0-9]+]] {
|
|
// CHECK-32-EX-NEXT: entry:
|
|
// CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[L_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[L]], ptr [[L_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK-32-EX: omp.precond.then:
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2:[0-9]+]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 128)
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.cond:
|
|
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-EX-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-32-EX: cond.true:
|
|
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-32-EX: cond.false:
|
|
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[COND_END]]
|
|
// CHECK-32-EX: cond.end:
|
|
// CHECK-32-EX-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK-32-EX-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.body:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.cond:
|
|
// CHECK-32-EX-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18:![0-9]+]]
|
|
// CHECK-32-EX-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-EX-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.body:
|
|
// CHECK-32-EX-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-EX-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i32], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK-32-EX-NEXT: store i32 1, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-EX-NEXT: [[TMP18:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP18]], ptr [[L_ADDR]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-32-EX: omp.body.continue:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.inc:
|
|
// CHECK-32-EX-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-EX-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP18]]
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
|
|
// CHECK-32-EX: omp.inner.for.end:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.inc:
|
|
// CHECK-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-32-EX: omp.dispatch.end:
|
|
// CHECK-32-EX-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK-32-EX-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK-32-EX-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-32-EX: .omp.final.then:
|
|
// CHECK-32-EX-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-EX-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK-32-EX-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1
|
|
// CHECK-32-EX-NEXT: [[MUL12:%.*]] = mul nsw i32 [[DIV11]], 1
|
|
// CHECK-32-EX-NEXT: [[ADD13:%.*]] = add nsw i32 0, [[MUL12]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD13]], ptr [[I3]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-32-EX: .omp.final.done:
|
|
// CHECK-32-EX-NEXT: [[TMP29:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0
|
|
// CHECK-32-EX-NEXT: br i1 [[TMP30]], label [[DOTOMP_LASTPRIVATE_THEN:%.*]], label [[DOTOMP_LASTPRIVATE_DONE:%.*]]
|
|
// CHECK-32-EX: .omp.lastprivate.then:
|
|
// CHECK-32-EX-NEXT: [[TMP31:%.*]] = load i32, ptr [[L_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP31]], ptr [[L_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[DOTOMP_LASTPRIVATE_DONE]]
|
|
// CHECK-32-EX: .omp.lastprivate.done:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK-32-EX: omp.precond.end:
|
|
// CHECK-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40
|
|
// CHECK-32-EX-SAME: (ptr noalias [[DYN_PTR:%.*]], i32 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR3:[0-9]+]] {
|
|
// CHECK-32-EX-NEXT: entry:
|
|
// CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[N_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-32-EX: user_code.entry:
|
|
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP3]], ptr [[N_CASTED]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[N_CASTED]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-32-EX-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], i32 [[TMP4]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-32-EX-NEXT: ret void
|
|
// CHECK-32-EX: worker.exit:
|
|
// CHECK-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l40_omp_outlined
|
|
// CHECK-32-EX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], i32 [[N:%.*]], ptr nonnull align 2 dereferenceable(2000) [[AA:%.*]]) #[[ATTR1]] {
|
|
// CHECK-32-EX-NEXT: entry:
|
|
// CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[N_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[AA_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[I3:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[N]], ptr [[N_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[AA]], ptr [[AA_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[AA_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = load i32, ptr [[N_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP2]], 0
|
|
// CHECK-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
|
|
// CHECK-32-EX-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
|
|
// CHECK-32-EX-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[I]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP3]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
|
|
// CHECK-32-EX: omp.precond.then:
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-EX-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP6]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.cond:
|
|
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-EX-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP7]], [[TMP8]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-32-EX: cond.true:
|
|
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-32-EX: cond.false:
|
|
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[COND_END]]
|
|
// CHECK-32-EX: cond.end:
|
|
// CHECK-32-EX-NEXT: [[COND:%.*]] = phi i32 [ [[TMP9]], [[COND_TRUE]] ], [ [[TMP10]], [[COND_FALSE]] ]
|
|
// CHECK-32-EX-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP11]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP12]], [[TMP13]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.body:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.cond:
|
|
// CHECK-32-EX-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22:![0-9]+]]
|
|
// CHECK-32-EX-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-EX-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.body:
|
|
// CHECK-32-EX-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
|
|
// CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD]], ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-EX-NEXT: [[TMP17:%.*]] = load i32, ptr [[I3]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1000 x i16], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK-32-EX-NEXT: [[TMP18:%.*]] = load i16, ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-EX-NEXT: [[CONV:%.*]] = sext i16 [[TMP18]] to i32
|
|
// CHECK-32-EX-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV]], 1
|
|
// CHECK-32-EX-NEXT: [[CONV8:%.*]] = trunc i32 [[ADD7]] to i16
|
|
// CHECK-32-EX-NEXT: store i16 [[CONV8]], ptr [[ARRAYIDX]], align 2, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-32-EX: omp.body.continue:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.inc:
|
|
// CHECK-32-EX-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-EX-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP22]]
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP23:![0-9]+]]
|
|
// CHECK-32-EX: omp.inner.for.end:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.inc:
|
|
// CHECK-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD10]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD11]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-32-EX: omp.dispatch.end:
|
|
// CHECK-32-EX-NEXT: [[TMP24:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP24]], align 4
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP25]])
|
|
// CHECK-32-EX-NEXT: [[TMP26:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
|
|
// CHECK-32-EX-NEXT: br i1 [[TMP27]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-32-EX: .omp.final.then:
|
|
// CHECK-32-EX-NEXT: [[TMP28:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_]], align 4
|
|
// CHECK-32-EX-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP28]], 0
|
|
// CHECK-32-EX-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
|
|
// CHECK-32-EX-NEXT: [[MUL14:%.*]] = mul nsw i32 [[DIV13]], 1
|
|
// CHECK-32-EX-NEXT: [[ADD15:%.*]] = add nsw i32 0, [[MUL14]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD15]], ptr [[I3]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-32-EX: .omp.final.done:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_PRECOND_END]]
|
|
// CHECK-32-EX: omp.precond.end:
|
|
// CHECK-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45
|
|
// CHECK-32-EX-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR0]] {
|
|
// CHECK-32-EX-NEXT: entry:
|
|
// CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-32-EX: user_code.entry:
|
|
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-32-EX-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]]) #[[ATTR2]]
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-32-EX-NEXT: ret void
|
|
// CHECK-32-EX: worker.exit:
|
|
// CHECK-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l45_omp_outlined
|
|
// CHECK-32-EX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR1]] {
|
|
// CHECK-32-EX-NEXT: entry:
|
|
// CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[B_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-EX-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.cond:
|
|
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 9
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-32-EX: cond.true:
|
|
// CHECK-32-EX-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-32-EX: cond.false:
|
|
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[COND_END]]
|
|
// CHECK-32-EX: cond.end:
|
|
// CHECK-32-EX-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK-32-EX-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.body:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.cond:
|
|
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25:![0-9]+]]
|
|
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-EX-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.body:
|
|
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
|
|
// CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], ptr [[TMP0]], i32 0, i32 [[TMP11]]
|
|
// CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-EX-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], 1
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-32-EX: omp.body.continue:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.inc:
|
|
// CHECK-32-EX-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-EX-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP13]], 1
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP25]]
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP26:![0-9]+]]
|
|
// CHECK-32-EX: omp.inner.for.end:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.inc:
|
|
// CHECK-32-EX-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP16]], [[TMP17]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-32-EX: omp.dispatch.end:
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK-32-EX-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP19:%.*]] = icmp ne i32 [[TMP18]], 0
|
|
// CHECK-32-EX-NEXT: br i1 [[TMP19]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-32-EX: .omp.final.then:
|
|
// CHECK-32-EX-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-32-EX: .omp.final.done:
|
|
// CHECK-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50
|
|
// CHECK-32-EX-SAME: (ptr noalias [[DYN_PTR:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR0]] {
|
|
// CHECK-32-EX-NEXT: entry:
|
|
// CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[F_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[F_CASTED:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[F]], ptr [[F_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_kernel_environment, ptr [[DYN_PTR]])
|
|
// CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
|
|
// CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
|
|
// CHECK-32-EX: user_code.entry:
|
|
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
|
|
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[F_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP3]], ptr [[F_CASTED]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[F_CASTED]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTZERO_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP2]], ptr [[DOTTHREADID_TEMP_]], align 4
|
|
// CHECK-32-EX-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]], ptr [[TMP0]], i32 [[TMP4]]) #[[ATTR2]]
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
|
|
// CHECK-32-EX-NEXT: ret void
|
|
// CHECK-32-EX: worker.exit:
|
|
// CHECK-32-EX-NEXT: ret void
|
|
//
|
|
//
|
|
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l50_omp_outlined
|
|
// CHECK-32-EX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(400) [[C:%.*]], i32 [[F:%.*]]) #[[ATTR1]] {
|
|
// CHECK-32-EX-NEXT: entry:
|
|
// CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
|
|
// CHECK-32-EX-NEXT: [[F_ADDR:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[TMP:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[_TMP1:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[K:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[I:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: [[J:%.*]] = alloca i32, align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[F]], ptr [[F_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 99, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-EX-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
|
|
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_distribute_static_init_4(ptr @[[GLOB2]], i32 [[TMP2]], i32 91, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 [[NVPTX_NUM_THREADS]])
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.cond:
|
|
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 99
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
|
|
// CHECK-32-EX: cond.true:
|
|
// CHECK-32-EX-NEXT: br label [[COND_END:%.*]]
|
|
// CHECK-32-EX: cond.false:
|
|
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[COND_END]]
|
|
// CHECK-32-EX: cond.end:
|
|
// CHECK-32-EX-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
|
|
// CHECK-32-EX-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP2]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.body:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.cond:
|
|
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28:![0-9]+]]
|
|
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[CMP3:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
|
|
// CHECK-32-EX-NEXT: br i1 [[CMP3]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.body:
|
|
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[DIV:%.*]] = sdiv i32 [[TMP10]], 10
|
|
// CHECK-32-EX-NEXT: [[MUL:%.*]] = mul nsw i32 [[DIV]], 1
|
|
// CHECK-32-EX-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD]], ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[DIV4:%.*]] = sdiv i32 [[TMP12]], 10
|
|
// CHECK-32-EX-NEXT: [[MUL5:%.*]] = mul nsw i32 [[DIV4]], 10
|
|
// CHECK-32-EX-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], [[MUL5]]
|
|
// CHECK-32-EX-NEXT: [[MUL6:%.*]] = mul nsw i32 [[SUB]], 1
|
|
// CHECK-32-EX-NEXT: [[ADD7:%.*]] = add nsw i32 0, [[MUL6]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD7]], ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: store i32 10, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[TMP13:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[TMP14:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[TMP15:%.*]] = load i32, ptr [[F_ADDR]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[MUL8:%.*]] = mul nsw i32 [[TMP14]], [[TMP15]]
|
|
// CHECK-32-EX-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP13]], [[MUL8]]
|
|
// CHECK-32-EX-NEXT: [[TMP16:%.*]] = load i32, ptr [[K]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[ADD10:%.*]] = add nsw i32 [[ADD9]], [[TMP16]]
|
|
// CHECK-32-EX-NEXT: [[TMP17:%.*]] = load i32, ptr [[I]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x i32]], ptr [[TMP0]], i32 0, i32 [[TMP17]]
|
|
// CHECK-32-EX-NEXT: [[TMP18:%.*]] = load i32, ptr [[J]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [10 x i32], ptr [[ARRAYIDX]], i32 0, i32 [[TMP18]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD10]], ptr [[ARRAYIDX11]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
|
|
// CHECK-32-EX: omp.body.continue:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
|
|
// CHECK-32-EX: omp.inner.for.inc:
|
|
// CHECK-32-EX-NEXT: [[TMP19:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP19]], 1
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP28]]
|
|
// CHECK-32-EX-NEXT: br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP29:![0-9]+]]
|
|
// CHECK-32-EX: omp.inner.for.end:
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
|
|
// CHECK-32-EX: omp.dispatch.inc:
|
|
// CHECK-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_LB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE]], align 4
|
|
// CHECK-32-EX-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
|
|
// CHECK-32-EX-NEXT: store i32 [[ADD14]], ptr [[DOTOMP_UB]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[OMP_DISPATCH_COND]]
|
|
// CHECK-32-EX: omp.dispatch.end:
|
|
// CHECK-32-EX-NEXT: call void @__kmpc_distribute_static_fini(ptr @[[GLOB2]], i32 [[TMP2]])
|
|
// CHECK-32-EX-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_IS_LAST]], align 4
|
|
// CHECK-32-EX-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0
|
|
// CHECK-32-EX-NEXT: br i1 [[TMP25]], label [[DOTOMP_FINAL_THEN:%.*]], label [[DOTOMP_FINAL_DONE:%.*]]
|
|
// CHECK-32-EX: .omp.final.then:
|
|
// CHECK-32-EX-NEXT: store i32 10, ptr [[I]], align 4
|
|
// CHECK-32-EX-NEXT: store i32 10, ptr [[J]], align 4
|
|
// CHECK-32-EX-NEXT: br label [[DOTOMP_FINAL_DONE]]
|
|
// CHECK-32-EX: .omp.final.done:
|
|
// CHECK-32-EX-NEXT: ret void
|
|
//
|