clang-p2996/clang/test/OpenMP/amdgcn_target_device_vla.cpp
Johannes Doerfert b8cbc5c02c [OpenMP] Introduce the KernelLaunchEnvironment as implicit argument (#70401)
The KernelEnvironment is for compile-time information about a kernel. It
allows the compiler to feed information to the runtime. The
KernelLaunchEnvironment is for dynamic information *per* kernel launch.
It allows the runtime to feed information to the kernel that is not
shared with other invocations of the kernel. The first use case is to
replace the globals that synchronize teams reductions with per-launch
versions. This allows concurrent teams reductions. More use cases will
follow, e.g., per-launch memory pools.

Fixes: https://github.com/llvm/llvm-project/issues/70249
2023-10-31 19:38:43 -07:00
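
As a minimal sketch of what per-launch reduction state enables (editorial
illustration, not part of the test file below; the helper name `reduce` and
the array sizes are invented): two host threads can launch the same
teams-reduction kernel concurrently, where process-wide synchronization
globals would previously have made the launches interfere.

#include <cstdio>

// Hypothetical helper: a teams reduction offloaded to the device. With a
// per-launch KernelLaunchEnvironment, each launch gets its own
// reduction-synchronization buffers instead of the former shared globals.
static int reduce(const int *v, int n) {
  int sum = 0;
#pragma omp target teams distribute parallel for reduction(+ : sum)         \
    map(to : v[:n]) map(tofrom : sum)
  for (int i = 0; i < n; i++)
    sum += v[i];
  return sum;
}

int main() {
  static int a[1024], b[1024];
  for (int i = 0; i < 1024; i++) {
    a[i] = 1;
    b[i] = 2;
  }
  int ra = 0, rb = 0;
  // Two concurrent launches of the same kernel: with per-launch state,
  // their teams reductions no longer contend on shared globals.
#pragma omp parallel sections num_threads(2)
  {
#pragma omp section
    ra = reduce(a, 1024);
#pragma omp section
    rb = reduce(b, 1024);
  }
  printf("%d %d\n", ra, rb); // expected: 1024 2048
  return 0;
}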


// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
int foo1() {
  int sum = 0.0;
#pragma omp target map(tofrom: sum)
  {
    int N = 10;
    int A[N];
    for (int i = 0; i < N; i++)
      A[i] = i;
    for (int i = 0; i < N; i++)
      sum += A[i];
  }
  return sum;
}

int foo2() {
  int sum = 0.0;
  int M = 12;
  int result[M];
#pragma omp target teams distribute parallel for map(from: result[:M])
  for (int i = 0; i < M; i++) {
    int N = 10;
    int A[N];
    result[i] = i;
    for (int j = 0; j < N; j++)
      A[j] = j;
    for (int j = 0; j < N; j++)
      result[i] += A[j];
  }
  for (int i = 0; i < M; i++)
    sum += result[i];
  return sum;
}

int foo3() {
  int sum = 0.0;
  int M = 12;
  int result[M];
#pragma omp target teams distribute map(from: result[:M])
  for (int i = 0; i < M; i++) {
    int N = 10;
    int A[N];
    result[i] = i;
#pragma omp parallel for
    for (int j = 0; j < N; j++)
      A[j] = j;
    for (int j = 0; j < N; j++)
      result[i] += A[j];
  }
  for (int i = 0; i < M; i++)
    sum += result[i];
  return sum;
}

int foo4() {
  int sum = 0.0;
  int M = 12;
  int result[M];
  int N = 10;
#pragma omp target teams distribute map(from: result[:M])
  for (int i = 0; i < M; i++) {
    int A[N];
    result[i] = i;
#pragma omp parallel for
    for (int j = 0; j < N; j++)
      A[j] = j;
    for (int j = 0; j < N; j++)
      result[i] += A[j];
  }
  for (int i = 0; i < M; i++)
    sum += result[i];
  return sum;
}

int main() {
  return foo1() + foo2() + foo3() + foo4();
}
#endif
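// Editorial note on the lowering verified below (not produced by the update
// script): in foo1 and foo3 the device-side VLA `A` is allocated with
// __kmpc_alloc_shared and released with __kmpc_free_shared, and in foo3 the
// variable `N`, which escapes into the inner parallel region, is globalized
// the same way; in foo2 the VLA is private to the SPMD loop body, so it
// remains an ordinary addrspace(5) alloca bracketed by llvm.stacksave.p5
// and llvm.stackrestore.p5.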
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo1v_l12
// CHECK-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[SUM:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[SUM_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[N:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[I1:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
// CHECK-NEXT: [[SUM_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SUM_ADDR]] to ptr
// CHECK-NEXT: [[N_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[N]] to ptr
// CHECK-NEXT: [[__VLA_EXPR0_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[__VLA_EXPR0]] to ptr
// CHECK-NEXT: [[I_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I]] to ptr
// CHECK-NEXT: [[I1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I1]] to ptr
// CHECK-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[SUM]], ptr [[SUM_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[SUM_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo1v_l12_kernel_environment to ptr), ptr [[DYN_PTR]])
// CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK: user_code.entry:
// CHECK-NEXT: store i32 10, ptr [[N_ASCAST]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
// CHECK-NEXT: [[TMP4:%.*]] = mul nuw i64 [[TMP3]], 4
// CHECK-NEXT: [[TMP5:%.*]] = add nuw i64 [[TMP4]], 3
// CHECK-NEXT: [[TMP6:%.*]] = udiv i64 [[TMP5]], 4
// CHECK-NEXT: [[TMP7:%.*]] = mul nuw i64 [[TMP6]], 4
// CHECK-NEXT: [[A:%.*]] = call align 4 ptr @__kmpc_alloc_shared(i64 [[TMP7]])
// CHECK-NEXT: store i64 [[TMP3]], ptr [[__VLA_EXPR0_ASCAST]], align 8
// CHECK-NEXT: store i32 0, ptr [[I_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[I_ASCAST]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[N_ASCAST]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]]
// CHECK-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[I_ASCAST]], align 4
// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[I_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP11]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[TMP10]], ptr [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[I_ASCAST]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP12]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[I_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP13:![0-9]+]]
// CHECK: worker.exit:
// CHECK-NEXT: ret void
// CHECK: for.end:
// CHECK-NEXT: store i32 0, ptr [[I1_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND2:%.*]]
// CHECK: for.cond2:
// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[I1_ASCAST]], align 4
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[N_ASCAST]], align 4
// CHECK-NEXT: [[CMP3:%.*]] = icmp slt i32 [[TMP13]], [[TMP14]]
// CHECK-NEXT: br i1 [[CMP3]], label [[FOR_BODY4:%.*]], label [[FOR_END9:%.*]]
// CHECK: for.body4:
// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[I1_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM5:%.*]] = sext i32 [[TMP15]] to i64
// CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IDXPROM5]]
// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], [[TMP16]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[TMP0]], align 4
// CHECK-NEXT: br label [[FOR_INC7:%.*]]
// CHECK: for.inc7:
// CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[I1_ASCAST]], align 4
// CHECK-NEXT: [[INC8:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK-NEXT: store i32 [[INC8]], ptr [[I1_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND2]], !llvm.loop [[LOOP15:![0-9]+]]
// CHECK: for.end9:
// CHECK-NEXT: call void @__kmpc_free_shared(ptr [[A]], i64 [[TMP7]])
// CHECK-NEXT: call void @__kmpc_target_deinit()
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo2v_l30
// CHECK-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[M:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[RESULT:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[RESULT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
// CHECK-NEXT: [[M_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[M_ADDR]] to ptr
// CHECK-NEXT: [[VLA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR]] to ptr
// CHECK-NEXT: [[RESULT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT_ADDR]] to ptr
// CHECK-NEXT: [[M_CASTED_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[M_CASTED]] to ptr
// CHECK-NEXT: [[DOTZERO_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTZERO_ADDR]] to ptr
// CHECK-NEXT: [[DOTTHREADID_TEMP__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTTHREADID_TEMP_]] to ptr
// CHECK-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[M]], ptr [[M_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[RESULT]], ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo2v_l30_kernel_environment to ptr), ptr [[DYN_PTR]])
// CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK: user_code.entry:
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr))
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP4]], ptr [[M_CASTED_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[M_CASTED_ASCAST]], align 8
// CHECK-NEXT: store i32 0, ptr [[DOTZERO_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr [[DOTTHREADID_TEMP__ASCAST]], align 4
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo2v_l30_omp_outlined(ptr [[DOTTHREADID_TEMP__ASCAST]], ptr [[DOTZERO_ADDR_ASCAST]], i64 [[TMP5]], i64 [[TMP0]], ptr [[TMP1]]) #[[ATTR4:[0-9]+]]
// CHECK-NEXT: call void @__kmpc_target_deinit()
// CHECK-NEXT: ret void
// CHECK: worker.exit:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo2v_l30_omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[M:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[RESULT:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[RESULT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[I3:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [5 x ptr], align 8, addrspace(5)
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
// CHECK-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
// CHECK-NEXT: [[M_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[M_ADDR]] to ptr
// CHECK-NEXT: [[VLA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR]] to ptr
// CHECK-NEXT: [[RESULT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT_ADDR]] to ptr
// CHECK-NEXT: [[DOTOMP_IV_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IV]] to ptr
// CHECK-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR_1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_1]] to ptr
// CHECK-NEXT: [[I_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I]] to ptr
// CHECK-NEXT: [[DOTOMP_COMB_LB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_COMB_LB]] to ptr
// CHECK-NEXT: [[DOTOMP_COMB_UB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_COMB_UB]] to ptr
// CHECK-NEXT: [[DOTOMP_STRIDE_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_STRIDE]] to ptr
// CHECK-NEXT: [[DOTOMP_IS_LAST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IS_LAST]] to ptr
// CHECK-NEXT: [[I3_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I3]] to ptr
// CHECK-NEXT: [[M_CASTED_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[M_CASTED]] to ptr
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[M]], ptr [[M_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[RESULT]], ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[I_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK: omp.precond.then:
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST_ASCAST]], align 4
// CHECK-NEXT: [[NVPTX_NUM_THREADS:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
// CHECK-NEXT: call void @__kmpc_distribute_static_init_4(ptr addrspacecast (ptr addrspace(1) @[[GLOB2:[0-9]+]] to ptr), i32 [[TMP7]], i32 91, ptr [[DOTOMP_IS_LAST_ASCAST]], ptr [[DOTOMP_COMB_LB_ASCAST]], ptr [[DOTOMP_COMB_UB_ASCAST]], ptr [[DOTOMP_STRIDE_ASCAST]], i32 1, i32 [[NVPTX_NUM_THREADS]])
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK-NEXT: [[CMP5:%.*]] = icmp slt i32 [[TMP13]], [[ADD]]
// CHECK-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP16:%.*]] = zext i32 [[TMP15]] to i64
// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[M_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP19]], ptr [[M_CASTED_ASCAST]], align 4
// CHECK-NEXT: [[TMP20:%.*]] = load i64, ptr [[M_CASTED_ASCAST]], align 8
// CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
// CHECK-NEXT: [[TMP22:%.*]] = inttoptr i64 [[TMP16]] to ptr
// CHECK-NEXT: store ptr [[TMP22]], ptr [[TMP21]], align 8
// CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
// CHECK-NEXT: [[TMP24:%.*]] = inttoptr i64 [[TMP18]] to ptr
// CHECK-NEXT: store ptr [[TMP24]], ptr [[TMP23]], align 8
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
// CHECK-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP20]] to ptr
// CHECK-NEXT: store ptr [[TMP26]], ptr [[TMP25]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 3
// CHECK-NEXT: [[TMP28:%.*]] = inttoptr i64 [[TMP0]] to ptr
// CHECK-NEXT: store ptr [[TMP28]], ptr [[TMP27]], align 8
// CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 4
// CHECK-NEXT: store ptr [[TMP1]], ptr [[TMP29]], align 8
// CHECK-NEXT: [[TMP30:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[TMP30]], align 4
// CHECK-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP31]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo2v_l30_omp_outlined_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 5)
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
// CHECK-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP35:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP34]], [[TMP35]]
// CHECK-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]]
// CHECK-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: [[CMP9:%.*]] = icmp sgt i32 [[TMP38]], [[TMP39]]
// CHECK-NEXT: br i1 [[CMP9]], label [[COND_TRUE10:%.*]], label [[COND_FALSE11:%.*]]
// CHECK: cond.true10:
// CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END12:%.*]]
// CHECK: cond.false11:
// CHECK-NEXT: [[TMP41:%.*]] = load i32, ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END12]]
// CHECK: cond.end12:
// CHECK-NEXT: [[COND13:%.*]] = phi i32 [ [[TMP40]], [[COND_TRUE10]] ], [ [[TMP41]], [[COND_FALSE11]] ]
// CHECK-NEXT: store i32 [[COND13]], ptr [[DOTOMP_COMB_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP42:%.*]] = load i32, ptr [[DOTOMP_COMB_LB_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP42]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP43:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP44:%.*]] = load i32, ptr [[TMP43]], align 4
// CHECK-NEXT: call void @__kmpc_distribute_static_fini(ptr addrspacecast (ptr addrspace(1) @[[GLOB2]] to ptr), i32 [[TMP44]])
// CHECK-NEXT: br label [[OMP_PRECOND_END]]
// CHECK: omp.precond.end:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo2v_l30_omp_outlined_omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[M:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[RESULT:%.*]]) #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[RESULT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[I4:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[N:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[SAVED_STACK:%.*]] = alloca ptr addrspace(5), align 4, addrspace(5)
// CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[J:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[J11:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
// CHECK-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
// CHECK-NEXT: [[DOTPREVIOUS_LB__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTPREVIOUS_LB__ADDR]] to ptr
// CHECK-NEXT: [[DOTPREVIOUS_UB__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTPREVIOUS_UB__ADDR]] to ptr
// CHECK-NEXT: [[M_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[M_ADDR]] to ptr
// CHECK-NEXT: [[VLA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR]] to ptr
// CHECK-NEXT: [[RESULT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT_ADDR]] to ptr
// CHECK-NEXT: [[DOTOMP_IV_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IV]] to ptr
// CHECK-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR_1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_1]] to ptr
// CHECK-NEXT: [[I_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I]] to ptr
// CHECK-NEXT: [[DOTOMP_LB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_LB]] to ptr
// CHECK-NEXT: [[DOTOMP_UB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_UB]] to ptr
// CHECK-NEXT: [[DOTOMP_STRIDE_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_STRIDE]] to ptr
// CHECK-NEXT: [[DOTOMP_IS_LAST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IS_LAST]] to ptr
// CHECK-NEXT: [[I4_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I4]] to ptr
// CHECK-NEXT: [[N_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[N]] to ptr
// CHECK-NEXT: [[SAVED_STACK_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SAVED_STACK]] to ptr
// CHECK-NEXT: [[__VLA_EXPR0_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[__VLA_EXPR0]] to ptr
// CHECK-NEXT: [[J_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[J]] to ptr
// CHECK-NEXT: [[J11_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[J11]] to ptr
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[DOTPREVIOUS_LB_]], ptr [[DOTPREVIOUS_LB__ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[DOTPREVIOUS_UB_]], ptr [[DOTPREVIOUS_UB__ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[M]], ptr [[M_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[RESULT]], ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[I_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK: omp.precond.then:
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[DOTPREVIOUS_LB__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[TMP6]] to i32
// CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP7]] to i32
// CHECK-NEXT: store i32 [[CONV]], ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: store i32 [[CONV3]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST_ASCAST]], align 4
// CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP8]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(ptr addrspacecast (ptr addrspace(1) @[[GLOB3:[0-9]+]] to ptr), i32 [[TMP9]], i32 33, ptr [[DOTOMP_IS_LAST_ASCAST]], ptr [[DOTOMP_LB_ASCAST]], ptr [[DOTOMP_UB_ASCAST]], ptr [[DOTOMP_STRIDE_ASCAST]], i32 1, i32 1)
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP10]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[CONV5:%.*]] = sext i32 [[TMP11]] to i64
// CHECK-NEXT: [[TMP12:%.*]] = load i64, ptr [[DOTPREVIOUS_UB__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[CMP6:%.*]] = icmp ule i64 [[CONV5]], [[TMP12]]
// CHECK-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP13]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[I4_ASCAST]], align 4
// CHECK-NEXT: store i32 10, ptr [[N_ASCAST]], align 4
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[N_ASCAST]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = zext i32 [[TMP14]] to i64
// CHECK-NEXT: [[TMP16:%.*]] = call ptr addrspace(5) @llvm.stacksave.p5()
// CHECK-NEXT: store ptr addrspace(5) [[TMP16]], ptr [[SAVED_STACK_ASCAST]], align 4
// CHECK-NEXT: [[VLA7:%.*]] = alloca i32, i64 [[TMP15]], align 4, addrspace(5)
// CHECK-NEXT: [[VLA7_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA7]] to ptr
// CHECK-NEXT: store i64 [[TMP15]], ptr [[__VLA_EXPR0_ASCAST]], align 8
// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[I4_ASCAST]], align 4
// CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[I4_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP18]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[TMP17]], ptr [[ARRAYIDX]], align 4
// CHECK-NEXT: store i32 0, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[N_ASCAST]], align 4
// CHECK-NEXT: [[CMP8:%.*]] = icmp slt i32 [[TMP19]], [[TMP20]]
// CHECK-NEXT: br i1 [[CMP8]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP22]] to i64
// CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[VLA7_ASCAST]], i64 [[IDXPROM9]]
// CHECK-NEXT: store i32 [[TMP21]], ptr [[ARRAYIDX10]], align 4
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP23]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[J_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP16:![0-9]+]]
// CHECK: for.end:
// CHECK-NEXT: store i32 0, ptr [[J11_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND12:%.*]]
// CHECK: for.cond12:
// CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[J11_ASCAST]], align 4
// CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[N_ASCAST]], align 4
// CHECK-NEXT: [[CMP13:%.*]] = icmp slt i32 [[TMP24]], [[TMP25]]
// CHECK-NEXT: br i1 [[CMP13]], label [[FOR_BODY14:%.*]], label [[FOR_END22:%.*]]
// CHECK: for.body14:
// CHECK-NEXT: [[TMP26:%.*]] = load i32, ptr [[J11_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM15:%.*]] = sext i32 [[TMP26]] to i64
// CHECK-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds i32, ptr [[VLA7_ASCAST]], i64 [[IDXPROM15]]
// CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[ARRAYIDX16]], align 4
// CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[I4_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM17:%.*]] = sext i32 [[TMP28]] to i64
// CHECK-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM17]]
// CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[ARRAYIDX18]], align 4
// CHECK-NEXT: [[ADD19:%.*]] = add nsw i32 [[TMP29]], [[TMP27]]
// CHECK-NEXT: store i32 [[ADD19]], ptr [[ARRAYIDX18]], align 4
// CHECK-NEXT: br label [[FOR_INC20:%.*]]
// CHECK: for.inc20:
// CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[J11_ASCAST]], align 4
// CHECK-NEXT: [[INC21:%.*]] = add nsw i32 [[TMP30]], 1
// CHECK-NEXT: store i32 [[INC21]], ptr [[J11_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND12]], !llvm.loop [[LOOP17:![0-9]+]]
// CHECK: for.end22:
// CHECK-NEXT: [[TMP31:%.*]] = load ptr addrspace(5), ptr [[SAVED_STACK_ASCAST]], align 4
// CHECK-NEXT: call void @llvm.stackrestore.p5(ptr addrspace(5) [[TMP31]])
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: [[ADD23:%.*]] = add nsw i32 [[TMP32]], [[TMP33]]
// CHECK-NEXT: store i32 [[ADD23]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP34:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP35:%.*]] = load i32, ptr [[TMP34]], align 4
// CHECK-NEXT: call void @__kmpc_distribute_static_fini(ptr addrspacecast (ptr addrspace(1) @[[GLOB2]] to ptr), i32 [[TMP35]])
// CHECK-NEXT: br label [[OMP_PRECOND_END]]
// CHECK: omp.precond.end:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo3v_l52
// CHECK-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[M:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[RESULT:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[RESULT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
// CHECK-NEXT: [[M_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[M_ADDR]] to ptr
// CHECK-NEXT: [[VLA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR]] to ptr
// CHECK-NEXT: [[RESULT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT_ADDR]] to ptr
// CHECK-NEXT: [[M_CASTED_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[M_CASTED]] to ptr
// CHECK-NEXT: [[DOTZERO_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTZERO_ADDR]] to ptr
// CHECK-NEXT: [[DOTTHREADID_TEMP__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTTHREADID_TEMP_]] to ptr
// CHECK-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[M]], ptr [[M_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[RESULT]], ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo3v_l52_kernel_environment to ptr), ptr [[DYN_PTR]])
// CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK: user_code.entry:
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP4]], ptr [[M_CASTED_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[M_CASTED_ASCAST]], align 8
// CHECK-NEXT: store i32 0, ptr [[DOTZERO_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr [[DOTTHREADID_TEMP__ASCAST]], align 4
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo3v_l52_omp_outlined(ptr [[DOTTHREADID_TEMP__ASCAST]], ptr [[DOTZERO_ADDR_ASCAST]], i64 [[TMP5]], i64 [[TMP0]], ptr [[TMP1]]) #[[ATTR4]]
// CHECK-NEXT: call void @__kmpc_target_deinit()
// CHECK-NEXT: ret void
// CHECK: worker.exit:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo3v_l52_omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[M:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[RESULT:%.*]]) #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[RESULT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[I3:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8, addrspace(5)
// CHECK-NEXT: [[J:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
// CHECK-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
// CHECK-NEXT: [[M_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[M_ADDR]] to ptr
// CHECK-NEXT: [[VLA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR]] to ptr
// CHECK-NEXT: [[RESULT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT_ADDR]] to ptr
// CHECK-NEXT: [[DOTOMP_IV_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IV]] to ptr
// CHECK-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR_1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_1]] to ptr
// CHECK-NEXT: [[I_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I]] to ptr
// CHECK-NEXT: [[DOTOMP_LB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_LB]] to ptr
// CHECK-NEXT: [[DOTOMP_UB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_UB]] to ptr
// CHECK-NEXT: [[DOTOMP_STRIDE_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_STRIDE]] to ptr
// CHECK-NEXT: [[DOTOMP_IS_LAST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IS_LAST]] to ptr
// CHECK-NEXT: [[I3_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I3]] to ptr
// CHECK-NEXT: [[__VLA_EXPR0_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[__VLA_EXPR0]] to ptr
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
// CHECK-NEXT: [[J_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[J]] to ptr
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[M]], ptr [[M_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[RESULT]], ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[N:%.*]] = call align 8 ptr @__kmpc_alloc_shared(i64 4)
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[M_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP3]], 0
// CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[I_ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP4]]
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK: omp.precond.then:
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST_ASCAST]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[TMP6]], align 4
// CHECK-NEXT: call void @__kmpc_distribute_static_init_4(ptr addrspacecast (ptr addrspace(1) @[[GLOB2]] to ptr), i32 [[TMP7]], i32 92, ptr [[DOTOMP_IS_LAST_ASCAST]], ptr [[DOTOMP_LB_ASCAST]], ptr [[DOTOMP_UB_ASCAST]], ptr [[DOTOMP_STRIDE_ASCAST]], i32 1, i32 1)
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
// CHECK-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TMP10]], [[COND_TRUE]] ], [ [[TMP11]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP12]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP13]], [[TMP14]]
// CHECK-NEXT: br i1 [[CMP5]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[I3_ASCAST]], align 4
// CHECK-NEXT: store i32 10, ptr [[N]], align 4
// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[N]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64
// CHECK-NEXT: [[TMP18:%.*]] = mul nuw i64 [[TMP17]], 4
// CHECK-NEXT: [[TMP19:%.*]] = add nuw i64 [[TMP18]], 3
// CHECK-NEXT: [[TMP20:%.*]] = udiv i64 [[TMP19]], 4
// CHECK-NEXT: [[TMP21:%.*]] = mul nuw i64 [[TMP20]], 4
// CHECK-NEXT: [[A:%.*]] = call align 4 ptr @__kmpc_alloc_shared(i64 [[TMP21]])
// CHECK-NEXT: store i64 [[TMP17]], ptr [[__VLA_EXPR0_ASCAST]], align 8
// CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[I3_ASCAST]], align 4
// CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[I3_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP23]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[TMP22]], ptr [[ARRAYIDX]], align 4
// CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
// CHECK-NEXT: store ptr [[N]], ptr [[TMP24]], align 8
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
// CHECK-NEXT: [[TMP26:%.*]] = inttoptr i64 [[TMP17]] to ptr
// CHECK-NEXT: store ptr [[TMP26]], ptr [[TMP25]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
// CHECK-NEXT: store ptr [[A]], ptr [[TMP27]], align 8
// CHECK-NEXT: [[TMP28:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP29:%.*]] = load i32, ptr [[TMP28]], align 4
// CHECK-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP29]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo3v_l52_omp_outlined_omp_outlined, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo3v_l52_omp_outlined_omp_outlined_wrapper, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 3)
// CHECK-NEXT: store i32 0, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
// CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[N]], align 4
// CHECK-NEXT: [[CMP6:%.*]] = icmp slt i32 [[TMP30]], [[TMP31]]
// CHECK-NEXT: br i1 [[CMP6]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM7:%.*]] = sext i32 [[TMP32]] to i64
// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IDXPROM7]]
// CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[ARRAYIDX8]], align 4
// CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr [[I3_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM9:%.*]] = sext i32 [[TMP34]] to i64
// CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM9]]
// CHECK-NEXT: [[TMP35:%.*]] = load i32, ptr [[ARRAYIDX10]], align 4
// CHECK-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP35]], [[TMP33]]
// CHECK-NEXT: store i32 [[ADD11]], ptr [[ARRAYIDX10]], align 4
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP36]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[J_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP18:![0-9]+]]
// CHECK: for.end:
// CHECK-NEXT: call void @__kmpc_free_shared(ptr [[A]], i64 [[TMP21]])
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP37]], 1
// CHECK-NEXT: store i32 [[ADD12]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP38:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[TMP38]], align 4
// CHECK-NEXT: call void @__kmpc_distribute_static_fini(ptr addrspacecast (ptr addrspace(1) @[[GLOB2]] to ptr), i32 [[TMP39]])
// CHECK-NEXT: br label [[OMP_PRECOND_END]]
// CHECK: omp.precond.end:
// CHECK-NEXT: call void @__kmpc_free_shared(ptr [[N]], i64 4)
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo3v_l52_omp_outlined_omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[J:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[J3:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
// CHECK-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
// CHECK-NEXT: [[N_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[N_ADDR]] to ptr
// CHECK-NEXT: [[VLA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR]] to ptr
// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
// CHECK-NEXT: [[DOTOMP_IV_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IV]] to ptr
// CHECK-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR_1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_1]] to ptr
// CHECK-NEXT: [[J_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[J]] to ptr
// CHECK-NEXT: [[DOTOMP_LB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_LB]] to ptr
// CHECK-NEXT: [[DOTOMP_UB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_UB]] to ptr
// CHECK-NEXT: [[DOTOMP_STRIDE_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_STRIDE]] to ptr
// CHECK-NEXT: [[DOTOMP_IS_LAST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IS_LAST]] to ptr
// CHECK-NEXT: [[J3_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[J3]] to ptr
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[N]], ptr [[N_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
// CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK: omp.precond.then:
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST_ASCAST]], align 4
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(ptr addrspacecast (ptr addrspace(1) @[[GLOB3]] to ptr), i32 [[TMP8]], i32 33, ptr [[DOTOMP_IS_LAST_ASCAST]], ptr [[DOTOMP_LB_ASCAST]], ptr [[DOTOMP_UB_ASCAST]], ptr [[DOTOMP_STRIDE_ASCAST]], i32 1, i32 1)
// CHECK-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK: omp.dispatch.cond:
// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK: omp.dispatch.body:
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[J3_ASCAST]], align 4
// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[J3_ASCAST]], align 4
// CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[J3_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK: omp.dispatch.inc:
// CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
// CHECK-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK: omp.dispatch.end:
// CHECK-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr addrspacecast (ptr addrspace(1) @[[GLOB3]] to ptr), i32 [[TMP27]])
// CHECK-NEXT: br label [[OMP_PRECOND_END]]
// CHECK: omp.precond.end:
// CHECK-NEXT: ret void
//
//
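// The wrapper checked next is the worker-side entry into the parallel
// region nested in foo3's distribute loop. It is expected to retrieve the
// shared-argument array published by the team main thread through
// __kmpc_get_shared_variables and forward the three captures -- the
// shared N, the VLA extent (an i64 passed through a pointer-sized slot),
// and the VLA base pointer -- to the outlined parallel-for body.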
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo3v_l52_omp_outlined_omp_outlined_wrapper
// CHECK-SAME: (i16 noundef zeroext [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2, addrspace(5)
// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTADDR]] to ptr
// CHECK-NEXT: [[DOTADDR1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTADDR1]] to ptr
// CHECK-NEXT: [[DOTZERO_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTZERO_ADDR]] to ptr
// CHECK-NEXT: [[GLOBAL_ARGS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[GLOBAL_ARGS]] to ptr
// CHECK-NEXT: store i16 [[TMP0]], ptr [[DOTADDR_ASCAST]], align 2
// CHECK-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTZERO_ADDR_ASCAST]], align 4
// CHECK-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS_ASCAST]])
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[GLOBAL_ARGS_ASCAST]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 0
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 1
// CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP5]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 2
// CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 8
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo3v_l52_omp_outlined_omp_outlined(ptr [[DOTADDR1_ASCAST]], ptr [[DOTZERO_ADDR_ASCAST]], ptr [[TMP4]], i64 [[TMP6]], ptr [[TMP8]]) #[[ATTR4]]
// CHECK-NEXT: ret void
//
//
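// The foo4 kernel entry follows the guarded pattern: only threads for
// which __kmpc_target_init returns -1 execute the user code and call
// __kmpc_target_deinit; all other threads leave through worker.exit.
// A minimal sketch of the shape the checks pin down (names are
// illustrative, not the generated symbols):
//
//   if (__kmpc_target_init(&kernel_environment, dyn_ptr) == -1) {
//     foo4_l76_omp_outlined(&tid, &zero, M, N, vla_extent, result);
//     __kmpc_target_deinit();
//   }
//   // else: worker.exit, return immediately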
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo4v_l76
// CHECK-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[M:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[RESULT:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[RESULT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[N_CASTED:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DYN_PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DYN_PTR_ADDR]] to ptr
// CHECK-NEXT: [[M_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[M_ADDR]] to ptr
// CHECK-NEXT: [[N_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[N_ADDR]] to ptr
// CHECK-NEXT: [[VLA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR]] to ptr
// CHECK-NEXT: [[RESULT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT_ADDR]] to ptr
// CHECK-NEXT: [[M_CASTED_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[M_CASTED]] to ptr
// CHECK-NEXT: [[N_CASTED_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[N_CASTED]] to ptr
// CHECK-NEXT: [[DOTZERO_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTZERO_ADDR]] to ptr
// CHECK-NEXT: [[DOTTHREADID_TEMP__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTTHREADID_TEMP_]] to ptr
// CHECK-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[M]], ptr [[M_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[N]], ptr [[N_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[RESULT]], ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo4v_l76_kernel_environment to ptr), ptr [[DYN_PTR]])
// CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK: user_code.entry:
// CHECK-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[M_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP4]], ptr [[M_CASTED_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i64, ptr [[M_CASTED_ASCAST]], align 8
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[N_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP6]], ptr [[N_CASTED_ASCAST]], align 4
// CHECK-NEXT: [[TMP7:%.*]] = load i64, ptr [[N_CASTED_ASCAST]], align 8
// CHECK-NEXT: store i32 0, ptr [[DOTZERO_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr [[DOTTHREADID_TEMP__ASCAST]], align 4
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo4v_l76_omp_outlined(ptr [[DOTTHREADID_TEMP__ASCAST]], ptr [[DOTZERO_ADDR_ASCAST]], i64 [[TMP5]], i64 [[TMP7]], i64 [[TMP0]], ptr [[TMP1]]) #[[ATTR4]]
// CHECK-NEXT: call void @__kmpc_target_deinit()
// CHECK-NEXT: ret void
// CHECK: worker.exit:
// CHECK-NEXT: ret void
//
//
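// The outlined teams-distribute body for foo4 is where both device-side
// allocations happen. A rough C-level sketch of the pattern the checks
// below enforce (the DeviceRTL signatures and the round_up helper are
// assumptions for illustration, inferred from the calls in the IR):
//
//   int *N1 = (int *)__kmpc_alloc_shared(4);      // N escapes into the
//   *N1 = N;                                      // nested parallel region
//   ...
//   // VLA byte size rounded up to a multiple of the 4-byte element
//   // alignment: ((N * 4) + 3) / 4 * 4
//   size_t bytes = round_up((size_t)*N1 * 4, 4);  // round_up: hypothetical
//   int *A = (int *)__kmpc_alloc_shared(bytes);
//   ...
//   __kmpc_parallel_51(..., outlined, wrapper, captured_vars, /*nargs=*/3);
//   ...
//   __kmpc_free_shared(A, bytes);                 // per distribute iteration
//   ...
//   __kmpc_free_shared(N1, 4);                    // once, at region exit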
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo4v_l76_omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[M:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[RESULT:%.*]]) #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[M_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[N_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[RESULT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[I4:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [3 x ptr], align 8, addrspace(5)
// CHECK-NEXT: [[J:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
// CHECK-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
// CHECK-NEXT: [[M_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[M_ADDR]] to ptr
// CHECK-NEXT: [[N_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[N_ADDR]] to ptr
// CHECK-NEXT: [[VLA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR]] to ptr
// CHECK-NEXT: [[RESULT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RESULT_ADDR]] to ptr
// CHECK-NEXT: [[DOTOMP_IV_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IV]] to ptr
// CHECK-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR_2_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_2]] to ptr
// CHECK-NEXT: [[I_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I]] to ptr
// CHECK-NEXT: [[DOTOMP_LB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_LB]] to ptr
// CHECK-NEXT: [[DOTOMP_UB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_UB]] to ptr
// CHECK-NEXT: [[DOTOMP_STRIDE_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_STRIDE]] to ptr
// CHECK-NEXT: [[DOTOMP_IS_LAST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IS_LAST]] to ptr
// CHECK-NEXT: [[I4_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I4]] to ptr
// CHECK-NEXT: [[__VLA_EXPR0_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[__VLA_EXPR0]] to ptr
// CHECK-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
// CHECK-NEXT: [[J_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[J]] to ptr
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[M]], ptr [[M_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[N]], ptr [[N_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[RESULT]], ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load ptr, ptr [[RESULT_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[N_ADDR_ASCAST]], align 4
// CHECK-NEXT: [[N1:%.*]] = call align 8 ptr @__kmpc_alloc_shared(i64 4)
// CHECK-NEXT: store i32 [[TMP2]], ptr [[N1]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[M_ADDR_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
// CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-NEXT: store i32 [[SUB3]], ptr [[DOTCAPTURE_EXPR_2_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[I_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK: omp.precond.then:
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST_ASCAST]], align 4
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK-NEXT: call void @__kmpc_distribute_static_init_4(ptr addrspacecast (ptr addrspace(1) @[[GLOB2]] to ptr), i32 [[TMP8]], i32 92, ptr [[DOTOMP_IS_LAST_ASCAST]], ptr [[DOTOMP_LB_ASCAST]], ptr [[DOTOMP_UB_ASCAST]], ptr [[DOTOMP_STRIDE_ASCAST]], i32 1, i32 1)
// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_ASCAST]], align 4
// CHECK-NEXT: [[CMP5:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK-NEXT: br i1 [[CMP5]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_2_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP16]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[I4_ASCAST]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[N1]], align 4
// CHECK-NEXT: [[TMP18:%.*]] = zext i32 [[TMP17]] to i64
// CHECK-NEXT: [[TMP19:%.*]] = mul nuw i64 [[TMP18]], 4
// CHECK-NEXT: [[TMP20:%.*]] = add nuw i64 [[TMP19]], 3
// CHECK-NEXT: [[TMP21:%.*]] = udiv i64 [[TMP20]], 4
// CHECK-NEXT: [[TMP22:%.*]] = mul nuw i64 [[TMP21]], 4
// CHECK-NEXT: [[A:%.*]] = call align 4 ptr @__kmpc_alloc_shared(i64 [[TMP22]])
// CHECK-NEXT: store i64 [[TMP18]], ptr [[__VLA_EXPR0_ASCAST]], align 8
// CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[I4_ASCAST]], align 4
// CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[I4_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP24]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[TMP23]], ptr [[ARRAYIDX]], align 4
// CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
// CHECK-NEXT: store ptr [[N1]], ptr [[TMP25]], align 8
// CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
// CHECK-NEXT: [[TMP27:%.*]] = inttoptr i64 [[TMP18]] to ptr
// CHECK-NEXT: store ptr [[TMP27]], ptr [[TMP26]], align 8
// CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 2
// CHECK-NEXT: store ptr [[A]], ptr [[TMP28]], align 8
// CHECK-NEXT: [[TMP29:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP30:%.*]] = load i32, ptr [[TMP29]], align 4
// CHECK-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP30]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo4v_l76_omp_outlined_omp_outlined, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo4v_l76_omp_outlined_omp_outlined_wrapper, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 3)
// CHECK-NEXT: store i32 0, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND:%.*]]
// CHECK: for.cond:
// CHECK-NEXT: [[TMP31:%.*]] = load i32, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[TMP32:%.*]] = load i32, ptr [[N1]], align 4
// CHECK-NEXT: [[CMP7:%.*]] = icmp slt i32 [[TMP31]], [[TMP32]]
// CHECK-NEXT: br i1 [[CMP7]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// CHECK: for.body:
// CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM8:%.*]] = sext i32 [[TMP33]] to i64
// CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[IDXPROM8]]
// CHECK-NEXT: [[TMP34:%.*]] = load i32, ptr [[ARRAYIDX9]], align 4
// CHECK-NEXT: [[TMP35:%.*]] = load i32, ptr [[I4_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM10:%.*]] = sext i32 [[TMP35]] to i64
// CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i64 [[IDXPROM10]]
// CHECK-NEXT: [[TMP36:%.*]] = load i32, ptr [[ARRAYIDX11]], align 4
// CHECK-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP36]], [[TMP34]]
// CHECK-NEXT: store i32 [[ADD12]], ptr [[ARRAYIDX11]], align 4
// CHECK-NEXT: br label [[FOR_INC:%.*]]
// CHECK: for.inc:
// CHECK-NEXT: [[TMP37:%.*]] = load i32, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP37]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[J_ASCAST]], align 4
// CHECK-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP19:![0-9]+]]
// CHECK: for.end:
// CHECK-NEXT: call void @__kmpc_free_shared(ptr [[A]], i64 [[TMP22]])
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP38:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP38]], 1
// CHECK-NEXT: store i32 [[ADD13]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: [[TMP39:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[TMP39]], align 4
// CHECK-NEXT: call void @__kmpc_distribute_static_fini(ptr addrspacecast (ptr addrspace(1) @[[GLOB2]] to ptr), i32 [[TMP40]])
// CHECK-NEXT: br label [[OMP_PRECOND_END]]
// CHECK: omp.precond.end:
// CHECK-NEXT: call void @__kmpc_free_shared(ptr [[N1]], i64 4)
// CHECK-NEXT: ret void
//
//
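// The inner outlined function below recomputes the parallel-for trip
// count from the shared N and uses schedule kind 33 in the
// __kmpc_for_static_init_4 call (which the runtime treats as a chunked
// static schedule), hence the omp.dispatch.cond/body/inc loop that clamps
// each chunk's upper bound and advances the bounds by the stride until
// the chunks are exhausted.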
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo4v_l76_omp_outlined_omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR3]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[N_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[J:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[J3:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
// CHECK-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
// CHECK-NEXT: [[N_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[N_ADDR]] to ptr
// CHECK-NEXT: [[VLA_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[VLA_ADDR]] to ptr
// CHECK-NEXT: [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
// CHECK-NEXT: [[DOTOMP_IV_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IV]] to ptr
// CHECK-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR__ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_]] to ptr
// CHECK-NEXT: [[DOTCAPTURE_EXPR_1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTCAPTURE_EXPR_1]] to ptr
// CHECK-NEXT: [[J_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[J]] to ptr
// CHECK-NEXT: [[DOTOMP_LB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_LB]] to ptr
// CHECK-NEXT: [[DOTOMP_UB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_UB]] to ptr
// CHECK-NEXT: [[DOTOMP_STRIDE_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_STRIDE]] to ptr
// CHECK-NEXT: [[DOTOMP_IS_LAST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IS_LAST]] to ptr
// CHECK-NEXT: [[J3_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[J3]] to ptr
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[N]], ptr [[N_ADDR_ASCAST]], align 8
// CHECK-NEXT: store i64 [[VLA]], ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: store ptr [[A]], ptr [[A_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[N_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[VLA_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[A_ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NEXT: store i32 [[TMP3]], ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP4]], 0
// CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1
// CHECK-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1
// CHECK-NEXT: store i32 [[SUB2]], ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[J_ASCAST]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR__ASCAST]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP5]]
// CHECK-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
// CHECK: omp.precond.then:
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST_ASCAST]], align 4
// CHECK-NEXT: [[TMP7:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[TMP7]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(ptr addrspacecast (ptr addrspace(1) @[[GLOB3]] to ptr), i32 [[TMP8]], i32 33, ptr [[DOTOMP_IS_LAST_ASCAST]], ptr [[DOTOMP_LB_ASCAST]], ptr [[DOTOMP_UB_ASCAST]], ptr [[DOTOMP_STRIDE_ASCAST]], i32 1, i32 1)
// CHECK-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// CHECK: omp.dispatch.cond:
// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[TMP9]], [[TMP10]]
// CHECK-NEXT: br i1 [[CMP4]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTCAPTURE_EXPR_1_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[TMP11]], [[COND_TRUE]] ], [ [[TMP12]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: store i32 [[TMP13]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[CMP5:%.*]] = icmp sle i32 [[TMP14]], [[TMP15]]
// CHECK-NEXT: br i1 [[CMP5]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// CHECK: omp.dispatch.body:
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[CMP6:%.*]] = icmp sle i32 [[TMP16]], [[TMP17]]
// CHECK-NEXT: br i1 [[CMP6]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP18]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[J3_ASCAST]], align 4
// CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[J3_ASCAST]], align 4
// CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[J3_ASCAST]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP20]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i64 [[IDXPROM]]
// CHECK-NEXT: store i32 [[TMP19]], ptr [[ARRAYIDX]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP21]], 1
// CHECK-NEXT: store i32 [[ADD7]], ptr [[DOTOMP_IV_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// CHECK: omp.dispatch.inc:
// CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP22]], [[TMP23]]
// CHECK-NEXT: store i32 [[ADD8]], ptr [[DOTOMP_LB_ASCAST]], align 4
// CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// CHECK-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP24]], [[TMP25]]
// CHECK-NEXT: store i32 [[ADD9]], ptr [[DOTOMP_UB_ASCAST]], align 4
// CHECK-NEXT: br label [[OMP_DISPATCH_COND]]
// CHECK: omp.dispatch.end:
// CHECK-NEXT: [[TMP26:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// CHECK-NEXT: [[TMP27:%.*]] = load i32, ptr [[TMP26]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr addrspacecast (ptr addrspace(1) @[[GLOB3]] to ptr), i32 [[TMP27]])
// CHECK-NEXT: br label [[OMP_PRECOND_END]]
// CHECK: omp.precond.end:
// CHECK-NEXT: ret void
//
//
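// The foo4 wrapper mirrors the foo3 one: it unpacks three shared
// arguments -- a pointer to the shared N, the VLA extent reloaded as an
// i64 from a pointer-sized slot, and the VLA base pointer -- and forwards
// them to the outlined parallel-for body.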
// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo4v_l76_omp_outlined_omp_outlined_wrapper
// CHECK-SAME: (i16 noundef zeroext [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR7]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i16, align 2, addrspace(5)
// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[DOTZERO_ADDR:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT: [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT: [[DOTADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTADDR]] to ptr
// CHECK-NEXT: [[DOTADDR1_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTADDR1]] to ptr
// CHECK-NEXT: [[DOTZERO_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTZERO_ADDR]] to ptr
// CHECK-NEXT: [[GLOBAL_ARGS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[GLOBAL_ARGS]] to ptr
// CHECK-NEXT: store i16 [[TMP0]], ptr [[DOTADDR_ASCAST]], align 2
// CHECK-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1_ASCAST]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTZERO_ADDR_ASCAST]], align 4
// CHECK-NEXT: call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS_ASCAST]])
// CHECK-NEXT: [[TMP2:%.*]] = load ptr, ptr [[GLOBAL_ARGS_ASCAST]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 0
// CHECK-NEXT: [[TMP4:%.*]] = load ptr, ptr [[TMP3]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 1
// CHECK-NEXT: [[TMP6:%.*]] = load i64, ptr [[TMP5]], align 8
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr, ptr [[TMP2]], i64 2
// CHECK-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 8
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z4foo4v_l76_omp_outlined_omp_outlined(ptr [[DOTADDR1_ASCAST]], ptr [[DOTZERO_ADDR_ASCAST]], ptr [[TMP4]], i64 [[TMP6]], ptr [[TMP8]]) #[[ATTR4]]
// CHECK-NEXT: ret void
//