clang-p2996/clang/test/OpenMP/target_teams_map_codegen.cpp
Joseph Huber 5300263c70 [OpenMP] Add loop tripcount argument to kernel launch and remove push function
Previously we added the `push_target_tripcount` function to send the
loop tripcount to the device runtime so that we knew how to configure
the teams and threads to execute the loop of a teams distribute
construct. This was implemented as a separate function mostly to avoid
changing the interface, for backwards compatibility. Now that we've
changed the interface anyway, and the new interface can take an
arbitrary number of arguments via the struct without changing the ABI,
we can move the tripcount into it. This simplifies the runtime by
removing unnecessary state between calls.

Depends on D128550

Reviewed By: jdoerfert

Differential Revision: https://reviews.llvm.org/D128816
2022-07-08 14:44:16 -04:00
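
The host-side codegen checked below materializes a kernel arguments
struct and stores the tripcount as its final field. A minimal sketch of
that layout, reconstructed from the IR checks in this test (the field
names are assumptions for illustration; only the field order and types
are confirmed by the CHECK lines, and the authoritative definition
lives in the offloading runtime headers):

// Sketch of %struct.__tgt_kernel_arguments as built by the host
// codegen in this test. Field names are inferred, not authoritative.
struct __tgt_kernel_arguments {
  int32_t Version;    // field 0: store i32 1
  int32_t NumArgs;    // field 1: number of offload arguments
  void **ArgBasePtrs; // field 2: .offload_baseptrs array
  void **ArgPtrs;     // field 3: .offload_ptrs array
  int64_t *ArgSizes;  // field 4: @.offload_sizes
  int64_t *ArgTypes;  // field 5: @.offload_maptypes
  void **ArgNames;    // field 6: null in these kernels
  void **ArgMappers;  // field 7: null in these kernels
  int64_t Tripcount;  // field 8: loop tripcount; store i64 0 here,
                      // since these target regions have no loop
};

This struct is passed to __tgt_target_kernel, replacing the separate
push call; new fields can be appended without breaking the ABI.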


// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test host codegen.
// RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -verify -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK3
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -verify -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -verify -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -DHAS_INT128 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK5
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK7
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -fopenmp-version=50 -x c++ -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -std=c++11 -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK7
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
void mapWithPrivate() {
int x, y;
#pragma omp target teams private(x) map(x,y) private(y)
;
}
void mapWithFirstprivate() {
int x, y;
#pragma omp target teams firstprivate(x) map(x,y) firstprivate(y)
;
}
void mapWithReduction() {
int x, y;
#pragma omp target teams reduction(+:x) map(x,y) reduction(+:y)
;
}
void mapFrom() {
int x;
#pragma omp target teams firstprivate(x) map(from:x)
;
}
void mapTo() {
int x;
#pragma omp target teams firstprivate(x) map(to:x)
;
}
void mapAlloc() {
int x;
#pragma omp target teams firstprivate(x) map(alloc:x)
;
}
void mapArray() {
int x[77], y[88], z[99];
#pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(x,y,z)
;
#pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(to:x,y,z)
;
}
# if HAS_INT128
void mapInt128() {
__int128 x, y, z;
#pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(x,y,z)
;
#pragma omp target teams private(x) firstprivate(y) reduction(+:z) map(from:x,y,z)
;
}
# endif
#endif
// CHECK1-LABEL: define {{[^@]+}}@_Z14mapWithPrivatev
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
// CHECK1-NEXT: store i32* [[Y]], i32** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
// CHECK1-NEXT: store i32* [[Y]], i32** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP12]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 2, i32* [[TMP13]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// CHECK1-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27() #[[ATTR2:[0-9]+]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
// CHECK1-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z19mapWithFirstprivatev
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
// CHECK1-NEXT: store i32* [[Y]], i32** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
// CHECK1-NEXT: store i32* [[Y]], i32** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP12]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 2, i32* [[TMP13]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// CHECK1-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
// CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[Y_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK1-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* [[X_CASTED]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[Y_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV1]], align 4
// CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[Y_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP5]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]], i64 noundef [[Y:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8
// CHECK1-NEXT: store i64 [[Y]], i64* [[Y_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
// CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[Y_ADDR]] to i32*
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z16mapWithReductionv
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
// CHECK1-NEXT: store i32* [[Y]], i32** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
// CHECK1-NEXT: store i32* [[Y]], i32** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP12]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 2, i32* [[TMP13]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP16]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP17]], align 8
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP18]], align 8
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// CHECK1-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
// CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK1-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X1:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[Y2:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK1-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
// CHECK1-NEXT: store i32 0, i32* [[X1]], align 4
// CHECK1-NEXT: store i32 0, i32* [[Y2]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
// CHECK1-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
// CHECK1-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i64 16, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
// CHECK1-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i32 [[ADD3]], i32* [[TMP1]], align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
// CHECK1-NEXT: [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
// CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
// CHECK1-NEXT: [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK1-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK1-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK1-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z7mapFromv
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP7]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 1, i32* [[TMP8]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45(i32* [[X]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
// CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z5mapTov
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP7]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 1, i32* [[TMP8]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51(i32* [[X]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
// CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z8mapAllocv
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK1-NEXT: store i32* [[X]], i32** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP7]], align 4
// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 1, i32* [[TMP8]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i64** [[TMP12]], align 8
// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP15]], align 8
// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57(i32* [[X]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
// CHECK1-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
// CHECK1-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8
// CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z8mapArrayv
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X:%.*]] = alloca [77 x i32], align 4
// CHECK1-NEXT: [[Y:%.*]] = alloca [88 x i32], align 4
// CHECK1-NEXT: [[Z:%.*]] = alloca [99 x i32], align 4
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [88 x i32]**
// CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [88 x i32]**
// CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [99 x i32]**
// CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [99 x i32]**
// CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to [77 x i32]**
// CHECK1-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to [77 x i32]**
// CHECK1-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP17]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 3, i32* [[TMP18]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP15]], i8*** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP16]], i8*** [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.18, i32 0, i32 0), i64** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.19, i32 0, i32 0), i64** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
// CHECK1-NEXT: br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to [88 x i32]**
// CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to [88 x i32]**
// CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to [99 x i32]**
// CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP34]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [99 x i32]**
// CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP36]], align 8
// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP37]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2
// CHECK1-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [77 x i32]**
// CHECK1-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP39]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2
// CHECK1-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to [77 x i32]**
// CHECK1-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP45]], align 4
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1
// CHECK1-NEXT: store i32 3, i32* [[TMP46]], align 4
// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP43]], i8*** [[TMP47]], align 8
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP44]], i8*** [[TMP48]], align 8
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.22, i32 0, i32 0), i64** [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.23, i32 0, i32 0), i64** [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP51]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP52]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP53]], align 8
// CHECK1-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]])
// CHECK1-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0
// CHECK1-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK1: omp_offload.failed5:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK1: omp_offload.cont6:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
// CHECK1-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
// CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
// CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
// CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
// CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
// CHECK1-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4
// CHECK1-NEXT: [[X:%.*]] = alloca [77 x i32], align 4
// CHECK1-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
// CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
// CHECK1-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK1-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.17, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done6:
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
// CHECK1: omp.arraycpy.body8:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
// CHECK1: omp.arraycpy.done14:
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.17
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done2:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
// CHECK1-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
// CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
// CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
// CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..20 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..20
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
// CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
// CHECK1-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4
// CHECK1-NEXT: [[X:%.*]] = alloca [77 x i32], align 4
// CHECK1-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
// CHECK1-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK1: omp.arrayinit.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK1-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK1: omp.arrayinit.done:
// CHECK1-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
// CHECK1-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK1-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK1-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.21, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done6:
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
// CHECK1: omp.arraycpy.body8:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
// CHECK1-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
// CHECK1: omp.arraycpy.done14:
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.21
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK1: omp.arraycpy.body:
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK1-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK1: omp.arraycpy.done2:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z9mapInt128v
// CHECK1-SAME: () #[[ATTR0]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[X:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[Y:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[Z:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i128**
// CHECK1-NEXT: store i128* [[Y]], i128** [[TMP1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i128**
// CHECK1-NEXT: store i128* [[Y]], i128** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i128**
// CHECK1-NEXT: store i128* [[Z]], i128** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i128**
// CHECK1-NEXT: store i128* [[Z]], i128** [[TMP8]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i128**
// CHECK1-NEXT: store i128* [[X]], i128** [[TMP11]], align 8
// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i128**
// CHECK1-NEXT: store i128* [[X]], i128** [[TMP13]], align 8
// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8
// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP17]], align 4
// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK1-NEXT: store i32 3, i32* [[TMP18]], align 4
// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP15]], i8*** [[TMP19]], align 8
// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP16]], i8*** [[TMP20]], align 8
// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.26, i32 0, i32 0), i64** [[TMP21]], align 8
// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.27, i32 0, i32 0), i64** [[TMP22]], align 8
// CHECK1-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP23]], align 8
// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP24]], align 8
// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP25]], align 8
// CHECK1-NEXT: [[TMP26:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK1-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
// CHECK1-NEXT: br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK1: omp_offload.failed:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72(i128* [[Y]], i128* [[Z]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK1: omp_offload.cont:
// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i128**
// CHECK1-NEXT: store i128* [[Y]], i128** [[TMP29]], align 8
// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i128**
// CHECK1-NEXT: store i128* [[Y]], i128** [[TMP31]], align 8
// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0
// CHECK1-NEXT: store i8* null, i8** [[TMP32]], align 8
// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1
// CHECK1-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i128**
// CHECK1-NEXT: store i128* [[Z]], i128** [[TMP34]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1
// CHECK1-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i128**
// CHECK1-NEXT: store i128* [[Z]], i128** [[TMP36]], align 8
// CHECK1-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 1
// CHECK1-NEXT: store i8* null, i8** [[TMP37]], align 8
// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2
// CHECK1-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i128**
// CHECK1-NEXT: store i128* [[X]], i128** [[TMP39]], align 8
// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2
// CHECK1-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i128**
// CHECK1-NEXT: store i128* [[X]], i128** [[TMP41]], align 8
// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 2
// CHECK1-NEXT: store i8* null, i8** [[TMP42]], align 8
// CHECK1-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK1-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK1-NEXT: [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK1-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0
// CHECK1-NEXT: store i32 1, i32* [[TMP45]], align 4
// CHECK1-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1
// CHECK1-NEXT: store i32 3, i32* [[TMP46]], align 4
// CHECK1-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2
// CHECK1-NEXT: store i8** [[TMP43]], i8*** [[TMP47]], align 8
// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3
// CHECK1-NEXT: store i8** [[TMP44]], i8*** [[TMP48]], align 8
// CHECK1-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64** [[TMP49]], align 8
// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5
// CHECK1-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.31, i32 0, i32 0), i64** [[TMP50]], align 8
// CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6
// CHECK1-NEXT: store i8** null, i8*** [[TMP51]], align 8
// CHECK1-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7
// CHECK1-NEXT: store i8** null, i8*** [[TMP52]], align 8
// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8
// CHECK1-NEXT: store i64 0, i64* [[TMP53]], align 8
// CHECK1-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]])
// CHECK1-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0
// CHECK1-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK1: omp_offload.failed5:
// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74(i128* [[Y]], i128* [[Z]]) #[[ATTR2]]
// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK1: omp_offload.cont6:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72
// CHECK1-SAME: (i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8
// CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8
// CHECK1-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8
// CHECK1-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..24
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8
// CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8
// CHECK1-NEXT: [[Y1:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[X:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[Z2:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[TMP:%.*]] = alloca i128, align 16
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8
// CHECK1-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
// CHECK1-NEXT: store i128 [[TMP2]], i128* [[Y1]], align 16
// CHECK1-NEXT: store i128 0, i128* [[Z2]], align 16
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
// CHECK1-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.25, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
// CHECK1-NEXT: [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: store i128 [[ADD]], i128* [[TMP1]], align 16
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
// CHECK1-NEXT: call void @__atomic_load(i64 noundef 16, i8* noundef [[TMP12]], i8* noundef [[TMP13]], i32 noundef signext 0)
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
// CHECK1-NEXT: store i128 [[TMP14]], i128* [[TMP]], align 16
// CHECK1-NEXT: [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
// CHECK1-NEXT: [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
// CHECK1-NEXT: store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
// CHECK1-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i64 noundef 16, i8* noundef [[TMP17]], i8* noundef [[TMP18]], i8* noundef [[TMP19]], i32 noundef signext 0, i32 noundef signext 0)
// CHECK1-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.25
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
// CHECK1-NEXT: [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
// CHECK1-NEXT: [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i128 [[ADD]], i128* [[TMP11]], align 16
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74
// CHECK1-SAME: (i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8
// CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8
// CHECK1-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8
// CHECK1-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..28 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..28
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8
// CHECK1-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8
// CHECK1-NEXT: [[Y1:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[X:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[Z2:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
// CHECK1-NEXT: [[TMP:%.*]] = alloca i128, align 16
// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8
// CHECK1-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
// CHECK1-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
// CHECK1-NEXT: store i128 [[TMP2]], i128* [[Y1]], align 16
// CHECK1-NEXT: store i128 0, i128* [[Z2]], align 16
// CHECK1-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
// CHECK1-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK1-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK1-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.29, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK1-NEXT: ]
// CHECK1: .omp.reduction.case1:
// CHECK1-NEXT: [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
// CHECK1-NEXT: [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
// CHECK1-NEXT: store i128 [[ADD]], i128* [[TMP1]], align 16
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.case2:
// CHECK1-NEXT: [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
// CHECK1-NEXT: [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
// CHECK1-NEXT: call void @__atomic_load(i64 noundef 16, i8* noundef [[TMP12]], i8* noundef [[TMP13]], i32 noundef signext 0)
// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK1: atomic_cont:
// CHECK1-NEXT: [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
// CHECK1-NEXT: store i128 [[TMP14]], i128* [[TMP]], align 16
// CHECK1-NEXT: [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
// CHECK1-NEXT: [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
// CHECK1-NEXT: store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
// CHECK1-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i64 noundef 16, i8* noundef [[TMP17]], i8* noundef [[TMP18]], i8* noundef [[TMP19]], i32 noundef signext 0, i32 noundef signext 0)
// CHECK1-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK1: atomic_exit:
// CHECK1-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK1: .omp.reduction.default:
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.29
// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
// CHECK1-NEXT: [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
// CHECK1-NEXT: [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
// CHECK1-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
// CHECK1-NEXT: store i128 [[ADD]], i128* [[TMP11]], align 16
// CHECK1-NEXT: ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK1-SAME: () #[[ATTR7:[0-9]+]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK1-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z14mapWithPrivatev
// CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
// CHECK3-NEXT: store i32* [[Y]], i32** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
// CHECK3-NEXT: store i32* [[Y]], i32** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP12]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 2, i32* [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes, i32 0, i32 0), i64** [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes, i32 0, i32 0), i64** [[TMP17]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP20]], align 8
// CHECK3-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// CHECK3-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27() #[[ATTR2:[0-9]+]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
// CHECK3-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z19mapWithFirstprivatev
// CHECK3-SAME: () #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
// CHECK3-NEXT: store i32* [[Y]], i32** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
// CHECK3-NEXT: store i32* [[Y]], i32** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP12]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 2, i32* [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.2, i32 0, i32 0), i64** [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.3, i32 0, i32 0), i64** [[TMP17]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP20]], align 8
// CHECK3-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// CHECK3-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret void
//
//
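// NOTE: The next block checks the device-entry stub for mapWithFirstprivate.
// Firstprivate scalars are forwarded by value: each i32 is loaded through the
// mapped pointer, staged in a *_CASTED alloca, and passed as a plain i32 to
// __kmpc_fork_teams. A minimal sketch of the assumed source shape (kept in a
// comment so the RUN lines and the autogenerated assertions are unaffected):
//
//   void mapWithFirstprivate() {
//     int x, y;
//   #pragma omp target teams firstprivate(x) map(x, y) firstprivate(y)
//     ;
//   }
//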
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
// CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[Y_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK3-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: store i32 [[TMP2]], i32* [[X_CASTED]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = load i32, i32* [[X_CASTED]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: store i32 [[TMP4]], i32* [[Y_CASTED]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[Y_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP5]])
// CHECK3-NEXT: ret void
//
//
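// NOTE: The teams-outlined body for the firstprivate case is empty apart from
// spilling its parameters, so these checks only pin down the calling
// convention: the firstprivate scalars arrive as i32 values, not pointers.
//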
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4
// CHECK3-NEXT: store i32 [[Y]], i32* [[Y_ADDR]], align 4
// CHECK3-NEXT: ret void
//
//
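// NOTE: The host wrapper below stages two map entries (x and y) and then
// fills a __tgt_kernel_arguments struct field by field: version (1), number
// of arguments (2), base pointers, pointers, sizes, map types, names (null),
// mappers (null), and finally an i64 loop tripcount, which is 0 here because
// a bare `target teams` region carries no `distribute` loop. Assumed source
// shape (illustrative):
//
//   void mapWithReduction() {
//     int x, y;
//   #pragma omp target teams reduction(+ : x) map(x, y) reduction(+ : y)
//     ;
//   }
//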
// CHECK3-LABEL: define {{[^@]+}}@_Z16mapWithReductionv
// CHECK3-SAME: () #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to i32**
// CHECK3-NEXT: store i32* [[Y]], i32** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to i32**
// CHECK3-NEXT: store i32* [[Y]], i32** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP12]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 2, i32* [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP10]], i8*** [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP11]], i8*** [[TMP15]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.5, i32 0, i32 0), i64** [[TMP16]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.6, i32 0, i32 0), i64** [[TMP17]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP20]], align 8
// CHECK3-NEXT: [[TMP21:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP22:%.*]] = icmp ne i32 [[TMP21]], 0
// CHECK3-NEXT: br i1 [[TMP22]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39(i32* [[X]], i32* [[Y]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
// CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK3-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
// CHECK3-NEXT: ret void
//
//
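// NOTE: The outlined teams region below follows the standard __kmpc_reduce
// protocol for scalar reductions: private copies X1 and Y2 are initialized to
// 0 (the identity for +), their addresses are packed into a reduction list
// (2 entries, 8 bytes of i8* payload on i386), and the runtime's return value
// picks the combine path. In commented pseudo-C, the dispatch is roughly:
//
//   switch (__kmpc_reduce(loc, tid, 2, 8, list, combiner, &lock)) {
//   case 1: *x += x1; *y += y2; __kmpc_end_reduce(...); break; // one thread combines
//   case 2: atomic_add(x, x1); atomic_add(y, y2); __kmpc_end_reduce(...); break;
//   default: break; // nothing left to do
//   }
//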
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[X1:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[Y2:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK3-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
// CHECK3-NEXT: store i32 0, i32* [[X1]], align 4
// CHECK3-NEXT: store i32 0, i32* [[Y2]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
// CHECK3-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
// CHECK3-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 4
// CHECK3-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i32 8, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.reduction.case1:
// CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK3-NEXT: store i32 [[ADD3]], i32* [[TMP1]], align 4
// CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.case2:
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
// CHECK3-NEXT: [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
// CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.default:
// CHECK3-NEXT: ret void
//
//
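// NOTE: The combiner handed to __kmpc_reduce receives two opaque pointers,
// recovers the [2 x i8*] reduction lists behind them, and folds the RHS
// private copies into the LHS element by element. In C terms (sketch,
// assumed names):
//
//   static void combiner(void *lhs, void *rhs) {
//     void **L = (void **)lhs, **R = (void **)rhs;
//     *(int *)L[0] += *(int *)R[0]; // x
//     *(int *)L[1] += *(int *)R[1]; // y
//   }
//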
// CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK3-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 1
// CHECK3-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4
// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK3-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK3-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4
// CHECK3-NEXT: ret void
//
//
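// NOTE: mapFrom, mapTo, and mapAlloc share one skeleton below: a single map
// entry for x, a one-argument kernel-arguments struct, and a by-value
// hand-off to the outlined teams region. The behavioral difference is
// confined to the @.offload_maptypes.* constants stored into field 5 (from
// vs. to vs. no transfer bits for alloc), which is why the CHECK3 lines for
// the trio are near-identical. Assumed source shape for the first of the
// three (illustrative):
//
//   void mapFrom() {
//     int x;
//   #pragma omp target teams firstprivate(x) map(from : x)
//     ;
//   }
//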
// CHECK3-LABEL: define {{[^@]+}}@_Z7mapFromv
// CHECK3-SAME: () #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP7]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 1, i32* [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i64** [[TMP12]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP15]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45(i32* [[X]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
// CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP2]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..7
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z5mapTov
// CHECK3-SAME: () #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP7]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 1, i32* [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i64** [[TMP12]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP15]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51(i32* [[X]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
// CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP2]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@_Z8mapAllocv
// CHECK3-SAME: () #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to i32**
// CHECK3-NEXT: store i32* [[X]], i32** [[TMP3]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP7]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 1, i32* [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP5]], i8*** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP6]], i8*** [[TMP10]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i64** [[TMP12]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP15]], align 8
// CHECK3-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0
// CHECK3-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57(i32* [[X]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
// CHECK3-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4
// CHECK3-NEXT: ret void
//
//
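// NOTE: mapArray launches two back-to-back target regions (source lines 63
// and 65) over the same three stack arrays, so every host-side buffer below
// exists twice (DOTOFFLOAD_BASEPTRS vs. DOTOFFLOAD_BASEPTRS1, KERNEL_ARGS vs.
// KERNEL_ARGS4, and so on); each launch maps y, z, and x as three entries.
// Assumed source shape, with clauses inferred from the IR (illustrative):
//
//   void mapArray() {
//     int x[77], y[88], z[99];
//   #pragma omp target teams private(x) firstprivate(y) map(x, y, z) reduction(+ : z)
//     ;
//   #pragma omp target teams private(x) firstprivate(y) map(x, y, z) reduction(+ : z)
//     ;
//   }
//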
// CHECK3-LABEL: define {{[^@]+}}@_Z8mapArrayv
// CHECK3-SAME: () #[[ATTR0]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[X:%.*]] = alloca [77 x i32], align 4
// CHECK3-NEXT: [[Y:%.*]] = alloca [88 x i32], align 4
// CHECK3-NEXT: [[Z:%.*]] = alloca [99 x i32], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_PTRS2:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [3 x i8*], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [88 x i32]**
// CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to [88 x i32]**
// CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP3]], align 4
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP4]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i8** [[TMP5]] to [99 x i32]**
// CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8** [[TMP7]] to [99 x i32]**
// CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP8]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to [77 x i32]**
// CHECK3-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP11]], align 4
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to [77 x i32]**
// CHECK3-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP13]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP14]], align 4
// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP17]], align 4
// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 1
// CHECK3-NEXT: store i32 3, i32* [[TMP18]], align 4
// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP15]], i8*** [[TMP19]], align 4
// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP16]], i8*** [[TMP20]], align 4
// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.18, i32 0, i32 0), i64** [[TMP21]], align 4
// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.19, i32 0, i32 0), i64** [[TMP22]], align 4
// CHECK3-NEXT: [[TMP23:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP23]], align 4
// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP24]], align 4
// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP25]], align 8
// CHECK3-NEXT: [[TMP26:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS]])
// CHECK3-NEXT: [[TMP27:%.*]] = icmp ne i32 [[TMP26]], 0
// CHECK3-NEXT: br i1 [[TMP27]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK3: omp_offload.failed:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK3: omp_offload.cont:
// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to [88 x i32]**
// CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP29]], align 4
// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to [88 x i32]**
// CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[TMP31]], align 4
// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 0
// CHECK3-NEXT: store i8* null, i8** [[TMP32]], align 4
// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 1
// CHECK3-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to [99 x i32]**
// CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP34]], align 4
// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 1
// CHECK3-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to [99 x i32]**
// CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[TMP36]], align 4
// CHECK3-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 1
// CHECK3-NEXT: store i8* null, i8** [[TMP37]], align 4
// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 2
// CHECK3-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to [77 x i32]**
// CHECK3-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP39]], align 4
// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 2
// CHECK3-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to [77 x i32]**
// CHECK3-NEXT: store [77 x i32]* [[X]], [77 x i32]** [[TMP41]], align 4
// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i32 0, i32 2
// CHECK3-NEXT: store i8* null, i8** [[TMP42]], align 4
// CHECK3-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0
// CHECK3-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0
// CHECK3-NEXT: [[KERNEL_ARGS4:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS]], align 8
// CHECK3-NEXT: [[TMP45:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 0
// CHECK3-NEXT: store i32 1, i32* [[TMP45]], align 4
// CHECK3-NEXT: [[TMP46:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 1
// CHECK3-NEXT: store i32 3, i32* [[TMP46]], align 4
// CHECK3-NEXT: [[TMP47:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 2
// CHECK3-NEXT: store i8** [[TMP43]], i8*** [[TMP47]], align 4
// CHECK3-NEXT: [[TMP48:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 3
// CHECK3-NEXT: store i8** [[TMP44]], i8*** [[TMP48]], align 4
// CHECK3-NEXT: [[TMP49:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 4
// CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.22, i32 0, i32 0), i64** [[TMP49]], align 4
// CHECK3-NEXT: [[TMP50:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 5
// CHECK3-NEXT: store i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.23, i32 0, i32 0), i64** [[TMP50]], align 4
// CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 6
// CHECK3-NEXT: store i8** null, i8*** [[TMP51]], align 4
// CHECK3-NEXT: [[TMP52:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 7
// CHECK3-NEXT: store i8** null, i8*** [[TMP52]], align 4
// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]], i32 0, i32 8
// CHECK3-NEXT: store i64 0, i64* [[TMP53]], align 8
// CHECK3-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_kernel(%struct.ident_t* @[[GLOB1]], i64 -1, i32 0, i32 0, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65.region_id, %struct.__tgt_kernel_arguments* [[KERNEL_ARGS4]])
// CHECK3-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0
// CHECK3-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]]
// CHECK3: omp_offload.failed5:
// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65([88 x i32]* [[Y]], [99 x i32]* [[Z]]) #[[ATTR2]]
// CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT6]]
// CHECK3: omp_offload.cont6:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
// CHECK3-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
// CHECK3-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
// CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
// CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
// CHECK3-NEXT: ret void
//
//
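// NOTE: The outlined region below combines three privatization patterns in a
// single body: the firstprivate copy of y arrives via llvm.memcpy (352 bytes,
// i.e. 88 x i32), the private x is a bare alloca, and the reduction copy of z
// is zero-filled by an explicit element loop before its address goes to
// __kmpc_reduce (list length 1, 4 bytes of payload = one i8* on i386).
// The identity initialization is, in effect:
//
//   for (int *p = z2, *e = z2 + 99; p != e; ++p) *p = 0; // sketch
//
// The case-1/case-2 arms then walk all 99 elements, combining with plain
// `add nsw` stores or with per-element atomicrmw adds, respectively.
//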
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
// CHECK3-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
// CHECK3-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4
// CHECK3-NEXT: [[X:%.*]] = alloca [77 x i32], align 4
// CHECK3-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
// CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
// CHECK3-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
// CHECK3-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK3: omp.arrayinit.body:
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK3-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK3: omp.arrayinit.done:
// CHECK3-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
// CHECK3-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK3-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.17, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.reduction.case1:
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
// CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK3: omp.arraycpy.body:
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
// CHECK3: omp.arraycpy.done6:
// CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.case2:
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
// CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
// CHECK3: omp.arraycpy.body8:
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
// CHECK3: omp.arraycpy.done14:
// CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.default:
// CHECK3-NEXT: ret void
//
//
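// NOTE: The array combiner mirrors the scalar one above, with a single
// reduction-list slot and an element loop in place of two scalar adds.
// Roughly, in C (sketch, assumed names):
//
//   static void array_combiner(void *lhs, void *rhs) {
//     int *L = *(int **)lhs, *R = *(int **)rhs;
//     for (int i = 0; i != 99; ++i) L[i] += R[i];
//   }
//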
// CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.17
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
// CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK3: omp.arraycpy.body:
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK3: omp.arraycpy.done2:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
// CHECK3-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
// CHECK3-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
// CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
// CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..20 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..20
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR1]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK3-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
// CHECK3-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
// CHECK3-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4
// CHECK3-NEXT: [[X:%.*]] = alloca [77 x i32], align 4
// CHECK3-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4
// CHECK3-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK3-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
// CHECK3-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
// CHECK3-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
// CHECK3-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
// CHECK3-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
// CHECK3-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
// CHECK3-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
// CHECK3-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK3: omp.arrayinit.body:
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK3-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK3: omp.arrayinit.done:
// CHECK3-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
// CHECK3-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
// CHECK3-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK3-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK3-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4
// CHECK3-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK3-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK3-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.21, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK3-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK3-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK3-NEXT: ]
// CHECK3: .omp.reduction.case1:
// CHECK3-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
// CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK3: omp.arraycpy.body:
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
// CHECK3: omp.arraycpy.done6:
// CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.case2:
// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
// CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
// CHECK3: omp.arraycpy.body8:
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK3-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
// CHECK3-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
// CHECK3: omp.arraycpy.done14:
// CHECK3-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK3-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK3: .omp.reduction.default:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.21
// CHECK3-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK3-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK3-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK3-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK3-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK3-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK3-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK3-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK3-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK3-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK3-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
// CHECK3-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK3: omp.arraycpy.body:
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK3-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK3-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK3-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK3-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK3-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK3: omp.arraycpy.done2:
// CHECK3-NEXT: ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
// CHECK3-SAME: () #[[ATTR6:[0-9]+]] {
// CHECK3-NEXT: entry:
// CHECK3-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK3-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
// CHECK5-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
// CHECK5-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[Y_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK5-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP2]], i32* [[CONV]], align 4
// CHECK5-NEXT: [[TMP3:%.*]] = load i64, i64* [[X_CASTED]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[Y_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP4]], i32* [[CONV1]], align 4
// CHECK5-NEXT: [[TMP5:%.*]] = load i64, i64* [[Y_CASTED]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP5]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]], i64 noundef [[Y:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8
// CHECK5-NEXT: store i64 [[Y]], i64* [[Y_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
// CHECK5-NEXT: [[CONV1:%.*]] = bitcast i64* [[Y_ADDR]] to i32*
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
// CHECK5-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK5-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[X1:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[Y2:%.*]] = alloca i32, align 4
// CHECK5-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK5-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 8
// CHECK5-NEXT: store i32 0, i32* [[X1]], align 4
// CHECK5-NEXT: store i32 0, i32* [[Y2]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK5-NEXT: [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
// CHECK5-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 8
// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK5-NEXT: [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
// CHECK5-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK5-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK5-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i64 16, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK5-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK5-NEXT: ]
// CHECK5: .omp.reduction.case1:
// CHECK5-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT: [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
// CHECK5-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK5-NEXT: store i32 [[ADD3]], i32* [[TMP1]], align 4
// CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK5: .omp.reduction.case2:
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
// CHECK5-NEXT: [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
// CHECK5-NEXT: [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
// CHECK5-NEXT: [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
// CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK5: .omp.reduction.default:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK5-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK5-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
// CHECK5-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK5-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 1
// CHECK5-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
// CHECK5-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
// CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1
// CHECK5-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK5-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK5-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK5-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK5-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK5-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK5-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK5-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
// CHECK5-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP2]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
// CHECK5-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP2]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
// CHECK5-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[X_CASTED:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_CASTED]] to i32*
// CHECK5-NEXT: store i32 [[TMP1]], i32* [[CONV]], align 4
// CHECK5-NEXT: [[TMP2:%.*]] = load i64, i64* [[X_CASTED]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP2]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[X:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[X_ADDR:%.*]] = alloca i64, align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i64 [[X]], i64* [[X_ADDR]], align 8
// CHECK5-NEXT: [[CONV:%.*]] = bitcast i64* [[X_ADDR]] to i32*
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
// CHECK5-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
// CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
// CHECK5-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
// CHECK5-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
// CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
// CHECK5-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4
// CHECK5-NEXT: [[X:%.*]] = alloca [77 x i32], align 4
// CHECK5-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4
// CHECK5-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
// CHECK5-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
// CHECK5-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
// CHECK5-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
// CHECK5-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
// CHECK5-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK5: omp.arrayinit.body:
// CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK5-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK5: omp.arrayinit.done:
// CHECK5-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
// CHECK5-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK5-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK5-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK5-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.7, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK5-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK5-NEXT: ]
// CHECK5: .omp.reduction.case1:
// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
// CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK5: omp.arraycpy.body:
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
// CHECK5: omp.arraycpy.done6:
// CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK5: .omp.reduction.case2:
// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
// CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
// CHECK5: omp.arraycpy.body8:
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK5-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
// CHECK5-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
// CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
// CHECK5: omp.arraycpy.done14:
// CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK5: .omp.reduction.default:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.7
// CHECK5-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK5-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK5-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK5-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
// CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK5: omp.arraycpy.body:
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK5: omp.arraycpy.done2:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
// CHECK5-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
// CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
// CHECK5-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
// CHECK5-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 8
// CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 8
// CHECK5-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4
// CHECK5-NEXT: [[X:%.*]] = alloca [77 x i32], align 4
// CHECK5-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4
// CHECK5-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 8
// CHECK5-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
// CHECK5-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
// CHECK5-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i64 352, i1 false)
// CHECK5-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
// CHECK5-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i64 99
// CHECK5-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK5: omp.arrayinit.body:
// CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK5-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK5: omp.arrayinit.done:
// CHECK5-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
// CHECK5-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
// CHECK5-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK5-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK5-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 8
// CHECK5-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK5-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK5-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i64 8, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.9, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK5-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK5-NEXT: ]
// CHECK5: .omp.reduction.case1:
// CHECK5-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
// CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK5: omp.arraycpy.body:
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK5-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
// CHECK5: omp.arraycpy.done6:
// CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK5: .omp.reduction.case2:
// CHECK5-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i64 99
// CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
// CHECK5: omp.arraycpy.body8:
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK5-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
// CHECK5-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
// CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
// CHECK5: omp.arraycpy.done14:
// CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK5: .omp.reduction.default:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.9
// CHECK5-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK5-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK5-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK5-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK5-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i64 99
// CHECK5-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK5: omp.arraycpy.body:
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK5-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK5-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK5-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK5-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK5-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK5-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK5-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK5: omp.arraycpy.done2:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l72
// CHECK5-SAME: (i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8
// CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8
// CHECK5-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8
// CHECK5-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..10
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8
// CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8
// CHECK5-NEXT: [[Y1:%.*]] = alloca i128, align 16
// CHECK5-NEXT: [[X:%.*]] = alloca i128, align 16
// CHECK5-NEXT: [[Z2:%.*]] = alloca i128, align 16
// CHECK5-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
// CHECK5-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
// CHECK5-NEXT: [[TMP:%.*]] = alloca i128, align 16
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8
// CHECK5-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
// CHECK5-NEXT: store i128 [[TMP2]], i128* [[Y1]], align 16
// CHECK5-NEXT: store i128 0, i128* [[Z2]], align 16
// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK5-NEXT: [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
// CHECK5-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK5-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.11, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK5-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK5-NEXT: ]
// CHECK5: .omp.reduction.case1:
// CHECK5-NEXT: [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
// CHECK5-NEXT: [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
// CHECK5-NEXT: store i128 [[ADD]], i128* [[TMP1]], align 16
// CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK5: .omp.reduction.case2:
// CHECK5-NEXT: [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK5-NEXT: [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
// CHECK5-NEXT: [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
// CHECK5-NEXT: call void @__atomic_load(i64 noundef 16, i8* noundef [[TMP12]], i8* noundef [[TMP13]], i32 noundef signext 0) #[[ATTR6:[0-9]+]]
// CHECK5-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK5: atomic_cont:
// CHECK5-NEXT: [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
// CHECK5-NEXT: store i128 [[TMP14]], i128* [[TMP]], align 16
// CHECK5-NEXT: [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
// CHECK5-NEXT: [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
// CHECK5-NEXT: store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
// CHECK5-NEXT: [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
// CHECK5-NEXT: [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
// CHECK5-NEXT: [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
// CHECK5-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i64 noundef 16, i8* noundef [[TMP17]], i8* noundef [[TMP18]], i8* noundef [[TMP19]], i32 noundef signext 0, i32 noundef signext 0) #[[ATTR6]]
// CHECK5-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK5: atomic_exit:
// CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK5: .omp.reduction.default:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.11
// CHECK5-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK5-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK5-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK5-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
// CHECK5-NEXT: [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
// CHECK5-NEXT: [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
// CHECK5-NEXT: store i128 [[ADD]], i128* [[TMP11]], align 16
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9mapInt128v_l74
// CHECK5-SAME: (i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8
// CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8
// CHECK5-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8
// CHECK5-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i128*, i128*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i128* [[TMP0]], i128* [[TMP1]])
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..12
// CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Y:%.*]], i128* noundef nonnull align 16 dereferenceable(16) [[Z:%.*]]) #[[ATTR0]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK5-NEXT: [[Y_ADDR:%.*]] = alloca i128*, align 8
// CHECK5-NEXT: [[Z_ADDR:%.*]] = alloca i128*, align 8
// CHECK5-NEXT: [[Y1:%.*]] = alloca i128, align 16
// CHECK5-NEXT: [[X:%.*]] = alloca i128, align 16
// CHECK5-NEXT: [[Z2:%.*]] = alloca i128, align 16
// CHECK5-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8
// CHECK5-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i128, align 16
// CHECK5-NEXT: [[ATOMIC_TEMP3:%.*]] = alloca i128, align 16
// CHECK5-NEXT: [[TMP:%.*]] = alloca i128, align 16
// CHECK5-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK5-NEXT: store i128* [[Y]], i128** [[Y_ADDR]], align 8
// CHECK5-NEXT: store i128* [[Z]], i128** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP0:%.*]] = load i128*, i128** [[Y_ADDR]], align 8
// CHECK5-NEXT: [[TMP1:%.*]] = load i128*, i128** [[Z_ADDR]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i128, i128* [[TMP0]], align 16
// CHECK5-NEXT: store i128 [[TMP2]], i128* [[Y1]], align 16
// CHECK5-NEXT: store i128 0, i128* [[Z2]], align 16
// CHECK5-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK5-NEXT: [[TMP4:%.*]] = bitcast i128* [[Z2]] to i8*
// CHECK5-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK5-NEXT: [[TMP6:%.*]] = load i32, i32* [[TMP5]], align 4
// CHECK5-NEXT: [[TMP7:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK5-NEXT: [[TMP8:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], i32 1, i64 8, i8* [[TMP7]], void (i8*, i8*)* @.omp.reduction.reduction_func.13, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: switch i32 [[TMP8]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK5-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK5-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK5-NEXT: ]
// CHECK5: .omp.reduction.case1:
// CHECK5-NEXT: [[TMP9:%.*]] = load i128, i128* [[TMP1]], align 16
// CHECK5-NEXT: [[TMP10:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP9]], [[TMP10]]
// CHECK5-NEXT: store i128 [[ADD]], i128* [[TMP1]], align 16
// CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK5: .omp.reduction.case2:
// CHECK5-NEXT: [[TMP11:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK5-NEXT: [[TMP12:%.*]] = bitcast i128* [[TMP1]] to i8*
// CHECK5-NEXT: [[TMP13:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
// CHECK5-NEXT: call void @__atomic_load(i64 noundef 16, i8* noundef [[TMP12]], i8* noundef [[TMP13]], i32 noundef signext 0) #[[ATTR6]]
// CHECK5-NEXT: br label [[ATOMIC_CONT:%.*]]
// CHECK5: atomic_cont:
// CHECK5-NEXT: [[TMP14:%.*]] = load i128, i128* [[ATOMIC_TEMP]], align 16
// CHECK5-NEXT: store i128 [[TMP14]], i128* [[TMP]], align 16
// CHECK5-NEXT: [[TMP15:%.*]] = load i128, i128* [[TMP]], align 16
// CHECK5-NEXT: [[TMP16:%.*]] = load i128, i128* [[Z2]], align 16
// CHECK5-NEXT: [[ADD4:%.*]] = add nsw i128 [[TMP15]], [[TMP16]]
// CHECK5-NEXT: store i128 [[ADD4]], i128* [[ATOMIC_TEMP3]], align 16
// CHECK5-NEXT: [[TMP17:%.*]] = bitcast i128* [[TMP1]] to i8*
// CHECK5-NEXT: [[TMP18:%.*]] = bitcast i128* [[ATOMIC_TEMP]] to i8*
// CHECK5-NEXT: [[TMP19:%.*]] = bitcast i128* [[ATOMIC_TEMP3]] to i8*
// CHECK5-NEXT: [[CALL:%.*]] = call noundef zeroext i1 @__atomic_compare_exchange(i64 noundef 16, i8* noundef [[TMP17]], i8* noundef [[TMP18]], i8* noundef [[TMP19]], i32 noundef signext 0, i32 noundef signext 0) #[[ATTR6]]
// CHECK5-NEXT: br i1 [[CALL]], label [[ATOMIC_EXIT:%.*]], label [[ATOMIC_CONT]]
// CHECK5: atomic_exit:
// CHECK5-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP6]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK5-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK5: .omp.reduction.default:
// CHECK5-NEXT: ret void
//
//
// CHECK5-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.13
// CHECK5-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK5-NEXT: entry:
// CHECK5-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8
// CHECK5-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8
// CHECK5-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8
// CHECK5-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK5-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8
// CHECK5-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK5-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0
// CHECK5-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
// CHECK5-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i128*
// CHECK5-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0
// CHECK5-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
// CHECK5-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i128*
// CHECK5-NEXT: [[TMP12:%.*]] = load i128, i128* [[TMP11]], align 16
// CHECK5-NEXT: [[TMP13:%.*]] = load i128, i128* [[TMP8]], align 16
// CHECK5-NEXT: [[ADD:%.*]] = add nsw i128 [[TMP12]], [[TMP13]]
// CHECK5-NEXT: store i128 [[ADD]], i128* [[TMP11]], align 16
// CHECK5-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z14mapWithPrivatev_l27
// CHECK7-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[X:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[Y:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z19mapWithFirstprivatev_l33
// CHECK7-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[Y_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK7-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK7-NEXT: store i32 [[TMP2]], i32* [[X_CASTED]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = load i32, i32* [[X_CASTED]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK7-NEXT: store i32 [[TMP4]], i32* [[Y_CASTED]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = load i32, i32* [[Y_CASTED]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP5]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4
// CHECK7-NEXT: store i32 [[Y]], i32* [[Y_ADDR]], align 4
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z16mapWithReductionv_l39
// CHECK7-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK7-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), i32* [[TMP0]], i32* [[TMP1]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..2
// CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[Y:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[X1:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[Y2:%.*]] = alloca i32, align 4
// CHECK7-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK7-NEXT: store i32* [[Y]], i32** [[Y_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32*, i32** [[Y_ADDR]], align 4
// CHECK7-NEXT: store i32 0, i32* [[X1]], align 4
// CHECK7-NEXT: store i32 0, i32* [[Y2]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK7-NEXT: [[TMP3:%.*]] = bitcast i32* [[X1]] to i8*
// CHECK7-NEXT: store i8* [[TMP3]], i8** [[TMP2]], align 4
// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK7-NEXT: [[TMP5:%.*]] = bitcast i32* [[Y2]] to i8*
// CHECK7-NEXT: store i8* [[TMP5]], i8** [[TMP4]], align 4
// CHECK7-NEXT: [[TMP6:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[TMP6]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK7-NEXT: [[TMP9:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP7]], i32 2, i32 8, i8* [[TMP8]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK7-NEXT: switch i32 [[TMP9]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK7-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK7-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK7-NEXT: ]
// CHECK7: .omp.reduction.case1:
// CHECK7-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = load i32, i32* [[X1]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[TMP0]], align 4
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[Y2]], align 4
// CHECK7-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK7-NEXT: store i32 [[ADD3]], i32* [[TMP1]], align 4
// CHECK7-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK7: .omp.reduction.case2:
// CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[X1]], align 4
// CHECK7-NEXT: [[TMP15:%.*]] = atomicrmw add i32* [[TMP0]], i32 [[TMP14]] monotonic, align 4
// CHECK7-NEXT: [[TMP16:%.*]] = load i32, i32* [[Y2]], align 4
// CHECK7-NEXT: [[TMP17:%.*]] = atomicrmw add i32* [[TMP1]], i32 [[TMP16]] monotonic, align 4
// CHECK7-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP7]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK7: .omp.reduction.default:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func
// CHECK7-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK7-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]*
// CHECK7-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]*
// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK7-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK7-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i32 0, i32 1
// CHECK7-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = bitcast i8* [[TMP13]] to i32*
// CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i32 0, i32 1
// CHECK7-NEXT: [[TMP16:%.*]] = load i8*, i8** [[TMP15]], align 4
// CHECK7-NEXT: [[TMP17:%.*]] = bitcast i8* [[TMP16]] to i32*
// CHECK7-NEXT: [[TMP18:%.*]] = load i32, i32* [[TMP11]], align 4
// CHECK7-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP8]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP18]], [[TMP19]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4
// CHECK7-NEXT: [[TMP20:%.*]] = load i32, i32* [[TMP17]], align 4
// CHECK7-NEXT: [[TMP21:%.*]] = load i32, i32* [[TMP14]], align 4
// CHECK7-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP20]], [[TMP21]]
// CHECK7-NEXT: store i32 [[ADD2]], i32* [[TMP17]], align 4
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z7mapFromv_l45
// CHECK7-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP2]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..3
// CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5mapTov_l51
// CHECK7-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP2]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..4
// CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapAllocv_l57
// CHECK7-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[X:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[X_CASTED:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[X]], i32** [[X_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load i32*, i32** [[X_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK7-NEXT: store i32 [[TMP1]], i32* [[X_CASTED]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i32, i32* [[X_CASTED]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP2]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..5
// CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[X:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[X_ADDR:%.*]] = alloca i32, align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store i32 [[X]], i32* [[X_ADDR]], align 4
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l63
// CHECK7-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
// CHECK7-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
// CHECK7-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
// CHECK7-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..6 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..6
// CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
// CHECK7-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
// CHECK7-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4
// CHECK7-NEXT: [[X:%.*]] = alloca [77 x i32], align 4
// CHECK7-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4
// CHECK7-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
// CHECK7-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
// CHECK7-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
// CHECK7-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
// CHECK7-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
// CHECK7-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK7: omp.arrayinit.body:
// CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK7-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK7: omp.arrayinit.done:
// CHECK7-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
// CHECK7-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK7-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK7-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK7-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.7, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK7-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK7-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK7-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK7-NEXT: ]
// CHECK7: .omp.reduction.case1:
// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
// CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK7: omp.arraycpy.body:
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
// CHECK7: omp.arraycpy.done6:
// CHECK7-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK7: .omp.reduction.case2:
// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
// CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
// CHECK7: omp.arraycpy.body8:
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
// CHECK7-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
// CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
// CHECK7: omp.arraycpy.done14:
// CHECK7-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK7: .omp.reduction.default:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.7
// CHECK7-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK7-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK7-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK7-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK7-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
// CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK7: omp.arraycpy.body:
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK7: omp.arraycpy.done2:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z8mapArrayv_l65
// CHECK7-SAME: ([88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
// CHECK7-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
// CHECK7-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
// CHECK7-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [88 x i32]*, [99 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [88 x i32]* [[TMP0]], [99 x i32]* [[TMP1]])
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..8
// CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [88 x i32]* noundef nonnull align 4 dereferenceable(352) [[Y:%.*]], [99 x i32]* noundef nonnull align 4 dereferenceable(396) [[Z:%.*]]) #[[ATTR0]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4
// CHECK7-NEXT: [[Y_ADDR:%.*]] = alloca [88 x i32]*, align 4
// CHECK7-NEXT: [[Z_ADDR:%.*]] = alloca [99 x i32]*, align 4
// CHECK7-NEXT: [[Y1:%.*]] = alloca [88 x i32], align 4
// CHECK7-NEXT: [[X:%.*]] = alloca [77 x i32], align 4
// CHECK7-NEXT: [[Z2:%.*]] = alloca [99 x i32], align 4
// CHECK7-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4
// CHECK7-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4
// CHECK7-NEXT: store [88 x i32]* [[Y]], [88 x i32]** [[Y_ADDR]], align 4
// CHECK7-NEXT: store [99 x i32]* [[Z]], [99 x i32]** [[Z_ADDR]], align 4
// CHECK7-NEXT: [[TMP0:%.*]] = load [88 x i32]*, [88 x i32]** [[Y_ADDR]], align 4
// CHECK7-NEXT: [[TMP1:%.*]] = load [99 x i32]*, [99 x i32]** [[Z_ADDR]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = bitcast [88 x i32]* [[Y1]] to i8*
// CHECK7-NEXT: [[TMP3:%.*]] = bitcast [88 x i32]* [[TMP0]] to i8*
// CHECK7-NEXT: call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 4 [[TMP2]], i8* align 4 [[TMP3]], i32 352, i1 false)
// CHECK7-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [99 x i32], [99 x i32]* [[Z2]], i32 0, i32 0
// CHECK7-NEXT: [[TMP4:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 99
// CHECK7-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP4]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]]
// CHECK7: omp.arrayinit.body:
// CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ]
// CHECK7-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP4]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]]
// CHECK7: omp.arrayinit.done:
// CHECK7-NEXT: [[LHS_BEGIN:%.*]] = bitcast [99 x i32]* [[TMP1]] to i32*
// CHECK7-NEXT: [[RHS_BEGIN:%.*]] = bitcast [99 x i32]* [[Z2]] to i32*
// CHECK7-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK7-NEXT: [[TMP6:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8*
// CHECK7-NEXT: store i8* [[TMP6]], i8** [[TMP5]], align 4
// CHECK7-NEXT: [[TMP7:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[TMP7]], align 4
// CHECK7-NEXT: [[TMP9:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8*
// CHECK7-NEXT: [[TMP10:%.*]] = call i32 @__kmpc_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], i32 1, i32 4, i8* [[TMP9]], void (i8*, i8*)* @.omp.reduction.reduction_func.9, [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK7-NEXT: switch i32 [[TMP10]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [
// CHECK7-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]]
// CHECK7-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]]
// CHECK7-NEXT: ]
// CHECK7: .omp.reduction.case1:
// CHECK7-NEXT: [[TMP11:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
// CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP11]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK7: omp.arraycpy.body:
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK7-NEXT: [[TMP12:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4
// CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP11]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]]
// CHECK7: omp.arraycpy.done6:
// CHECK7-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK7: .omp.reduction.case2:
// CHECK7-NEXT: [[TMP14:%.*]] = getelementptr i32, i32* [[LHS_BEGIN]], i32 99
// CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[LHS_BEGIN]], [[TMP14]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]]
// CHECK7: omp.arraycpy.body8:
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[LHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ]
// CHECK7-NEXT: [[TMP15:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4
// CHECK7-NEXT: [[TMP16:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP15]] monotonic, align 4
// CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP14]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]]
// CHECK7: omp.arraycpy.done14:
// CHECK7-NEXT: call void @__kmpc_end_reduce(%struct.ident_t* @[[GLOB2]], i32 [[TMP8]], [8 x i32]* @.gomp_critical_user_.reduction.var)
// CHECK7-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]]
// CHECK7: .omp.reduction.default:
// CHECK7-NEXT: ret void
//
//
// CHECK7-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.9
// CHECK7-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK7-NEXT: entry:
// CHECK7-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4
// CHECK7-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4
// CHECK7-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4
// CHECK7-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4
// CHECK7-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]*
// CHECK7-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4
// CHECK7-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]*
// CHECK7-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0
// CHECK7-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4
// CHECK7-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32*
// CHECK7-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0
// CHECK7-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4
// CHECK7-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32*
// CHECK7-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 99
// CHECK7-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
// CHECK7: omp.arraycpy.body:
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK7-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
// CHECK7-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK7-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4
// CHECK7-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]]
// CHECK7-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4
// CHECK7-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
// CHECK7-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]]
// CHECK7-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]]
// CHECK7: omp.arraycpy.done2:
// CHECK7-NEXT: ret void
//