Files
clang-p2996/clang/test/OpenMP/nvptx_target_parallel_reduction_codegen.cpp
Johannes Doerfert 3de645efe3 [OpenMP][NFC] Split the reduction buffer size into two components
Before we tracked the size of the teams reduction buffer in order to
allocate it at runtime per kernel launch. This patch splits the number
into two parts, the size of the reduction data (=all reduction
variables) and the (maximal) length of the buffer. This will allow us to
allocate less if we need less, e.g., if we have less teams than the
maximal length. It also allows us to move code from clangs codegen into
the runtime as we now know how large the reduction data is.
2023-11-06 11:50:41 -08:00

2100 lines
140 KiB
C++

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// Test target codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx64-unknown-unknown -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix=CHECK-64
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple i386-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32
// RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -fexceptions -fcxx-exceptions -x c++ -triple nvptx-unknown-unknown -fopenmp-targets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=CHECK-32-EX
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// Check for the data transfer medium in shared memory to transfer the reduction list to the first warp.
// Check that the execution mode of all 3 target regions is set to Spmd Mode.
template<typename tx>  // Dependent type lets one template cover several reduction element widths; bar() instantiates with char.
tx ftemplate(int n) {  // NOTE: codegen test body — kernel names in the CHECK lines embed source line numbers (_l24/_l29/_l35), so do not add or remove lines in this function.
int a;   // reduced with |   in the third target region (left deliberately uninitialized: only IR shape is checked, the test is never executed)
short b; // reduced with max in the third target region
tx c;    // reduced with ^   in the second target region (i8 when instantiated as char)
float d; // reduced with *   in the second target region
double e; // reduced with +  in the first target region
#pragma omp target parallel reduction(+: e)  // single 8-byte reduction variable -> shuffle_int64 path in the CHECK lines
{
e += 5;
}
#pragma omp target parallel reduction(^: c) reduction(*: d)  // two variables of mixed size (i8 + float) -> [2 x ptr] red list, two copy rounds
{
c ^= 2;
d *= 33;
}
#pragma omp target parallel reduction(|: a) reduction(max: b)  // two integer variables (i32 + i16) with different combiners
{
a |= 1;
b = 99 > b ? 99 : b;  // open-coded max; matches the reduction(max:) combiner the runtime applies
}
return a+b+c+d+e;  // consume all reduction results so nothing is optimized away
}
int bar(int n){  // Entry point that forces instantiation of ftemplate<char>, emitting the three target regions checked above.
int a = 0;
a += ftemplate<char>(n);  // char instantiation makes tx-typed reduction variable c an i8 in the generated IR
return a;
}
// define internal void [[PFN]](
// Reduction function
// Shuffle and reduce function
// Condition to reduce
// Now check if we should just copy over the remote reduction list
// Inter warp copy function
// [[DO_COPY]]
// Barrier after copy to shared memory storage medium.
// Read into warp 0.
// define internal void [[PFN1]](
// Reduction function
// Shuffle and reduce function
// Condition to reduce
// Now check if we should just copy over the remote reduction list
// Inter warp copy function
// [[DO_COPY]]
// Barrier after copy to shared memory storage medium.
// Read into warp 0.
// [[DO_COPY]]
// Barrier after copy to shared memory storage medium.
// Read into warp 0.
// define internal void [[PFN2]](
// Reduction function
// Shuffle and reduce function
// Condition to reduce
// Now check if we should just copy over the remote reduction list
// Inter warp copy function
// [[DO_COPY]]
// Barrier after copy to shared memory storage medium.
// Read into warp 0.
// [[DO_COPY]]
// Barrier after copy to shared memory storage medium.
// Read into warp 0.
#endif
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24
// CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8
// CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_kernel_environment, ptr [[DYN_PTR]])
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK-64-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK-64-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 8
// CHECK-64-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 1)
// CHECK-64-NEXT: call void @__kmpc_target_deinit()
// CHECK-64-NEXT: ret void
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined
// CHECK-64-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[E1:%.*]] = alloca double, align 8
// CHECK-64-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-64-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 8
// CHECK-64-NEXT: store double 0.000000e+00, ptr [[E1]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load double, ptr [[E1]], align 8
// CHECK-64-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
// CHECK-64-NEXT: store double [[ADD]], ptr [[E1]], align 8
// CHECK-64-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK-64-NEXT: store ptr [[E1]], ptr [[TMP2]], align 8
// CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func, ptr @_omp_reduction_inter_warp_copy_func)
// CHECK-64-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP3]], 1
// CHECK-64-NEXT: br i1 [[TMP4]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK-64: .omp.reduction.then:
// CHECK-64-NEXT: [[TMP5:%.*]] = load double, ptr [[TMP0]], align 8
// CHECK-64-NEXT: [[TMP6:%.*]] = load double, ptr [[E1]], align 8
// CHECK-64-NEXT: [[ADD2:%.*]] = fadd double [[TMP5]], [[TMP6]]
// CHECK-64-NEXT: store double [[ADD2]], ptr [[TMP0]], align 8
// CHECK-64-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK-64: .omp.reduction.done:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
// CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x ptr], align 8
// CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
// CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
// CHECK-64-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
// CHECK-64-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
// CHECK-64-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
// CHECK-64-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
// CHECK-64-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
// CHECK-64-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8
// CHECK-64-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP9]], i64 1
// CHECK-64-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP9]], align 8
// CHECK-64-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-64-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
// CHECK-64-NEXT: [[TMP15:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
// CHECK-64-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 8
// CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[TMP9]], i64 1
// CHECK-64-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[DOTOMP_REDUCTION_ELEMENT]], i64 1
// CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 8
// CHECK-64-NEXT: [[TMP18:%.*]] = icmp eq i16 [[TMP7]], 0
// CHECK-64-NEXT: [[TMP19:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-64-NEXT: [[TMP20:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
// CHECK-64-NEXT: [[TMP21:%.*]] = and i1 [[TMP19]], [[TMP20]]
// CHECK-64-NEXT: [[TMP22:%.*]] = icmp eq i16 [[TMP7]], 2
// CHECK-64-NEXT: [[TMP23:%.*]] = and i16 [[TMP5]], 1
// CHECK-64-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP23]], 0
// CHECK-64-NEXT: [[TMP25:%.*]] = and i1 [[TMP22]], [[TMP24]]
// CHECK-64-NEXT: [[TMP26:%.*]] = icmp sgt i16 [[TMP6]], 0
// CHECK-64-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
// CHECK-64-NEXT: [[TMP28:%.*]] = or i1 [[TMP18]], [[TMP21]]
// CHECK-64-NEXT: [[TMP29:%.*]] = or i1 [[TMP28]], [[TMP27]]
// CHECK-64-NEXT: br i1 [[TMP29]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-64: then:
// CHECK-64-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3:[0-9]+]]
// CHECK-64-NEXT: br label [[IFCONT:%.*]]
// CHECK-64: else:
// CHECK-64-NEXT: br label [[IFCONT]]
// CHECK-64: ifcont:
// CHECK-64-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-64-NEXT: [[TMP31:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
// CHECK-64-NEXT: [[TMP32:%.*]] = and i1 [[TMP30]], [[TMP31]]
// CHECK-64-NEXT: br i1 [[TMP32]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
// CHECK-64: then4:
// CHECK-64-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 8
// CHECK-64-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP35]], align 8
// CHECK-64-NEXT: [[TMP37:%.*]] = load double, ptr [[TMP34]], align 8
// CHECK-64-NEXT: store double [[TMP37]], ptr [[TMP36]], align 8
// CHECK-64-NEXT: br label [[IFCONT6:%.*]]
// CHECK-64: else5:
// CHECK-64-NEXT: br label [[IFCONT6]]
// CHECK-64: ifcont6:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
// CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-64-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK-64-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-64-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK-64-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: store i32 0, ptr [[DOTCNT_ADDR]], align 4
// CHECK-64-NEXT: br label [[PRECOND:%.*]]
// CHECK-64: precond:
// CHECK-64-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4
// CHECK-64-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 2
// CHECK-64-NEXT: br i1 [[TMP8]], label [[BODY:%.*]], label [[EXIT:%.*]]
// CHECK-64: body:
// CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]])
// CHECK-64-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-64-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-64: then:
// CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 8
// CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 [[TMP7]]
// CHECK-64-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-64-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP11]], align 4
// CHECK-64-NEXT: store volatile i32 [[TMP13]], ptr addrspace(3) [[TMP12]], align 4
// CHECK-64-NEXT: br label [[IFCONT:%.*]]
// CHECK-64: else:
// CHECK-64-NEXT: br label [[IFCONT]]
// CHECK-64: ifcont:
// CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-64-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-64-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP14]]
// CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK-64: then2:
// CHECK-64-NEXT: [[TMP15:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
// CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP17]], i32 [[TMP7]]
// CHECK-64-NEXT: [[TMP19:%.*]] = load volatile i32, ptr addrspace(3) [[TMP15]], align 4
// CHECK-64-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4
// CHECK-64-NEXT: br label [[IFCONT4:%.*]]
// CHECK-64: else3:
// CHECK-64-NEXT: br label [[IFCONT4]]
// CHECK-64: ifcont4:
// CHECK-64-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK-64-NEXT: store i32 [[TMP20]], ptr [[DOTCNT_ADDR]], align 4
// CHECK-64-NEXT: br label [[PRECOND]]
// CHECK-64: exit:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29
// CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8
// CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_kernel_environment, ptr [[DYN_PTR]])
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK-64-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
// CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK-64-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 8
// CHECK-64-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 2)
// CHECK-64-NEXT: call void @__kmpc_target_deinit()
// CHECK-64-NEXT: ret void
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined
// CHECK-64-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[C1:%.*]] = alloca i8, align 1
// CHECK-64-NEXT: [[D2:%.*]] = alloca float, align 4
// CHECK-64-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 8
// CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-64-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 8
// CHECK-64-NEXT: store i8 0, ptr [[C1]], align 1
// CHECK-64-NEXT: store float 1.000000e+00, ptr [[D2]], align 4
// CHECK-64-NEXT: [[TMP2:%.*]] = load i8, ptr [[C1]], align 1
// CHECK-64-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
// CHECK-64-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
// CHECK-64-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
// CHECK-64-NEXT: store i8 [[CONV3]], ptr [[C1]], align 1
// CHECK-64-NEXT: [[TMP3:%.*]] = load float, ptr [[D2]], align 4
// CHECK-64-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
// CHECK-64-NEXT: store float [[MUL]], ptr [[D2]], align 4
// CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK-64-NEXT: store ptr [[C1]], ptr [[TMP4]], align 8
// CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK-64-NEXT: store ptr [[D2]], ptr [[TMP5]], align 8
// CHECK-64-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func1, ptr @_omp_reduction_inter_warp_copy_func2)
// CHECK-64-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 1
// CHECK-64-NEXT: br i1 [[TMP7]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK-64: .omp.reduction.then:
// CHECK-64-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1
// CHECK-64-NEXT: [[CONV4:%.*]] = sext i8 [[TMP8]] to i32
// CHECK-64-NEXT: [[TMP9:%.*]] = load i8, ptr [[C1]], align 1
// CHECK-64-NEXT: [[CONV5:%.*]] = sext i8 [[TMP9]] to i32
// CHECK-64-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
// CHECK-64-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
// CHECK-64-NEXT: store i8 [[CONV7]], ptr [[TMP0]], align 1
// CHECK-64-NEXT: [[TMP10:%.*]] = load float, ptr [[TMP1]], align 4
// CHECK-64-NEXT: [[TMP11:%.*]] = load float, ptr [[D2]], align 4
// CHECK-64-NEXT: [[MUL8:%.*]] = fmul float [[TMP10]], [[TMP11]]
// CHECK-64-NEXT: store float [[MUL8]], ptr [[TMP1]], align 4
// CHECK-64-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK-64: .omp.reduction.done:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func1
// CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 8
// CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
// CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
// CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
// CHECK-64-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
// CHECK-64-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
// CHECK-64-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
// CHECK-64-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
// CHECK-64-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
// CHECK-64-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8
// CHECK-64-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP9]], i64 1
// CHECK-64-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP9]], align 1
// CHECK-64-NEXT: [[TMP13:%.*]] = sext i8 [[TMP12]] to i32
// CHECK-64-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-64-NEXT: [[TMP15:%.*]] = trunc i32 [[TMP14]] to i16
// CHECK-64-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP13]], i16 [[TMP6]], i16 [[TMP15]])
// CHECK-64-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
// CHECK-64-NEXT: store i8 [[TMP17]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 1
// CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP9]], i64 1
// CHECK-64-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DOTOMP_REDUCTION_ELEMENT]], i64 1
// CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 8
// CHECK-64-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 8
// CHECK-64-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP23:%.*]] = getelementptr float, ptr [[TMP21]], i64 1
// CHECK-64-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP21]], align 4
// CHECK-64-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-64-NEXT: [[TMP26:%.*]] = trunc i32 [[TMP25]] to i16
// CHECK-64-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP24]], i16 [[TMP6]], i16 [[TMP26]])
// CHECK-64-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 4
// CHECK-64-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP21]], i64 1
// CHECK-64-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i64 1
// CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP22]], align 8
// CHECK-64-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
// CHECK-64-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-64-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
// CHECK-64-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
// CHECK-64-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
// CHECK-64-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
// CHECK-64-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
// CHECK-64-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
// CHECK-64-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
// CHECK-64-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
// CHECK-64-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
// CHECK-64-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
// CHECK-64-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-64: then:
// CHECK-64-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
// CHECK-64-NEXT: br label [[IFCONT:%.*]]
// CHECK-64: else:
// CHECK-64-NEXT: br label [[IFCONT]]
// CHECK-64: ifcont:
// CHECK-64-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-64-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
// CHECK-64-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
// CHECK-64-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK-64: then5:
// CHECK-64-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 8
// CHECK-64-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 8
// CHECK-64-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP46]], align 1
// CHECK-64-NEXT: store i8 [[TMP49]], ptr [[TMP48]], align 1
// CHECK-64-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 8
// CHECK-64-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 8
// CHECK-64-NEXT: [[TMP54:%.*]] = load float, ptr [[TMP51]], align 4
// CHECK-64-NEXT: store float [[TMP54]], ptr [[TMP53]], align 4
// CHECK-64-NEXT: br label [[IFCONT7:%.*]]
// CHECK-64: else6:
// CHECK-64-NEXT: br label [[IFCONT7]]
// CHECK-64: ifcont7:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func2
// CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-64-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK-64-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-64-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK-64-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-64-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-64-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-64: then:
// CHECK-64-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 8
// CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-64-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP8]], align 1
// CHECK-64-NEXT: store volatile i8 [[TMP10]], ptr addrspace(3) [[TMP9]], align 1
// CHECK-64-NEXT: br label [[IFCONT:%.*]]
// CHECK-64: else:
// CHECK-64-NEXT: br label [[IFCONT]]
// CHECK-64: ifcont:
// CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-64-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
// CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK-64: then2:
// CHECK-64-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-64-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8
// CHECK-64-NEXT: [[TMP15:%.*]] = load volatile i8, ptr addrspace(3) [[TMP12]], align 1
// CHECK-64-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
// CHECK-64-NEXT: br label [[IFCONT4:%.*]]
// CHECK-64: else3:
// CHECK-64-NEXT: br label [[IFCONT4]]
// CHECK-64: ifcont4:
// CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-64-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-64-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK-64: then6:
// CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
// CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-64-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4
// CHECK-64-NEXT: store volatile i32 [[TMP19]], ptr addrspace(3) [[TMP18]], align 4
// CHECK-64-NEXT: br label [[IFCONT8:%.*]]
// CHECK-64: else7:
// CHECK-64-NEXT: br label [[IFCONT8]]
// CHECK-64: ifcont8:
// CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-64-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
// CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK-64: then10:
// CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-64-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8
// CHECK-64-NEXT: [[TMP24:%.*]] = load volatile i32, ptr addrspace(3) [[TMP21]], align 4
// CHECK-64-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4
// CHECK-64-NEXT: br label [[IFCONT12:%.*]]
// CHECK-64: else11:
// CHECK-64-NEXT: br label [[IFCONT12]]
// CHECK-64: ifcont12:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35
// CHECK-64-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8
// CHECK-64-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_kernel_environment, ptr [[DYN_PTR]])
// CHECK-64-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-64-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-64: user_code.entry:
// CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-64-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 0
// CHECK-64-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
// CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i64 0, i64 1
// CHECK-64-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 8
// CHECK-64-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i64 2)
// CHECK-64-NEXT: call void @__kmpc_target_deinit()
// CHECK-64-NEXT: ret void
// CHECK-64: worker.exit:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined
// CHECK-64-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[B2:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 8
// CHECK-64-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-64-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-64-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 8
// CHECK-64-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 8
// CHECK-64-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 8
// CHECK-64-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 8
// CHECK-64-NEXT: store i32 0, ptr [[A1]], align 4
// CHECK-64-NEXT: store i16 -32768, ptr [[B2]], align 2
// CHECK-64-NEXT: [[TMP2:%.*]] = load i32, ptr [[A1]], align 4
// CHECK-64-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
// CHECK-64-NEXT: store i32 [[OR]], ptr [[A1]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-64-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
// CHECK-64-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
// CHECK-64-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-64: cond.true:
// CHECK-64-NEXT: br label [[COND_END:%.*]]
// CHECK-64: cond.false:
// CHECK-64-NEXT: [[TMP4:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-64-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
// CHECK-64-NEXT: br label [[COND_END]]
// CHECK-64: cond.end:
// CHECK-64-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
// CHECK-64-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
// CHECK-64-NEXT: store i16 [[CONV4]], ptr [[B2]], align 2
// CHECK-64-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0
// CHECK-64-NEXT: store ptr [[A1]], ptr [[TMP5]], align 8
// CHECK-64-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1
// CHECK-64-NEXT: store ptr [[B2]], ptr [[TMP6]], align 8
// CHECK-64-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func3, ptr @_omp_reduction_inter_warp_copy_func4)
// CHECK-64-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 1
// CHECK-64-NEXT: br i1 [[TMP8]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK-64: .omp.reduction.then:
// CHECK-64-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[A1]], align 4
// CHECK-64-NEXT: [[OR5:%.*]] = or i32 [[TMP9]], [[TMP10]]
// CHECK-64-NEXT: store i32 [[OR5]], ptr [[TMP0]], align 4
// CHECK-64-NEXT: [[TMP11:%.*]] = load i16, ptr [[TMP1]], align 2
// CHECK-64-NEXT: [[CONV6:%.*]] = sext i16 [[TMP11]] to i32
// CHECK-64-NEXT: [[TMP12:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-64-NEXT: [[CONV7:%.*]] = sext i16 [[TMP12]] to i32
// CHECK-64-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
// CHECK-64-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
// CHECK-64: cond.true9:
// CHECK-64-NEXT: [[TMP13:%.*]] = load i16, ptr [[TMP1]], align 2
// CHECK-64-NEXT: br label [[COND_END11:%.*]]
// CHECK-64: cond.false10:
// CHECK-64-NEXT: [[TMP14:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-64-NEXT: br label [[COND_END11]]
// CHECK-64: cond.end11:
// CHECK-64-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP13]], [[COND_TRUE9]] ], [ [[TMP14]], [[COND_FALSE10]] ]
// CHECK-64-NEXT: store i16 [[COND12]], ptr [[TMP1]], align 2
// CHECK-64-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK-64: .omp.reduction.done:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
// CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 8
// CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
// CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
// CHECK-64-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
// CHECK-64-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
// CHECK-64-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
// CHECK-64-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
// CHECK-64-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
// CHECK-64-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 8
// CHECK-64-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP9]], i64 1
// CHECK-64-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP9]], align 4
// CHECK-64-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-64-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
// CHECK-64-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
// CHECK-64-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 4
// CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i64 1
// CHECK-64-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT]], i64 1
// CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 8
// CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 8
// CHECK-64-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[TMP19]], i64 1
// CHECK-64-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP19]], align 2
// CHECK-64-NEXT: [[TMP23:%.*]] = sext i16 [[TMP22]] to i32
// CHECK-64-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-64-NEXT: [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
// CHECK-64-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP23]], i16 [[TMP6]], i16 [[TMP25]])
// CHECK-64-NEXT: [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
// CHECK-64-NEXT: store i16 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 2
// CHECK-64-NEXT: [[TMP28:%.*]] = getelementptr i16, ptr [[TMP19]], i64 1
// CHECK-64-NEXT: [[TMP29:%.*]] = getelementptr i16, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i64 1
// CHECK-64-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP20]], align 8
// CHECK-64-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
// CHECK-64-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-64-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
// CHECK-64-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
// CHECK-64-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
// CHECK-64-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
// CHECK-64-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
// CHECK-64-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
// CHECK-64-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
// CHECK-64-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
// CHECK-64-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
// CHECK-64-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
// CHECK-64-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-64: then:
// CHECK-64-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
// CHECK-64-NEXT: br label [[IFCONT:%.*]]
// CHECK-64: else:
// CHECK-64-NEXT: br label [[IFCONT]]
// CHECK-64: ifcont:
// CHECK-64-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-64-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
// CHECK-64-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
// CHECK-64-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK-64: then5:
// CHECK-64-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 8
// CHECK-64-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 8
// CHECK-64-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP46]], align 4
// CHECK-64-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
// CHECK-64-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 8
// CHECK-64-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 8
// CHECK-64-NEXT: [[TMP54:%.*]] = load i16, ptr [[TMP51]], align 2
// CHECK-64-NEXT: store i16 [[TMP54]], ptr [[TMP53]], align 2
// CHECK-64-NEXT: br label [[IFCONT7:%.*]]
// CHECK-64: else6:
// CHECK-64-NEXT: br label [[IFCONT7]]
// CHECK-64: ifcont7:
// CHECK-64-NEXT: ret void
//
//
// CHECK-64-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
// CHECK-64-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-64-NEXT: entry:
// CHECK-64-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-64-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-64-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-64-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
// CHECK-64-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-64-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-64-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK-64-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-64-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK-64-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-64-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-64-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-64: then:
// CHECK-64-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 8
// CHECK-64-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-64-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4
// CHECK-64-NEXT: store volatile i32 [[TMP10]], ptr addrspace(3) [[TMP9]], align 4
// CHECK-64-NEXT: br label [[IFCONT:%.*]]
// CHECK-64: else:
// CHECK-64-NEXT: br label [[IFCONT]]
// CHECK-64: ifcont:
// CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-64-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-64-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
// CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK-64: then2:
// CHECK-64-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-64-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 0
// CHECK-64-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 8
// CHECK-64-NEXT: [[TMP15:%.*]] = load volatile i32, ptr addrspace(3) [[TMP12]], align 4
// CHECK-64-NEXT: store i32 [[TMP15]], ptr [[TMP14]], align 4
// CHECK-64-NEXT: br label [[IFCONT4:%.*]]
// CHECK-64: else3:
// CHECK-64-NEXT: br label [[IFCONT4]]
// CHECK-64: ifcont4:
// CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-64-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-64-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK-64: then6:
// CHECK-64-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 8
// CHECK-64-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-64-NEXT: [[TMP19:%.*]] = load i16, ptr [[TMP17]], align 2
// CHECK-64-NEXT: store volatile i16 [[TMP19]], ptr addrspace(3) [[TMP18]], align 2
// CHECK-64-NEXT: br label [[IFCONT8:%.*]]
// CHECK-64: else7:
// CHECK-64-NEXT: br label [[IFCONT8]]
// CHECK-64: ifcont8:
// CHECK-64-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-64-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-64-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
// CHECK-64-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK-64: then10:
// CHECK-64-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-64-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i64 0, i64 1
// CHECK-64-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 8
// CHECK-64-NEXT: [[TMP24:%.*]] = load volatile i16, ptr addrspace(3) [[TMP21]], align 2
// CHECK-64-NEXT: store i16 [[TMP24]], ptr [[TMP23]], align 2
// CHECK-64-NEXT: br label [[IFCONT12:%.*]]
// CHECK-64: else11:
// CHECK-64-NEXT: br label [[IFCONT12]]
// CHECK-64: ifcont12:
// CHECK-64-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24
// CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 4
// CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK-32-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK-32-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 4
// CHECK-32-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 1)
// CHECK-32-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-NEXT: ret void
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined
// CHECK-32-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[E1:%.*]] = alloca double, align 8
// CHECK-32-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
// CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK-32-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
// CHECK-32-NEXT: store double 0.000000e+00, ptr [[E1]], align 8
// CHECK-32-NEXT: [[TMP1:%.*]] = load double, ptr [[E1]], align 8
// CHECK-32-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
// CHECK-32-NEXT: store double [[ADD]], ptr [[E1]], align 8
// CHECK-32-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK-32-NEXT: store ptr [[E1]], ptr [[TMP2]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func, ptr @_omp_reduction_inter_warp_copy_func)
// CHECK-32-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP3]], 1
// CHECK-32-NEXT: br i1 [[TMP4]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK-32: .omp.reduction.then:
// CHECK-32-NEXT: [[TMP5:%.*]] = load double, ptr [[TMP0]], align 8
// CHECK-32-NEXT: [[TMP6:%.*]] = load double, ptr [[E1]], align 8
// CHECK-32-NEXT: [[ADD2:%.*]] = fadd double [[TMP5]], [[TMP6]]
// CHECK-32-NEXT: store double [[ADD2]], ptr [[TMP0]], align 8
// CHECK-32-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK-32: .omp.reduction.done:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
// CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK-32-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK-32-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK-32-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x ptr], align 4
// CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
// CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
// CHECK-32-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
// CHECK-32-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
// CHECK-32-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
// CHECK-32-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
// CHECK-32-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
// CHECK-32-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
// CHECK-32-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP9]], i32 1
// CHECK-32-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP9]], align 8
// CHECK-32-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-32-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
// CHECK-32-NEXT: [[TMP15:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
// CHECK-32-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 8
// CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[TMP9]], i32 1
// CHECK-32-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
// CHECK-32-NEXT: [[TMP18:%.*]] = icmp eq i16 [[TMP7]], 0
// CHECK-32-NEXT: [[TMP19:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-NEXT: [[TMP20:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
// CHECK-32-NEXT: [[TMP21:%.*]] = and i1 [[TMP19]], [[TMP20]]
// CHECK-32-NEXT: [[TMP22:%.*]] = icmp eq i16 [[TMP7]], 2
// CHECK-32-NEXT: [[TMP23:%.*]] = and i16 [[TMP5]], 1
// CHECK-32-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP23]], 0
// CHECK-32-NEXT: [[TMP25:%.*]] = and i1 [[TMP22]], [[TMP24]]
// CHECK-32-NEXT: [[TMP26:%.*]] = icmp sgt i16 [[TMP6]], 0
// CHECK-32-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
// CHECK-32-NEXT: [[TMP28:%.*]] = or i1 [[TMP18]], [[TMP21]]
// CHECK-32-NEXT: [[TMP29:%.*]] = or i1 [[TMP28]], [[TMP27]]
// CHECK-32-NEXT: br i1 [[TMP29]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32: then:
// CHECK-32-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3:[0-9]+]]
// CHECK-32-NEXT: br label [[IFCONT:%.*]]
// CHECK-32: else:
// CHECK-32-NEXT: br label [[IFCONT]]
// CHECK-32: ifcont:
// CHECK-32-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-NEXT: [[TMP31:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
// CHECK-32-NEXT: [[TMP32:%.*]] = and i1 [[TMP30]], [[TMP31]]
// CHECK-32-NEXT: br i1 [[TMP32]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
// CHECK-32: then4:
// CHECK-32-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 4
// CHECK-32-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP35]], align 4
// CHECK-32-NEXT: [[TMP37:%.*]] = load double, ptr [[TMP34]], align 8
// CHECK-32-NEXT: store double [[TMP37]], ptr [[TMP36]], align 8
// CHECK-32-NEXT: br label [[IFCONT6:%.*]]
// CHECK-32: else5:
// CHECK-32-NEXT: br label [[IFCONT6]]
// CHECK-32: ifcont6:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
// CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK-32-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK-32-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: store i32 0, ptr [[DOTCNT_ADDR]], align 4
// CHECK-32-NEXT: br label [[PRECOND:%.*]]
// CHECK-32: precond:
// CHECK-32-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4
// CHECK-32-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 2
// CHECK-32-NEXT: br i1 [[TMP8]], label [[BODY:%.*]], label [[EXIT:%.*]]
// CHECK-32: body:
// CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]])
// CHECK-32-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-32-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32: then:
// CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4
// CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 [[TMP7]]
// CHECK-32-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-32-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP11]], align 4
// CHECK-32-NEXT: store volatile i32 [[TMP13]], ptr addrspace(3) [[TMP12]], align 4
// CHECK-32-NEXT: br label [[IFCONT:%.*]]
// CHECK-32: else:
// CHECK-32-NEXT: br label [[IFCONT]]
// CHECK-32: ifcont:
// CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP14]]
// CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK-32: then2:
// CHECK-32-NEXT: [[TMP15:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
// CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP17]], i32 [[TMP7]]
// CHECK-32-NEXT: [[TMP19:%.*]] = load volatile i32, ptr addrspace(3) [[TMP15]], align 4
// CHECK-32-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4
// CHECK-32-NEXT: br label [[IFCONT4:%.*]]
// CHECK-32: else3:
// CHECK-32-NEXT: br label [[IFCONT4]]
// CHECK-32: ifcont4:
// CHECK-32-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK-32-NEXT: store i32 [[TMP20]], ptr [[DOTCNT_ADDR]], align 4
// CHECK-32-NEXT: br label [[PRECOND]]
// CHECK-32: exit:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29
// CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK-32-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
// CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK-32-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
// CHECK-32-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
// CHECK-32-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-NEXT: ret void
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined
// CHECK-32-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[C1:%.*]] = alloca i8, align 1
// CHECK-32-NEXT: [[D2:%.*]] = alloca float, align 4
// CHECK-32-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK-32-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK-32-NEXT: store i8 0, ptr [[C1]], align 1
// CHECK-32-NEXT: store float 1.000000e+00, ptr [[D2]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = load i8, ptr [[C1]], align 1
// CHECK-32-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
// CHECK-32-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
// CHECK-32-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
// CHECK-32-NEXT: store i8 [[CONV3]], ptr [[C1]], align 1
// CHECK-32-NEXT: [[TMP3:%.*]] = load float, ptr [[D2]], align 4
// CHECK-32-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
// CHECK-32-NEXT: store float [[MUL]], ptr [[D2]], align 4
// CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK-32-NEXT: store ptr [[C1]], ptr [[TMP4]], align 4
// CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK-32-NEXT: store ptr [[D2]], ptr [[TMP5]], align 4
// CHECK-32-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func1, ptr @_omp_reduction_inter_warp_copy_func2)
// CHECK-32-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 1
// CHECK-32-NEXT: br i1 [[TMP7]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK-32: .omp.reduction.then:
// CHECK-32-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1
// CHECK-32-NEXT: [[CONV4:%.*]] = sext i8 [[TMP8]] to i32
// CHECK-32-NEXT: [[TMP9:%.*]] = load i8, ptr [[C1]], align 1
// CHECK-32-NEXT: [[CONV5:%.*]] = sext i8 [[TMP9]] to i32
// CHECK-32-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
// CHECK-32-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
// CHECK-32-NEXT: store i8 [[CONV7]], ptr [[TMP0]], align 1
// CHECK-32-NEXT: [[TMP10:%.*]] = load float, ptr [[TMP1]], align 4
// CHECK-32-NEXT: [[TMP11:%.*]] = load float, ptr [[D2]], align 4
// CHECK-32-NEXT: [[MUL8:%.*]] = fmul float [[TMP10]], [[TMP11]]
// CHECK-32-NEXT: store float [[MUL8]], ptr [[TMP1]], align 4
// CHECK-32-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK-32: .omp.reduction.done:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func1
// CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK-32-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK-32-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK-32-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
// CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
// CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
// CHECK-32-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
// CHECK-32-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
// CHECK-32-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
// CHECK-32-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
// CHECK-32-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
// CHECK-32-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
// CHECK-32-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
// CHECK-32-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP9]], align 1
// CHECK-32-NEXT: [[TMP13:%.*]] = sext i8 [[TMP12]] to i32
// CHECK-32-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-32-NEXT: [[TMP15:%.*]] = trunc i32 [[TMP14]] to i16
// CHECK-32-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP13]], i16 [[TMP6]], i16 [[TMP15]])
// CHECK-32-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
// CHECK-32-NEXT: store i8 [[TMP17]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 1
// CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
// CHECK-32-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
// CHECK-32-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 4
// CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP23:%.*]] = getelementptr float, ptr [[TMP21]], i32 1
// CHECK-32-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP21]], align 4
// CHECK-32-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-32-NEXT: [[TMP26:%.*]] = trunc i32 [[TMP25]] to i16
// CHECK-32-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP24]], i16 [[TMP6]], i16 [[TMP26]])
// CHECK-32-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 4
// CHECK-32-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP21]], i32 1
// CHECK-32-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
// CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP22]], align 4
// CHECK-32-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
// CHECK-32-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
// CHECK-32-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
// CHECK-32-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
// CHECK-32-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
// CHECK-32-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
// CHECK-32-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
// CHECK-32-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
// CHECK-32-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
// CHECK-32-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
// CHECK-32-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
// CHECK-32-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32: then:
// CHECK-32-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
// CHECK-32-NEXT: br label [[IFCONT:%.*]]
// CHECK-32: else:
// CHECK-32-NEXT: br label [[IFCONT]]
// CHECK-32: ifcont:
// CHECK-32-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
// CHECK-32-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
// CHECK-32-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK-32: then5:
// CHECK-32-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
// CHECK-32-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
// CHECK-32-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP46]], align 1
// CHECK-32-NEXT: store i8 [[TMP49]], ptr [[TMP48]], align 1
// CHECK-32-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
// CHECK-32-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
// CHECK-32-NEXT: [[TMP54:%.*]] = load float, ptr [[TMP51]], align 4
// CHECK-32-NEXT: store float [[TMP54]], ptr [[TMP53]], align 4
// CHECK-32-NEXT: br label [[IFCONT7:%.*]]
// CHECK-32: else6:
// CHECK-32-NEXT: br label [[IFCONT7]]
// CHECK-32: ifcont7:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func2
// CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK-32-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK-32-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-32-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32: then:
// CHECK-32-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
// CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-32-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP8]], align 1
// CHECK-32-NEXT: store volatile i8 [[TMP10]], ptr addrspace(3) [[TMP9]], align 1
// CHECK-32-NEXT: br label [[IFCONT:%.*]]
// CHECK-32: else:
// CHECK-32-NEXT: br label [[IFCONT]]
// CHECK-32: ifcont:
// CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
// CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK-32: then2:
// CHECK-32-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-32-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
// CHECK-32-NEXT: [[TMP15:%.*]] = load volatile i8, ptr addrspace(3) [[TMP12]], align 1
// CHECK-32-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
// CHECK-32-NEXT: br label [[IFCONT4:%.*]]
// CHECK-32: else3:
// CHECK-32-NEXT: br label [[IFCONT4]]
// CHECK-32: ifcont4:
// CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-32-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK-32: then6:
// CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
// CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-32-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4
// CHECK-32-NEXT: store volatile i32 [[TMP19]], ptr addrspace(3) [[TMP18]], align 4
// CHECK-32-NEXT: br label [[IFCONT8:%.*]]
// CHECK-32: else7:
// CHECK-32-NEXT: br label [[IFCONT8]]
// CHECK-32: ifcont8:
// CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
// CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK-32: then10:
// CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
// CHECK-32-NEXT: [[TMP24:%.*]] = load volatile i32, ptr addrspace(3) [[TMP21]], align 4
// CHECK-32-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4
// CHECK-32-NEXT: br label [[IFCONT12:%.*]]
// CHECK-32: else11:
// CHECK-32-NEXT: br label [[IFCONT12]]
// CHECK-32: ifcont12:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35
// CHECK-32-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-32-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32: user_code.entry:
// CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-32-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK-32-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
// CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK-32-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
// CHECK-32-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
// CHECK-32-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-NEXT: ret void
// CHECK-32: worker.exit:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined
// CHECK-32-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[B2:%.*]] = alloca i16, align 2
// CHECK-32-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK-32-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK-32-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK-32-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK-32-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK-32-NEXT: store i32 0, ptr [[A1]], align 4
// CHECK-32-NEXT: store i16 -32768, ptr [[B2]], align 2
// CHECK-32-NEXT: [[TMP2:%.*]] = load i32, ptr [[A1]], align 4
// CHECK-32-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
// CHECK-32-NEXT: store i32 [[OR]], ptr [[A1]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-32-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
// CHECK-32-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
// CHECK-32-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-32: cond.true:
// CHECK-32-NEXT: br label [[COND_END:%.*]]
// CHECK-32: cond.false:
// CHECK-32-NEXT: [[TMP4:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-32-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
// CHECK-32-NEXT: br label [[COND_END]]
// CHECK-32: cond.end:
// CHECK-32-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
// CHECK-32-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
// CHECK-32-NEXT: store i16 [[CONV4]], ptr [[B2]], align 2
// CHECK-32-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK-32-NEXT: store ptr [[A1]], ptr [[TMP5]], align 4
// CHECK-32-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK-32-NEXT: store ptr [[B2]], ptr [[TMP6]], align 4
// CHECK-32-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func3, ptr @_omp_reduction_inter_warp_copy_func4)
// CHECK-32-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 1
// CHECK-32-NEXT: br i1 [[TMP8]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK-32: .omp.reduction.then:
// CHECK-32-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[A1]], align 4
// CHECK-32-NEXT: [[OR5:%.*]] = or i32 [[TMP9]], [[TMP10]]
// CHECK-32-NEXT: store i32 [[OR5]], ptr [[TMP0]], align 4
// CHECK-32-NEXT: [[TMP11:%.*]] = load i16, ptr [[TMP1]], align 2
// CHECK-32-NEXT: [[CONV6:%.*]] = sext i16 [[TMP11]] to i32
// CHECK-32-NEXT: [[TMP12:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-32-NEXT: [[CONV7:%.*]] = sext i16 [[TMP12]] to i32
// CHECK-32-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
// CHECK-32-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
// CHECK-32: cond.true9:
// CHECK-32-NEXT: [[TMP13:%.*]] = load i16, ptr [[TMP1]], align 2
// CHECK-32-NEXT: br label [[COND_END11:%.*]]
// CHECK-32: cond.false10:
// CHECK-32-NEXT: [[TMP14:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-32-NEXT: br label [[COND_END11]]
// CHECK-32: cond.end11:
// CHECK-32-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP13]], [[COND_TRUE9]] ], [ [[TMP14]], [[COND_FALSE10]] ]
// CHECK-32-NEXT: store i16 [[COND12]], ptr [[TMP1]], align 2
// CHECK-32-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK-32: .omp.reduction.done:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
// CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK-32-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK-32-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK-32-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
// CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
// CHECK-32-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
// CHECK-32-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
// CHECK-32-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
// CHECK-32-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
// CHECK-32-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
// CHECK-32-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
// CHECK-32-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
// CHECK-32-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP9]], align 4
// CHECK-32-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-32-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
// CHECK-32-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
// CHECK-32-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 4
// CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
// CHECK-32-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
// CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 4
// CHECK-32-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
// CHECK-32-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP19]], align 2
// CHECK-32-NEXT: [[TMP23:%.*]] = sext i16 [[TMP22]] to i32
// CHECK-32-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-32-NEXT: [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
// CHECK-32-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP23]], i16 [[TMP6]], i16 [[TMP25]])
// CHECK-32-NEXT: [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
// CHECK-32-NEXT: store i16 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 2
// CHECK-32-NEXT: [[TMP28:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
// CHECK-32-NEXT: [[TMP29:%.*]] = getelementptr i16, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
// CHECK-32-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP20]], align 4
// CHECK-32-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
// CHECK-32-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
// CHECK-32-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
// CHECK-32-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
// CHECK-32-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
// CHECK-32-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
// CHECK-32-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
// CHECK-32-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
// CHECK-32-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
// CHECK-32-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
// CHECK-32-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
// CHECK-32-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32: then:
// CHECK-32-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
// CHECK-32-NEXT: br label [[IFCONT:%.*]]
// CHECK-32: else:
// CHECK-32-NEXT: br label [[IFCONT]]
// CHECK-32: ifcont:
// CHECK-32-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
// CHECK-32-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
// CHECK-32-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK-32: then5:
// CHECK-32-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
// CHECK-32-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
// CHECK-32-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP46]], align 4
// CHECK-32-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
// CHECK-32-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
// CHECK-32-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
// CHECK-32-NEXT: [[TMP54:%.*]] = load i16, ptr [[TMP51]], align 2
// CHECK-32-NEXT: store i16 [[TMP54]], ptr [[TMP53]], align 2
// CHECK-32-NEXT: br label [[IFCONT7:%.*]]
// CHECK-32: else6:
// CHECK-32-NEXT: br label [[IFCONT7]]
// CHECK-32: ifcont7:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
// CHECK-32-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-32-NEXT: entry:
// CHECK-32-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-32-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-32-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK-32-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK-32-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-32-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32: then:
// CHECK-32-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
// CHECK-32-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-32-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4
// CHECK-32-NEXT: store volatile i32 [[TMP10]], ptr addrspace(3) [[TMP9]], align 4
// CHECK-32-NEXT: br label [[IFCONT:%.*]]
// CHECK-32: else:
// CHECK-32-NEXT: br label [[IFCONT]]
// CHECK-32: ifcont:
// CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
// CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK-32: then2:
// CHECK-32-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-32-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
// CHECK-32-NEXT: [[TMP15:%.*]] = load volatile i32, ptr addrspace(3) [[TMP12]], align 4
// CHECK-32-NEXT: store i32 [[TMP15]], ptr [[TMP14]], align 4
// CHECK-32-NEXT: br label [[IFCONT4:%.*]]
// CHECK-32: else3:
// CHECK-32-NEXT: br label [[IFCONT4]]
// CHECK-32: ifcont4:
// CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-32-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK-32: then6:
// CHECK-32-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
// CHECK-32-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-32-NEXT: [[TMP19:%.*]] = load i16, ptr [[TMP17]], align 2
// CHECK-32-NEXT: store volatile i16 [[TMP19]], ptr addrspace(3) [[TMP18]], align 2
// CHECK-32-NEXT: br label [[IFCONT8:%.*]]
// CHECK-32: else7:
// CHECK-32-NEXT: br label [[IFCONT8]]
// CHECK-32: ifcont8:
// CHECK-32-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-32-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
// CHECK-32-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK-32: then10:
// CHECK-32-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-32-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
// CHECK-32-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
// CHECK-32-NEXT: [[TMP24:%.*]] = load volatile i16, ptr addrspace(3) [[TMP21]], align 2
// CHECK-32-NEXT: store i16 [[TMP24]], ptr [[TMP23]], align 2
// CHECK-32-NEXT: br label [[IFCONT12:%.*]]
// CHECK-32: else11:
// CHECK-32-NEXT: br label [[IFCONT12]]
// CHECK-32: ifcont12:
// CHECK-32-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24
// CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 4
// CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32-EX: user_code.entry:
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]])
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[TMP3]], align 4
// CHECK-32-EX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP2]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 1)
// CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-EX-NEXT: ret void
// CHECK-32-EX: worker.exit:
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined
// CHECK-32-EX-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 8 dereferenceable(8) [[E:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[E_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[E1:%.*]] = alloca double, align 8
// CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x ptr], align 4
// CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[E]], ptr [[E_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[E_ADDR]], align 4
// CHECK-32-EX-NEXT: store double 0.000000e+00, ptr [[E1]], align 8
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = load double, ptr [[E1]], align 8
// CHECK-32-EX-NEXT: [[ADD:%.*]] = fadd double [[TMP1]], 5.000000e+00
// CHECK-32-EX-NEXT: store double [[ADD]], ptr [[E1]], align 8
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK-32-EX-NEXT: store ptr [[E1]], ptr [[TMP2]], align 4
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func, ptr @_omp_reduction_inter_warp_copy_func)
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = icmp eq i32 [[TMP3]], 1
// CHECK-32-EX-NEXT: br i1 [[TMP4]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK-32-EX: .omp.reduction.then:
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load double, ptr [[TMP0]], align 8
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load double, ptr [[E1]], align 8
// CHECK-32-EX-NEXT: [[ADD2:%.*]] = fadd double [[TMP5]], [[TMP6]]
// CHECK-32-EX-NEXT: store double [[ADD2]], ptr [[TMP0]], align 8
// CHECK-32-EX-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK-32-EX: .omp.reduction.done:
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func
// CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK-32-EX-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK-32-EX-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x ptr], align 4
// CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8
// CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
// CHECK-32-EX-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
// CHECK-32-EX-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP9]], i32 1
// CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i64, ptr [[TMP9]], align 8
// CHECK-32-EX-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-32-EX-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
// CHECK-32-EX-NEXT: [[TMP15:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
// CHECK-32-EX-NEXT: store i64 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 8
// CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr i64, ptr [[TMP9]], i32 1
// CHECK-32-EX-NEXT: [[TMP17:%.*]] = getelementptr i64, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
// CHECK-32-EX-NEXT: [[TMP18:%.*]] = icmp eq i16 [[TMP7]], 0
// CHECK-32-EX-NEXT: [[TMP19:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-EX-NEXT: [[TMP20:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
// CHECK-32-EX-NEXT: [[TMP21:%.*]] = and i1 [[TMP19]], [[TMP20]]
// CHECK-32-EX-NEXT: [[TMP22:%.*]] = icmp eq i16 [[TMP7]], 2
// CHECK-32-EX-NEXT: [[TMP23:%.*]] = and i16 [[TMP5]], 1
// CHECK-32-EX-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP23]], 0
// CHECK-32-EX-NEXT: [[TMP25:%.*]] = and i1 [[TMP22]], [[TMP24]]
// CHECK-32-EX-NEXT: [[TMP26:%.*]] = icmp sgt i16 [[TMP6]], 0
// CHECK-32-EX-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]]
// CHECK-32-EX-NEXT: [[TMP28:%.*]] = or i1 [[TMP18]], [[TMP21]]
// CHECK-32-EX-NEXT: [[TMP29:%.*]] = or i1 [[TMP28]], [[TMP27]]
// CHECK-32-EX-NEXT: br i1 [[TMP29]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32-EX: then:
// CHECK-32-EX-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l24_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3:[0-9]+]]
// CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
// CHECK-32-EX: else:
// CHECK-32-EX-NEXT: br label [[IFCONT]]
// CHECK-32-EX: ifcont:
// CHECK-32-EX-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-EX-NEXT: [[TMP31:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
// CHECK-32-EX-NEXT: [[TMP32:%.*]] = and i1 [[TMP30]], [[TMP31]]
// CHECK-32-EX-NEXT: br i1 [[TMP32]], label [[THEN4:%.*]], label [[ELSE5:%.*]]
// CHECK-32-EX: then4:
// CHECK-32-EX-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP34:%.*]] = load ptr, ptr [[TMP33]], align 4
// CHECK-32-EX-NEXT: [[TMP35:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP36:%.*]] = load ptr, ptr [[TMP35]], align 4
// CHECK-32-EX-NEXT: [[TMP37:%.*]] = load double, ptr [[TMP34]], align 8
// CHECK-32-EX-NEXT: store double [[TMP37]], ptr [[TMP36]], align 8
// CHECK-32-EX-NEXT: br label [[IFCONT6:%.*]]
// CHECK-32-EX: else5:
// CHECK-32-EX-NEXT: br label [[IFCONT6]]
// CHECK-32-EX: ifcont6:
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func
// CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-EX-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-EX-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: store i32 0, ptr [[DOTCNT_ADDR]], align 4
// CHECK-32-EX-NEXT: br label [[PRECOND:%.*]]
// CHECK-32-EX: precond:
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTCNT_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = icmp ult i32 [[TMP7]], 2
// CHECK-32-EX-NEXT: br i1 [[TMP8]], label [[BODY:%.*]], label [[EXIT:%.*]]
// CHECK-32-EX: body:
// CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2:[0-9]+]], i32 [[TMP2]])
// CHECK-32-EX-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-32-EX-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32-EX: then:
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = load ptr, ptr [[TMP9]], align 4
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP10]], i32 [[TMP7]]
// CHECK-32-EX-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-32-EX-NEXT: [[TMP13:%.*]] = load i32, ptr [[TMP11]], align 4
// CHECK-32-EX-NEXT: store volatile i32 [[TMP13]], ptr addrspace(3) [[TMP12]], align 4
// CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
// CHECK-32-EX: else:
// CHECK-32-EX-NEXT: br label [[IFCONT]]
// CHECK-32-EX: ifcont:
// CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-EX-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP14]]
// CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK-32-EX: then2:
// CHECK-32-EX-NEXT: [[TMP15:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr inbounds [1 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
// CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr i32, ptr [[TMP17]], i32 [[TMP7]]
// CHECK-32-EX-NEXT: [[TMP19:%.*]] = load volatile i32, ptr addrspace(3) [[TMP15]], align 4
// CHECK-32-EX-NEXT: store i32 [[TMP19]], ptr [[TMP18]], align 4
// CHECK-32-EX-NEXT: br label [[IFCONT4:%.*]]
// CHECK-32-EX: else3:
// CHECK-32-EX-NEXT: br label [[IFCONT4]]
// CHECK-32-EX: ifcont4:
// CHECK-32-EX-NEXT: [[TMP20:%.*]] = add nsw i32 [[TMP7]], 1
// CHECK-32-EX-NEXT: store i32 [[TMP20]], ptr [[DOTCNT_ADDR]], align 4
// CHECK-32-EX-NEXT: br label [[PRECOND]]
// CHECK-32-EX: exit:
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29
// CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR0]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32-EX: user_code.entry:
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK-32-EX-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
// CHECK-32-EX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
// CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-EX-NEXT: ret void
// CHECK-32-EX: worker.exit:
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined
// CHECK-32-EX-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 1 dereferenceable(1) [[C:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[D:%.*]]) #[[ATTR1]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[C_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[D_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[C1:%.*]] = alloca i8, align 1
// CHECK-32-EX-NEXT: [[D2:%.*]] = alloca float, align 4
// CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[C]], ptr [[C_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[D]], ptr [[D_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[C_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[D_ADDR]], align 4
// CHECK-32-EX-NEXT: store i8 0, ptr [[C1]], align 1
// CHECK-32-EX-NEXT: store float 1.000000e+00, ptr [[D2]], align 4
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i8, ptr [[C1]], align 1
// CHECK-32-EX-NEXT: [[CONV:%.*]] = sext i8 [[TMP2]] to i32
// CHECK-32-EX-NEXT: [[XOR:%.*]] = xor i32 [[CONV]], 2
// CHECK-32-EX-NEXT: [[CONV3:%.*]] = trunc i32 [[XOR]] to i8
// CHECK-32-EX-NEXT: store i8 [[CONV3]], ptr [[C1]], align 1
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load float, ptr [[D2]], align 4
// CHECK-32-EX-NEXT: [[MUL:%.*]] = fmul float [[TMP3]], 3.300000e+01
// CHECK-32-EX-NEXT: store float [[MUL]], ptr [[D2]], align 4
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK-32-EX-NEXT: store ptr [[C1]], ptr [[TMP4]], align 4
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK-32-EX-NEXT: store ptr [[D2]], ptr [[TMP5]], align 4
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func1, ptr @_omp_reduction_inter_warp_copy_func2)
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = icmp eq i32 [[TMP6]], 1
// CHECK-32-EX-NEXT: br i1 [[TMP7]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK-32-EX: .omp.reduction.then:
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = load i8, ptr [[TMP0]], align 1
// CHECK-32-EX-NEXT: [[CONV4:%.*]] = sext i8 [[TMP8]] to i32
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i8, ptr [[C1]], align 1
// CHECK-32-EX-NEXT: [[CONV5:%.*]] = sext i8 [[TMP9]] to i32
// CHECK-32-EX-NEXT: [[XOR6:%.*]] = xor i32 [[CONV4]], [[CONV5]]
// CHECK-32-EX-NEXT: [[CONV7:%.*]] = trunc i32 [[XOR6]] to i8
// CHECK-32-EX-NEXT: store i8 [[CONV7]], ptr [[TMP0]], align 1
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = load float, ptr [[TMP1]], align 4
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = load float, ptr [[D2]], align 4
// CHECK-32-EX-NEXT: [[MUL8:%.*]] = fmul float [[TMP10]], [[TMP11]]
// CHECK-32-EX-NEXT: store float [[MUL8]], ptr [[TMP1]], align 4
// CHECK-32-EX-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK-32-EX: .omp.reduction.done:
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func1
// CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK-32-EX-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK-32-EX-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i8, align 1
// CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca float, align 4
// CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
// CHECK-32-EX-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
// CHECK-32-EX-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
// CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i8, ptr [[TMP9]], align 1
// CHECK-32-EX-NEXT: [[TMP13:%.*]] = sext i8 [[TMP12]] to i32
// CHECK-32-EX-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-32-EX-NEXT: [[TMP15:%.*]] = trunc i32 [[TMP14]] to i16
// CHECK-32-EX-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP13]], i16 [[TMP6]], i16 [[TMP15]])
// CHECK-32-EX-NEXT: [[TMP17:%.*]] = trunc i32 [[TMP16]] to i8
// CHECK-32-EX-NEXT: store i8 [[TMP17]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 1
// CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[TMP9]], i32 1
// CHECK-32-EX-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
// CHECK-32-EX-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP21:%.*]] = load ptr, ptr [[TMP20]], align 4
// CHECK-32-EX-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP23:%.*]] = getelementptr float, ptr [[TMP21]], i32 1
// CHECK-32-EX-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP21]], align 4
// CHECK-32-EX-NEXT: [[TMP25:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-32-EX-NEXT: [[TMP26:%.*]] = trunc i32 [[TMP25]] to i16
// CHECK-32-EX-NEXT: [[TMP27:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP24]], i16 [[TMP6]], i16 [[TMP26]])
// CHECK-32-EX-NEXT: store i32 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 4
// CHECK-32-EX-NEXT: [[TMP28:%.*]] = getelementptr i32, ptr [[TMP21]], i32 1
// CHECK-32-EX-NEXT: [[TMP29:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
// CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP22]], align 4
// CHECK-32-EX-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
// CHECK-32-EX-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-EX-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
// CHECK-32-EX-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
// CHECK-32-EX-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
// CHECK-32-EX-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
// CHECK-32-EX-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
// CHECK-32-EX-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
// CHECK-32-EX-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
// CHECK-32-EX-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
// CHECK-32-EX-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
// CHECK-32-EX-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
// CHECK-32-EX-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32-EX: then:
// CHECK-32-EX-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l29_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
// CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
// CHECK-32-EX: else:
// CHECK-32-EX-NEXT: br label [[IFCONT]]
// CHECK-32-EX: ifcont:
// CHECK-32-EX-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-EX-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
// CHECK-32-EX-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
// CHECK-32-EX-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK-32-EX: then5:
// CHECK-32-EX-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
// CHECK-32-EX-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
// CHECK-32-EX-NEXT: [[TMP49:%.*]] = load i8, ptr [[TMP46]], align 1
// CHECK-32-EX-NEXT: store i8 [[TMP49]], ptr [[TMP48]], align 1
// CHECK-32-EX-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
// CHECK-32-EX-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
// CHECK-32-EX-NEXT: [[TMP54:%.*]] = load float, ptr [[TMP51]], align 4
// CHECK-32-EX-NEXT: store float [[TMP54]], ptr [[TMP53]], align 4
// CHECK-32-EX-NEXT: br label [[IFCONT7:%.*]]
// CHECK-32-EX: else6:
// CHECK-32-EX-NEXT: br label [[IFCONT7]]
// CHECK-32-EX: ifcont7:
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func2
// CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-EX-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-EX-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-EX-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-32-EX-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32-EX: then:
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i8, ptr [[TMP8]], align 1
// CHECK-32-EX-NEXT: store volatile i8 [[TMP10]], ptr addrspace(3) [[TMP9]], align 1
// CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
// CHECK-32-EX: else:
// CHECK-32-EX-NEXT: br label [[IFCONT]]
// CHECK-32-EX: ifcont:
// CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
// CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK-32-EX: then2:
// CHECK-32-EX-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-32-EX-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
// CHECK-32-EX-NEXT: [[TMP15:%.*]] = load volatile i8, ptr addrspace(3) [[TMP12]], align 1
// CHECK-32-EX-NEXT: store i8 [[TMP15]], ptr [[TMP14]], align 1
// CHECK-32-EX-NEXT: br label [[IFCONT4:%.*]]
// CHECK-32-EX: else3:
// CHECK-32-EX-NEXT: br label [[IFCONT4]]
// CHECK-32-EX: ifcont4:
// CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-EX-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-32-EX-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK-32-EX: then6:
// CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
// CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-32-EX-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP17]], align 4
// CHECK-32-EX-NEXT: store volatile i32 [[TMP19]], ptr addrspace(3) [[TMP18]], align 4
// CHECK-32-EX-NEXT: br label [[IFCONT8:%.*]]
// CHECK-32-EX: else7:
// CHECK-32-EX-NEXT: br label [[IFCONT8]]
// CHECK-32-EX: ifcont8:
// CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
// CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK-32-EX: then10:
// CHECK-32-EX-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-32-EX-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
// CHECK-32-EX-NEXT: [[TMP24:%.*]] = load volatile i32, ptr addrspace(3) [[TMP21]], align 4
// CHECK-32-EX-NEXT: store i32 [[TMP24]], ptr [[TMP23]], align 4
// CHECK-32-EX-NEXT: br label [[IFCONT12:%.*]]
// CHECK-32-EX: else11:
// CHECK-32-EX-NEXT: br label [[IFCONT12]]
// CHECK-32-EX: ifcont12:
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35
// CHECK-32-EX-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR0]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-EX-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_target_init(ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_kernel_environment, ptr [[DYN_PTR]])
// CHECK-32-EX-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP2]], -1
// CHECK-32-EX-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// CHECK-32-EX: user_code.entry:
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 0
// CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 4
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS]], i32 0, i32 1
// CHECK-32-EX-NEXT: store ptr [[TMP1]], ptr [[TMP5]], align 4
// CHECK-32-EX-NEXT: call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP3]], i32 1, i32 -1, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS]], i32 2)
// CHECK-32-EX-NEXT: call void @__kmpc_target_deinit()
// CHECK-32-EX-NEXT: ret void
// CHECK-32-EX: worker.exit:
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined
// CHECK-32-EX-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(4) [[A:%.*]], ptr noundef nonnull align 2 dereferenceable(2) [[B:%.*]]) #[[ATTR1]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[A_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[B_ADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[A1:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[B2:%.*]] = alloca i16, align 2
// CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-EX-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[A]], ptr [[A_ADDR]], align 4
// CHECK-32-EX-NEXT: store ptr [[B]], ptr [[B_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP0:%.*]] = load ptr, ptr [[A_ADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP1:%.*]] = load ptr, ptr [[B_ADDR]], align 4
// CHECK-32-EX-NEXT: store i32 0, ptr [[A1]], align 4
// CHECK-32-EX-NEXT: store i16 -32768, ptr [[B2]], align 2
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = load i32, ptr [[A1]], align 4
// CHECK-32-EX-NEXT: [[OR:%.*]] = or i32 [[TMP2]], 1
// CHECK-32-EX-NEXT: store i32 [[OR]], ptr [[A1]], align 4
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-32-EX-NEXT: [[CONV:%.*]] = sext i16 [[TMP3]] to i32
// CHECK-32-EX-NEXT: [[CMP:%.*]] = icmp sgt i32 99, [[CONV]]
// CHECK-32-EX-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-32-EX: cond.true:
// CHECK-32-EX-NEXT: br label [[COND_END:%.*]]
// CHECK-32-EX: cond.false:
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-32-EX-NEXT: [[CONV3:%.*]] = sext i16 [[TMP4]] to i32
// CHECK-32-EX-NEXT: br label [[COND_END]]
// CHECK-32-EX: cond.end:
// CHECK-32-EX-NEXT: [[COND:%.*]] = phi i32 [ 99, [[COND_TRUE]] ], [ [[CONV3]], [[COND_FALSE]] ]
// CHECK-32-EX-NEXT: [[CONV4:%.*]] = trunc i32 [[COND]] to i16
// CHECK-32-EX-NEXT: store i16 [[CONV4]], ptr [[B2]], align 2
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0
// CHECK-32-EX-NEXT: store ptr [[A1]], ptr [[TMP5]], align 4
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 1
// CHECK-32-EX-NEXT: store ptr [[B2]], ptr [[TMP6]], align 4
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(ptr @[[GLOB1]], i64 8, ptr [[DOTOMP_REDUCTION_RED_LIST]], ptr @_omp_reduction_shuffle_and_reduce_func3, ptr @_omp_reduction_inter_warp_copy_func4)
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = icmp eq i32 [[TMP7]], 1
// CHECK-32-EX-NEXT: br i1 [[TMP8]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]]
// CHECK-32-EX: .omp.reduction.then:
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[A1]], align 4
// CHECK-32-EX-NEXT: [[OR5:%.*]] = or i32 [[TMP9]], [[TMP10]]
// CHECK-32-EX-NEXT: store i32 [[OR5]], ptr [[TMP0]], align 4
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i16, ptr [[TMP1]], align 2
// CHECK-32-EX-NEXT: [[CONV6:%.*]] = sext i16 [[TMP11]] to i32
// CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-32-EX-NEXT: [[CONV7:%.*]] = sext i16 [[TMP12]] to i32
// CHECK-32-EX-NEXT: [[CMP8:%.*]] = icmp sgt i32 [[CONV6]], [[CONV7]]
// CHECK-32-EX-NEXT: br i1 [[CMP8]], label [[COND_TRUE9:%.*]], label [[COND_FALSE10:%.*]]
// CHECK-32-EX: cond.true9:
// CHECK-32-EX-NEXT: [[TMP13:%.*]] = load i16, ptr [[TMP1]], align 2
// CHECK-32-EX-NEXT: br label [[COND_END11:%.*]]
// CHECK-32-EX: cond.false10:
// CHECK-32-EX-NEXT: [[TMP14:%.*]] = load i16, ptr [[B2]], align 2
// CHECK-32-EX-NEXT: br label [[COND_END11]]
// CHECK-32-EX: cond.end11:
// CHECK-32-EX-NEXT: [[COND12:%.*]] = phi i16 [ [[TMP13]], [[COND_TRUE9]] ], [ [[TMP14]], [[COND_FALSE10]] ]
// CHECK-32-EX-NEXT: store i16 [[COND12]], ptr [[TMP1]], align 2
// CHECK-32-EX-NEXT: br label [[DOTOMP_REDUCTION_DONE]]
// CHECK-32-EX: .omp.reduction.done:
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func3
// CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2
// CHECK-32-EX-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2
// CHECK-32-EX-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2
// CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [2 x ptr], align 4
// CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[DOTOMP_REDUCTION_ELEMENT4:%.*]] = alloca i16, align 2
// CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: store i16 [[TMP1]], ptr [[DOTADDR1]], align 2
// CHECK-32-EX-NEXT: store i16 [[TMP2]], ptr [[DOTADDR2]], align 2
// CHECK-32-EX-NEXT: store i16 [[TMP3]], ptr [[DOTADDR3]], align 2
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = load i16, ptr [[DOTADDR1]], align 2
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load i16, ptr [[DOTADDR2]], align 2
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = load i16, ptr [[DOTADDR3]], align 2
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = load ptr, ptr [[TMP8]], align 4
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
// CHECK-32-EX-NEXT: [[TMP12:%.*]] = load i32, ptr [[TMP9]], align 4
// CHECK-32-EX-NEXT: [[TMP13:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-32-EX-NEXT: [[TMP14:%.*]] = trunc i32 [[TMP13]] to i16
// CHECK-32-EX-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP12]], i16 [[TMP6]], i16 [[TMP14]])
// CHECK-32-EX-NEXT: store i32 [[TMP15]], ptr [[DOTOMP_REDUCTION_ELEMENT]], align 4
// CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr i32, ptr [[TMP9]], i32 1
// CHECK-32-EX-NEXT: [[TMP17:%.*]] = getelementptr i32, ptr [[DOTOMP_REDUCTION_ELEMENT]], i32 1
// CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT]], ptr [[TMP10]], align 4
// CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP19:%.*]] = load ptr, ptr [[TMP18]], align 4
// CHECK-32-EX-NEXT: [[TMP20:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP21:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
// CHECK-32-EX-NEXT: [[TMP22:%.*]] = load i16, ptr [[TMP19]], align 2
// CHECK-32-EX-NEXT: [[TMP23:%.*]] = sext i16 [[TMP22]] to i32
// CHECK-32-EX-NEXT: [[TMP24:%.*]] = call i32 @__kmpc_get_warp_size()
// CHECK-32-EX-NEXT: [[TMP25:%.*]] = trunc i32 [[TMP24]] to i16
// CHECK-32-EX-NEXT: [[TMP26:%.*]] = call i32 @__kmpc_shuffle_int32(i32 [[TMP23]], i16 [[TMP6]], i16 [[TMP25]])
// CHECK-32-EX-NEXT: [[TMP27:%.*]] = trunc i32 [[TMP26]] to i16
// CHECK-32-EX-NEXT: store i16 [[TMP27]], ptr [[DOTOMP_REDUCTION_ELEMENT4]], align 2
// CHECK-32-EX-NEXT: [[TMP28:%.*]] = getelementptr i16, ptr [[TMP19]], i32 1
// CHECK-32-EX-NEXT: [[TMP29:%.*]] = getelementptr i16, ptr [[DOTOMP_REDUCTION_ELEMENT4]], i32 1
// CHECK-32-EX-NEXT: store ptr [[DOTOMP_REDUCTION_ELEMENT4]], ptr [[TMP20]], align 4
// CHECK-32-EX-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP7]], 0
// CHECK-32-EX-NEXT: [[TMP31:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-EX-NEXT: [[TMP32:%.*]] = icmp ult i16 [[TMP5]], [[TMP6]]
// CHECK-32-EX-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]]
// CHECK-32-EX-NEXT: [[TMP34:%.*]] = icmp eq i16 [[TMP7]], 2
// CHECK-32-EX-NEXT: [[TMP35:%.*]] = and i16 [[TMP5]], 1
// CHECK-32-EX-NEXT: [[TMP36:%.*]] = icmp eq i16 [[TMP35]], 0
// CHECK-32-EX-NEXT: [[TMP37:%.*]] = and i1 [[TMP34]], [[TMP36]]
// CHECK-32-EX-NEXT: [[TMP38:%.*]] = icmp sgt i16 [[TMP6]], 0
// CHECK-32-EX-NEXT: [[TMP39:%.*]] = and i1 [[TMP37]], [[TMP38]]
// CHECK-32-EX-NEXT: [[TMP40:%.*]] = or i1 [[TMP30]], [[TMP33]]
// CHECK-32-EX-NEXT: [[TMP41:%.*]] = or i1 [[TMP40]], [[TMP39]]
// CHECK-32-EX-NEXT: br i1 [[TMP41]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32-EX: then:
// CHECK-32-EX-NEXT: call void @"{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIcET_i_l35_omp_outlined_omp$reduction$reduction_func"(ptr [[TMP4]], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]]) #[[ATTR3]]
// CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
// CHECK-32-EX: else:
// CHECK-32-EX-NEXT: br label [[IFCONT]]
// CHECK-32-EX: ifcont:
// CHECK-32-EX-NEXT: [[TMP42:%.*]] = icmp eq i16 [[TMP7]], 1
// CHECK-32-EX-NEXT: [[TMP43:%.*]] = icmp uge i16 [[TMP5]], [[TMP6]]
// CHECK-32-EX-NEXT: [[TMP44:%.*]] = and i1 [[TMP42]], [[TMP43]]
// CHECK-32-EX-NEXT: br i1 [[TMP44]], label [[THEN5:%.*]], label [[ELSE6:%.*]]
// CHECK-32-EX: then5:
// CHECK-32-EX-NEXT: [[TMP45:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP46:%.*]] = load ptr, ptr [[TMP45]], align 4
// CHECK-32-EX-NEXT: [[TMP47:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP48:%.*]] = load ptr, ptr [[TMP47]], align 4
// CHECK-32-EX-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP46]], align 4
// CHECK-32-EX-NEXT: store i32 [[TMP49]], ptr [[TMP48]], align 4
// CHECK-32-EX-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x ptr], ptr [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP51:%.*]] = load ptr, ptr [[TMP50]], align 4
// CHECK-32-EX-NEXT: [[TMP52:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP4]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP53:%.*]] = load ptr, ptr [[TMP52]], align 4
// CHECK-32-EX-NEXT: [[TMP54:%.*]] = load i16, ptr [[TMP51]], align 2
// CHECK-32-EX-NEXT: store i16 [[TMP54]], ptr [[TMP53]], align 2
// CHECK-32-EX-NEXT: br label [[IFCONT7:%.*]]
// CHECK-32-EX: else6:
// CHECK-32-EX-NEXT: br label [[IFCONT7]]
// CHECK-32-EX: ifcont7:
// CHECK-32-EX-NEXT: ret void
//
//
// CHECK-32-EX-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func4
// CHECK-32-EX-SAME: (ptr noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] {
// CHECK-32-EX-NEXT: entry:
// CHECK-32-EX-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 4
// CHECK-32-EX-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4
// CHECK-32-EX-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
// CHECK-32-EX-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: store i32 [[TMP1]], ptr [[DOTADDR1]], align 4
// CHECK-32-EX-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-EX-NEXT: [[TMP4:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-EX-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31
// CHECK-32-EX-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block()
// CHECK-32-EX-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5
// CHECK-32-EX-NEXT: [[TMP6:%.*]] = load ptr, ptr [[DOTADDR]], align 4
// CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-EX-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-32-EX-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]]
// CHECK-32-EX: then:
// CHECK-32-EX-NEXT: [[TMP7:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP8:%.*]] = load ptr, ptr [[TMP7]], align 4
// CHECK-32-EX-NEXT: [[TMP9:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-32-EX-NEXT: [[TMP10:%.*]] = load i32, ptr [[TMP8]], align 4
// CHECK-32-EX-NEXT: store volatile i32 [[TMP10]], ptr addrspace(3) [[TMP9]], align 4
// CHECK-32-EX-NEXT: br label [[IFCONT:%.*]]
// CHECK-32-EX: else:
// CHECK-32-EX-NEXT: br label [[IFCONT]]
// CHECK-32-EX: ifcont:
// CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-EX-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP11]]
// CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]]
// CHECK-32-EX: then2:
// CHECK-32-EX-NEXT: [[TMP12:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-32-EX-NEXT: [[TMP13:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 0
// CHECK-32-EX-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP13]], align 4
// CHECK-32-EX-NEXT: [[TMP15:%.*]] = load volatile i32, ptr addrspace(3) [[TMP12]], align 4
// CHECK-32-EX-NEXT: store i32 [[TMP15]], ptr [[TMP14]], align 4
// CHECK-32-EX-NEXT: br label [[IFCONT4:%.*]]
// CHECK-32-EX: else3:
// CHECK-32-EX-NEXT: br label [[IFCONT4]]
// CHECK-32-EX: ifcont4:
// CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-EX-NEXT: [[WARP_MASTER5:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0
// CHECK-32-EX-NEXT: br i1 [[WARP_MASTER5]], label [[THEN6:%.*]], label [[ELSE7:%.*]]
// CHECK-32-EX: then6:
// CHECK-32-EX-NEXT: [[TMP16:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP17:%.*]] = load ptr, ptr [[TMP16]], align 4
// CHECK-32-EX-NEXT: [[TMP18:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]]
// CHECK-32-EX-NEXT: [[TMP19:%.*]] = load i16, ptr [[TMP17]], align 2
// CHECK-32-EX-NEXT: store volatile i16 [[TMP19]], ptr addrspace(3) [[TMP18]], align 2
// CHECK-32-EX-NEXT: br label [[IFCONT8:%.*]]
// CHECK-32-EX: else7:
// CHECK-32-EX-NEXT: br label [[IFCONT8]]
// CHECK-32-EX: ifcont8:
// CHECK-32-EX-NEXT: call void @__kmpc_barrier(ptr @[[GLOB2]], i32 [[TMP2]])
// CHECK-32-EX-NEXT: [[TMP20:%.*]] = load i32, ptr [[DOTADDR1]], align 4
// CHECK-32-EX-NEXT: [[IS_ACTIVE_THREAD9:%.*]] = icmp ult i32 [[TMP3]], [[TMP20]]
// CHECK-32-EX-NEXT: br i1 [[IS_ACTIVE_THREAD9]], label [[THEN10:%.*]], label [[ELSE11:%.*]]
// CHECK-32-EX: then10:
// CHECK-32-EX-NEXT: [[TMP21:%.*]] = getelementptr inbounds [32 x i32], ptr addrspace(3) @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]]
// CHECK-32-EX-NEXT: [[TMP22:%.*]] = getelementptr inbounds [2 x ptr], ptr [[TMP6]], i32 0, i32 1
// CHECK-32-EX-NEXT: [[TMP23:%.*]] = load ptr, ptr [[TMP22]], align 4
// CHECK-32-EX-NEXT: [[TMP24:%.*]] = load volatile i16, ptr addrspace(3) [[TMP21]], align 2
// CHECK-32-EX-NEXT: store i16 [[TMP24]], ptr [[TMP23]], align 2
// CHECK-32-EX-NEXT: br label [[IFCONT12:%.*]]
// CHECK-32-EX: else11:
// CHECK-32-EX-NEXT: br label [[IFCONT12]]
// CHECK-32-EX: ifcont12:
// CHECK-32-EX-NEXT: ret void
//