clang-p2996/clang/test/OpenMP/target_parallel_generic_loop_codegen-2.cpp
Johannes Doerfert b8cbc5c02c [OpenMP] Introduce the KernelLaunchEnvironment as implicit argument (#70401)
The KernelEnvironment is for compile-time information about a kernel. It
allows the compiler to feed information to the runtime. The
KernelLaunchEnvironment is for dynamic information *per* kernel launch.
It allows the runtime to feed information to the kernel that is not
shared with other invocations of the kernel. The first use case is to
replace the globals that synchronize teams reductions with per-launch
versions, which allows concurrent teams reductions. More use cases will
follow, e.g., per-launch memory pools.

Fixes: https://github.com/llvm/llvm-project/issues/70249
2023-10-31 19:38:43 -07:00
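
For context, the KernelLaunchEnvironment is a small per-launch struct that the
runtime fills in and passes to the kernel as an implicit argument (visible as
the leading dyn_ptr parameter in the device-side checks below). A rough sketch
of the idea, with illustrative field names rather than the exact upstream
definition:

    struct KernelLaunchEnvironmentTy {
      // Per-launch state for teams reductions, replacing the former globals
      // so that concurrent launches of the same kernel do not race on them.
      uint32_t ReductionCnt = 0;       // illustrative name
      uint32_t ReductionIterCnt = 0;   // illustrative name
      void *ReductionBuffer = nullptr; // illustrative name
    };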


// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _ --version 2
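// To regenerate the assertions after a codegen change, rerun that script on
// this file (invocation illustrative; it picks up the UTC_ARGS above):
//   llvm/utils/update_cc_test_checks.py clang/test/OpenMP/target_parallel_generic_loop_codegen-2.cpp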
// Test host codegen.
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK-X86
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix CHECK-X86
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0-X86 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0-X86 %s
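// Each host configuration above is exercised three ways: a direct -emit-llvm
// compile, an -emit-pch compile, and a recompile through -include-pch, so the
// checked IR must be identical whether or not the AST round-trips through a PCH.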
// Test target parallel for codegen - host bc file has to be created first.
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s -check-prefix=TCHECK-TARGET
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=TCHECK-TARGET
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s -check-prefix=TCHECK-TARGET-X86
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s -check-prefix=TCHECK-TARGET-X86
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm-bc %s -o %t-ppc-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck --check-prefix SIMD-ONLY1-TARGET %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-ppc-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1-TARGET %s
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-llvm %s -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck --check-prefix SIMD-ONLY1-TARGET-X86 %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -emit-pch -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple i386-unknown-unknown -fopenmp-targets=i386-pc-linux-gnu -fopenmp-is-target-device -fopenmp-host-ir-file-path %t-x86-host.bc -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1-TARGET-X86 %s
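// For device codegen the host IR is produced first (-emit-llvm-bc) and handed
// to the device compile via -fopenmp-host-ir-file-path together with
// -fopenmp-is-target-device, which is why the host bc file must exist first.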
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
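// 'nested' exercises '#pragma omp target parallel loop' twice: once at
// function scope and once inside a lambda that is itself enclosed in a
// '#pragma omp parallel' region.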
int nested(int a) {
#pragma omp target parallel loop
  for (int i = 0; i < 10; ++i)
    ++a;
  auto F = [&]() {
#pragma omp parallel
    {
#pragma omp target parallel loop
      for (int i = 0; i < 10; ++i)
        ++a;
    }
  };
  F();
  return a;
}
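// On the host, each target region above lowers to a __tgt_kernel_arguments
// struct, a call to __tgt_target_kernel, and a fallback path that calls the
// outlined region directly when offloading fails.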
// Check that the expected host and device codegen is generated:
#endif
// CHECK-LABEL: define dso_local noundef signext i32 @_Z6nestedi
// CHECK-SAME: (i32 noundef signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 8
// CHECK-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 8
// CHECK-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 8
// CHECK-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK-NEXT: [[F:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
// CHECK-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-NEXT: store i64 [[TMP1]], ptr [[TMP2]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-NEXT: store i64 [[TMP1]], ptr [[TMP3]], align 8
// CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0
// CHECK-NEXT: store ptr null, ptr [[TMP4]], align 8
// CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK-NEXT: store i32 2, ptr [[TMP7]], align 4
// CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK-NEXT: store i32 1, ptr [[TMP8]], align 4
// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 8
// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 8
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK-NEXT: store ptr null, ptr [[TMP13]], align 8
// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK-NEXT: store ptr null, ptr [[TMP14]], align 8
// CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK-NEXT: store i64 0, ptr [[TMP15]], align 8
// CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK-NEXT: store i64 0, ptr [[TMP16]], align 8
// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP17]], align 4
// CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP18]], align 4
// CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK-NEXT: store i32 0, ptr [[TMP19]], align 4
// CHECK-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42.region_id, ptr [[KERNEL_ARGS]])
// CHECK-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK: omp_offload.failed:
// CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42(i64 [[TMP1]]) #[[ATTR2:[0-9]+]]
// CHECK-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK: omp_offload.cont:
// CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[F]], i32 0, i32 0
// CHECK-NEXT: store ptr [[A_ADDR]], ptr [[TMP22]], align 8
// CHECK-NEXT: call void @"_ZZ6nestediENK3$_0clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[F]])
// CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT: ret i32 [[TMP23]]
//
//
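// Host fallback version of the target region at source line 42: it simply
// forks the outlined parallel region via __kmpc_fork_call.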
// CHECK-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42
// CHECK-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42.omp_outlined, i64 [[TMP1]])
// CHECK-NEXT: ret void
//
//
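// The outlined bodies follow the canonical static worksharing pattern:
// __kmpc_for_static_init_4 (schedule kind 34, i.e. static) computes the
// per-thread bounds, the clamped upper bound drives the inner loop, and
// __kmpc_for_static_fini closes the construct.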
// CHECK-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42.omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49
// CHECK-SAME: (i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// CHECK-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// CHECK-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49.omp_outlined, i64 [[TMP1]])
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49.omp_outlined
// CHECK-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// CHECK-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// CHECK-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK: omp.inner.for.cond:
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK: omp.inner.for.body:
// CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// CHECK-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK: omp.body.continue:
// CHECK-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK: omp.inner.for.inc:
// CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// CHECK-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK: omp.inner.for.end:
// CHECK-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp.loop.exit:
// CHECK-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-NEXT: ret void
//
//
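// The constructor-like .omp_offloading.requires_reg registers this translation
// unit's OpenMP 'requires' flags with the runtime (here the value 1, which, as
// far as these checks are concerned, encodes that no requires clauses are in
// effect).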
// CHECK-LABEL: define internal void @.omp_offloading.requires_reg
// CHECK-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK-NEXT: ret void
//
//
// CHECK-X86-LABEL: define dso_local noundef i32 @_Z6nestedi
// CHECK-X86-SAME: (i32 noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-X86-NEXT: entry:
// CHECK-X86-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x ptr], align 4
// CHECK-X86-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x ptr], align 4
// CHECK-X86-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x ptr], align 4
// CHECK-X86-NEXT: [[KERNEL_ARGS:%.*]] = alloca [[STRUCT___TGT_KERNEL_ARGUMENTS:%.*]], align 8
// CHECK-X86-NEXT: [[F:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
// CHECK-X86-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-X86-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// CHECK-X86-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-X86-NEXT: store i32 [[TMP1]], ptr [[TMP2]], align 4
// CHECK-X86-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-X86-NEXT: store i32 [[TMP1]], ptr [[TMP3]], align 4
// CHECK-X86-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
// CHECK-X86-NEXT: store ptr null, ptr [[TMP4]], align 4
// CHECK-X86-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
// CHECK-X86-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x ptr], ptr [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK-X86-NEXT: [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 0
// CHECK-X86-NEXT: store i32 2, ptr [[TMP7]], align 4
// CHECK-X86-NEXT: [[TMP8:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 1
// CHECK-X86-NEXT: store i32 1, ptr [[TMP8]], align 4
// CHECK-X86-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 2
// CHECK-X86-NEXT: store ptr [[TMP5]], ptr [[TMP9]], align 4
// CHECK-X86-NEXT: [[TMP10:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 3
// CHECK-X86-NEXT: store ptr [[TMP6]], ptr [[TMP10]], align 4
// CHECK-X86-NEXT: [[TMP11:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 4
// CHECK-X86-NEXT: store ptr @.offload_sizes, ptr [[TMP11]], align 4
// CHECK-X86-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 5
// CHECK-X86-NEXT: store ptr @.offload_maptypes, ptr [[TMP12]], align 4
// CHECK-X86-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 6
// CHECK-X86-NEXT: store ptr null, ptr [[TMP13]], align 4
// CHECK-X86-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 7
// CHECK-X86-NEXT: store ptr null, ptr [[TMP14]], align 4
// CHECK-X86-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 8
// CHECK-X86-NEXT: store i64 0, ptr [[TMP15]], align 8
// CHECK-X86-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 9
// CHECK-X86-NEXT: store i64 0, ptr [[TMP16]], align 8
// CHECK-X86-NEXT: [[TMP17:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 10
// CHECK-X86-NEXT: store [3 x i32] [i32 1, i32 0, i32 0], ptr [[TMP17]], align 4
// CHECK-X86-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 11
// CHECK-X86-NEXT: store [3 x i32] zeroinitializer, ptr [[TMP18]], align 4
// CHECK-X86-NEXT: [[TMP19:%.*]] = getelementptr inbounds [[STRUCT___TGT_KERNEL_ARGUMENTS]], ptr [[KERNEL_ARGS]], i32 0, i32 12
// CHECK-X86-NEXT: store i32 0, ptr [[TMP19]], align 4
// CHECK-X86-NEXT: [[TMP20:%.*]] = call i32 @__tgt_target_kernel(ptr @[[GLOB2:[0-9]+]], i64 -1, i32 1, i32 0, ptr @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42.region_id, ptr [[KERNEL_ARGS]])
// CHECK-X86-NEXT: [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK-X86-NEXT: br i1 [[TMP21]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK-X86: omp_offload.failed:
// CHECK-X86-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42(i32 [[TMP1]]) #[[ATTR2:[0-9]+]]
// CHECK-X86-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK-X86: omp_offload.cont:
// CHECK-X86-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[F]], i32 0, i32 0
// CHECK-X86-NEXT: store ptr [[A_ADDR]], ptr [[TMP22]], align 4
// CHECK-X86-NEXT: call void @"_ZZ6nestediENK3$_0clEv"(ptr noundef nonnull align 4 dereferenceable(4) [[F]])
// CHECK-X86-NEXT: [[TMP23:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: ret i32 [[TMP23]]
//
//
// CHECK-X86-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42
// CHECK-X86-SAME: (i32 noundef [[A:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-X86-NEXT: entry:
// CHECK-X86-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-X86-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// CHECK-X86-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42.omp_outlined, i32 [[TMP1]])
// CHECK-X86-NEXT: ret void
//
//
// CHECK-X86-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42.omp_outlined
// CHECK-X86-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-X86-NEXT: entry:
// CHECK-X86-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-X86-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-X86-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-X86-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK-X86-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-X86-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-X86-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-X86-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-X86-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-X86-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-X86-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-X86-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-X86-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-X86: cond.true:
// CHECK-X86-NEXT: br label [[COND_END:%.*]]
// CHECK-X86: cond.false:
// CHECK-X86-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-X86-NEXT: br label [[COND_END]]
// CHECK-X86: cond.end:
// CHECK-X86-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-X86-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-X86-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-X86-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-X86-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-X86: omp.inner.for.cond:
// CHECK-X86-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-X86-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-X86-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-X86-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-X86: omp.inner.for.body:
// CHECK-X86-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-X86-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-X86-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-X86-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-X86-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK-X86-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-X86: omp.body.continue:
// CHECK-X86-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-X86: omp.inner.for.inc:
// CHECK-X86-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-X86-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-X86-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// CHECK-X86-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-X86: omp.inner.for.end:
// CHECK-X86-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-X86: omp.loop.exit:
// CHECK-X86-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-X86-NEXT: ret void
//
//
// CHECK-X86-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49
// CHECK-X86-SAME: (i32 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-X86-NEXT: entry:
// CHECK-X86-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// CHECK-X86-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// CHECK-X86-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49.omp_outlined, i32 [[TMP1]])
// CHECK-X86-NEXT: ret void
//
//
// CHECK-X86-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49.omp_outlined
// CHECK-X86-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR1]] {
// CHECK-X86-NEXT: entry:
// CHECK-X86-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-X86-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// CHECK-X86-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[TMP:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-X86-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-X86-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// CHECK-X86-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// CHECK-X86-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// CHECK-X86-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// CHECK-X86-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// CHECK-X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// CHECK-X86-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// CHECK-X86-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK-X86-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-X86-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// CHECK-X86-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK-X86: cond.true:
// CHECK-X86-NEXT: br label [[COND_END:%.*]]
// CHECK-X86: cond.false:
// CHECK-X86-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-X86-NEXT: br label [[COND_END]]
// CHECK-X86: cond.end:
// CHECK-X86-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// CHECK-X86-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// CHECK-X86-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// CHECK-X86-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// CHECK-X86-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK-X86: omp.inner.for.cond:
// CHECK-X86-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-X86-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// CHECK-X86-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// CHECK-X86-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK-X86: omp.inner.for.body:
// CHECK-X86-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-X86-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// CHECK-X86-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK-X86-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// CHECK-X86-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
// CHECK-X86-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// CHECK-X86-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK-X86: omp.body.continue:
// CHECK-X86-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK-X86: omp.inner.for.inc:
// CHECK-X86-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// CHECK-X86-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// CHECK-X86-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// CHECK-X86-NEXT: br label [[OMP_INNER_FOR_COND]]
// CHECK-X86: omp.inner.for.end:
// CHECK-X86-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// CHECK-X86: omp.loop.exit:
// CHECK-X86-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// CHECK-X86-NEXT: ret void
//
//
// CHECK-X86-LABEL: define internal void @.omp_offloading.requires_reg
// CHECK-X86-SAME: () #[[ATTR3:[0-9]+]] {
// CHECK-X86-NEXT: entry:
// CHECK-X86-NEXT: call void @__tgt_register_requires(i64 1)
// CHECK-X86-NEXT: ret void
//
//
// SIMD-ONLY0-LABEL: define dso_local noundef signext i32 @_Z6nestedi
// SIMD-ONLY0-SAME: (i32 noundef signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// SIMD-ONLY0-NEXT: entry:
// SIMD-ONLY0-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY0-NEXT: [[F:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
// SIMD-ONLY0-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// SIMD-ONLY0-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY0-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY0: for.cond:
// SIMD-ONLY0-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY0-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 10
// SIMD-ONLY0-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY0: for.body:
// SIMD-ONLY0-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
// SIMD-ONLY0-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// SIMD-ONLY0-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY0: for.inc:
// SIMD-ONLY0-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY0-NEXT: [[INC1:%.*]] = add nsw i32 [[TMP2]], 1
// SIMD-ONLY0-NEXT: store i32 [[INC1]], ptr [[I]], align 4
// SIMD-ONLY0-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]]
// SIMD-ONLY0: for.end:
// SIMD-ONLY0-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[F]], i32 0, i32 0
// SIMD-ONLY0-NEXT: store ptr [[A_ADDR]], ptr [[TMP3]], align 8
// SIMD-ONLY0-NEXT: call void @"_ZZ6nestediENK3$_0clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[F]])
// SIMD-ONLY0-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4
// SIMD-ONLY0-NEXT: ret i32 [[TMP4]]
//
//
// SIMD-ONLY0-X86-LABEL: define dso_local noundef i32 @_Z6nestedi
// SIMD-ONLY0-X86-SAME: (i32 noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// SIMD-ONLY0-X86-NEXT: entry:
// SIMD-ONLY0-X86-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD-ONLY0-X86-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY0-X86-NEXT: [[F:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
// SIMD-ONLY0-X86-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// SIMD-ONLY0-X86-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY0-X86-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY0-X86: for.cond:
// SIMD-ONLY0-X86-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY0-X86-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 10
// SIMD-ONLY0-X86-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY0-X86: for.body:
// SIMD-ONLY0-X86-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
// SIMD-ONLY0-X86-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// SIMD-ONLY0-X86-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// SIMD-ONLY0-X86-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY0-X86: for.inc:
// SIMD-ONLY0-X86-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY0-X86-NEXT: [[INC1:%.*]] = add nsw i32 [[TMP2]], 1
// SIMD-ONLY0-X86-NEXT: store i32 [[INC1]], ptr [[I]], align 4
// SIMD-ONLY0-X86-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// SIMD-ONLY0-X86: for.end:
// SIMD-ONLY0-X86-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[F]], i32 0, i32 0
// SIMD-ONLY0-X86-NEXT: store ptr [[A_ADDR]], ptr [[TMP3]], align 4
// SIMD-ONLY0-X86-NEXT: call void @"_ZZ6nestediENK3$_0clEv"(ptr noundef nonnull align 4 dereferenceable(4) [[F]])
// SIMD-ONLY0-X86-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4
// SIMD-ONLY0-X86-NEXT: ret i32 [[TMP4]]
//
//
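// Device-side entry points: note the implicit leading [[DYN_PTR]] parameter,
// the per-launch KernelLaunchEnvironment pointer that this commit threads into
// every kernel.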
// TCHECK-TARGET-LABEL: define weak_odr protected void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42
// TCHECK-TARGET-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// TCHECK-TARGET-NEXT: entry:
// TCHECK-TARGET-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// TCHECK-TARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// TCHECK-TARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// TCHECK-TARGET-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// TCHECK-TARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// TCHECK-TARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// TCHECK-TARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// TCHECK-TARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// TCHECK-TARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2:[0-9]+]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42.omp_outlined, i64 [[TMP1]])
// TCHECK-TARGET-NEXT: ret void
//
//
// TCHECK-TARGET-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42.omp_outlined
// TCHECK-TARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0]] {
// TCHECK-TARGET-NEXT: entry:
// TCHECK-TARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// TCHECK-TARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// TCHECK-TARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// TCHECK-TARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// TCHECK-TARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// TCHECK-TARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// TCHECK-TARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// TCHECK-TARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// TCHECK-TARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// TCHECK-TARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// TCHECK-TARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// TCHECK-TARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// TCHECK-TARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// TCHECK-TARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// TCHECK-TARGET: cond.true:
// TCHECK-TARGET-NEXT: br label [[COND_END:%.*]]
// TCHECK-TARGET: cond.false:
// TCHECK-TARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-NEXT: br label [[COND_END]]
// TCHECK-TARGET: cond.end:
// TCHECK-TARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// TCHECK-TARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// TCHECK-TARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// TCHECK-TARGET: omp.inner.for.cond:
// TCHECK-TARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// TCHECK-TARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// TCHECK-TARGET: omp.inner.for.body:
// TCHECK-TARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// TCHECK-TARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// TCHECK-TARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// TCHECK-TARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// TCHECK-TARGET-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
// TCHECK-TARGET-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// TCHECK-TARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// TCHECK-TARGET: omp.body.continue:
// TCHECK-TARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// TCHECK-TARGET: omp.inner.for.inc:
// TCHECK-TARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// TCHECK-TARGET-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// TCHECK-TARGET: omp.inner.for.end:
// TCHECK-TARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// TCHECK-TARGET: omp.loop.exit:
// TCHECK-TARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// TCHECK-TARGET-NEXT: ret void
//
//
// TCHECK-TARGET-LABEL: define weak_odr protected void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49
// TCHECK-TARGET-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0]] {
// TCHECK-TARGET-NEXT: entry:
// TCHECK-TARGET-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 8
// TCHECK-TARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// TCHECK-TARGET-NEXT: [[A_CASTED:%.*]] = alloca i64, align 8
// TCHECK-TARGET-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 8
// TCHECK-TARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// TCHECK-TARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// TCHECK-TARGET-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// TCHECK-TARGET-NEXT: [[TMP1:%.*]] = load i64, ptr [[A_CASTED]], align 8
// TCHECK-TARGET-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49.omp_outlined, i64 [[TMP1]])
// TCHECK-TARGET-NEXT: ret void
//
//
// TCHECK-TARGET-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49.omp_outlined
// TCHECK-TARGET-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]]) #[[ATTR0]] {
// TCHECK-TARGET-NEXT: entry:
// TCHECK-TARGET-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// TCHECK-TARGET-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// TCHECK-TARGET-NEXT: [[A_ADDR:%.*]] = alloca i64, align 8
// TCHECK-TARGET-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[TMP:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// TCHECK-TARGET-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// TCHECK-TARGET-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// TCHECK-TARGET-NEXT: store i64 [[A]], ptr [[A_ADDR]], align 8
// TCHECK-TARGET-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// TCHECK-TARGET-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// TCHECK-TARGET-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// TCHECK-TARGET-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// TCHECK-TARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// TCHECK-TARGET-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// TCHECK-TARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// TCHECK-TARGET-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// TCHECK-TARGET: cond.true:
// TCHECK-TARGET-NEXT: br label [[COND_END:%.*]]
// TCHECK-TARGET: cond.false:
// TCHECK-TARGET-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-NEXT: br label [[COND_END]]
// TCHECK-TARGET: cond.end:
// TCHECK-TARGET-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// TCHECK-TARGET-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// TCHECK-TARGET-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// TCHECK-TARGET: omp.inner.for.cond:
// TCHECK-TARGET-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// TCHECK-TARGET-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// TCHECK-TARGET: omp.inner.for.body:
// TCHECK-TARGET-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// TCHECK-TARGET-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// TCHECK-TARGET-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// TCHECK-TARGET-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// TCHECK-TARGET-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
// TCHECK-TARGET-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// TCHECK-TARGET-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// TCHECK-TARGET: omp.body.continue:
// TCHECK-TARGET-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// TCHECK-TARGET: omp.inner.for.inc:
// TCHECK-TARGET-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// TCHECK-TARGET-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-NEXT: br label [[OMP_INNER_FOR_COND]]
// TCHECK-TARGET: omp.inner.for.end:
// TCHECK-TARGET-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// TCHECK-TARGET: omp.loop.exit:
// TCHECK-TARGET-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// TCHECK-TARGET-NEXT: ret void
//
//
// TCHECK-TARGET-X86-LABEL: define weak_odr protected void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42
// TCHECK-TARGET-X86-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// TCHECK-TARGET-X86-NEXT: entry:
// TCHECK-TARGET-X86-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// TCHECK-TARGET-X86-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// TCHECK-TARGET-X86-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2:[0-9]+]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42.omp_outlined, i32 [[TMP1]])
// TCHECK-TARGET-X86-NEXT: ret void
//
//
// TCHECK-TARGET-X86-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l42.omp_outlined
// TCHECK-TARGET-X86-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0]] {
// TCHECK-TARGET-X86-NEXT: entry:
// TCHECK-TARGET-X86-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// TCHECK-TARGET-X86-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[TMP:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[I:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// TCHECK-TARGET-X86-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// TCHECK-TARGET-X86-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-X86-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// TCHECK-TARGET-X86-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// TCHECK-TARGET-X86: cond.true:
// TCHECK-TARGET-X86-NEXT: br label [[COND_END:%.*]]
// TCHECK-TARGET-X86: cond.false:
// TCHECK-TARGET-X86-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-X86-NEXT: br label [[COND_END]]
// TCHECK-TARGET-X86: cond.end:
// TCHECK-TARGET-X86-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// TCHECK-TARGET-X86-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-X86-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// TCHECK-TARGET-X86: omp.inner.for.cond:
// TCHECK-TARGET-X86-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-X86-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// TCHECK-TARGET-X86-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// TCHECK-TARGET-X86: omp.inner.for.body:
// TCHECK-TARGET-X86-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-X86-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// TCHECK-TARGET-X86-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// TCHECK-TARGET-X86-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
// TCHECK-TARGET-X86-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// TCHECK-TARGET-X86: omp.body.continue:
// TCHECK-TARGET-X86-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// TCHECK-TARGET-X86: omp.inner.for.inc:
// TCHECK-TARGET-X86-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-X86-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// TCHECK-TARGET-X86-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-X86-NEXT: br label [[OMP_INNER_FOR_COND]]
// TCHECK-TARGET-X86: omp.inner.for.end:
// TCHECK-TARGET-X86-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// TCHECK-TARGET-X86: omp.loop.exit:
// TCHECK-TARGET-X86-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// TCHECK-TARGET-X86-NEXT: ret void
//
//
// TCHECK-TARGET-X86-LABEL: define weak_odr protected void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49
// TCHECK-TARGET-X86-SAME: (ptr noalias noundef [[DYN_PTR:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0]] {
// TCHECK-TARGET-X86-NEXT: entry:
// TCHECK-TARGET-X86-NEXT: [[DYN_PTR_ADDR:%.*]] = alloca ptr, align 4
// TCHECK-TARGET-X86-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[A_CASTED:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: store ptr [[DYN_PTR]], ptr [[DYN_PTR_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 [[TMP0]], ptr [[A_CASTED]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_CASTED]], align 4
// TCHECK-TARGET-X86-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49.omp_outlined, i32 [[TMP1]])
// TCHECK-TARGET-X86-NEXT: ret void
//
//
// TCHECK-TARGET-X86-LABEL: define internal void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6nestedi_l49.omp_outlined
// TCHECK-TARGET-X86-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]]) #[[ATTR0]] {
// TCHECK-TARGET-X86-NEXT: entry:
// TCHECK-TARGET-X86-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 4
// TCHECK-TARGET-X86-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[TMP:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: [[I:%.*]] = alloca i32, align 4
// TCHECK-TARGET-X86-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 9, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP1:%.*]] = load i32, ptr [[TMP0]], align 4
// TCHECK-TARGET-X86-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1]], i32 [[TMP1]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// TCHECK-TARGET-X86-NEXT: [[TMP2:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-X86-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP2]], 9
// TCHECK-TARGET-X86-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// TCHECK-TARGET-X86: cond.true:
// TCHECK-TARGET-X86-NEXT: br label [[COND_END:%.*]]
// TCHECK-TARGET-X86: cond.false:
// TCHECK-TARGET-X86-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-X86-NEXT: br label [[COND_END]]
// TCHECK-TARGET-X86: cond.end:
// TCHECK-TARGET-X86-NEXT: [[COND:%.*]] = phi i32 [ 9, [[COND_TRUE]] ], [ [[TMP3]], [[COND_FALSE]] ]
// TCHECK-TARGET-X86-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// TCHECK-TARGET-X86-NEXT: store i32 [[TMP4]], ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-X86-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// TCHECK-TARGET-X86: omp.inner.for.cond:
// TCHECK-TARGET-X86-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// TCHECK-TARGET-X86-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP5]], [[TMP6]]
// TCHECK-TARGET-X86-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// TCHECK-TARGET-X86: omp.inner.for.body:
// TCHECK-TARGET-X86-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-X86-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP7]], 1
// TCHECK-TARGET-X86-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// TCHECK-TARGET-X86-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// TCHECK-TARGET-X86-NEXT: [[TMP8:%.*]] = load i32, ptr [[A_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: [[INC:%.*]] = add nsw i32 [[TMP8]], 1
// TCHECK-TARGET-X86-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// TCHECK-TARGET-X86-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// TCHECK-TARGET-X86: omp.body.continue:
// TCHECK-TARGET-X86-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// TCHECK-TARGET-X86: omp.inner.for.inc:
// TCHECK-TARGET-X86-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-X86-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP9]], 1
// TCHECK-TARGET-X86-NEXT: store i32 [[ADD2]], ptr [[DOTOMP_IV]], align 4
// TCHECK-TARGET-X86-NEXT: br label [[OMP_INNER_FOR_COND]]
// TCHECK-TARGET-X86: omp.inner.for.end:
// TCHECK-TARGET-X86-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// TCHECK-TARGET-X86: omp.loop.exit:
// TCHECK-TARGET-X86-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP1]])
// TCHECK-TARGET-X86-NEXT: ret void
//
//
// SIMD-ONLY1-TARGET-LABEL: define dso_local noundef signext i32 @_Z6nestedi
// SIMD-ONLY1-TARGET-SAME: (i32 noundef signext [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// SIMD-ONLY1-TARGET-NEXT: entry:
// SIMD-ONLY1-TARGET-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD-ONLY1-TARGET-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY1-TARGET-NEXT: [[F:%.*]] = alloca [[CLASS_ANON:%.*]], align 8
// SIMD-ONLY1-TARGET-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// SIMD-ONLY1-TARGET-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY1-TARGET-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY1-TARGET: for.cond:
// SIMD-ONLY1-TARGET-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY1-TARGET-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 10
// SIMD-ONLY1-TARGET-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY1-TARGET: for.body:
// SIMD-ONLY1-TARGET-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
// SIMD-ONLY1-TARGET-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// SIMD-ONLY1-TARGET-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// SIMD-ONLY1-TARGET-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY1-TARGET: for.inc:
// SIMD-ONLY1-TARGET-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY1-TARGET-NEXT: [[INC1:%.*]] = add nsw i32 [[TMP2]], 1
// SIMD-ONLY1-TARGET-NEXT: store i32 [[INC1]], ptr [[I]], align 4
// SIMD-ONLY1-TARGET-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]]
// SIMD-ONLY1-TARGET: for.end:
// SIMD-ONLY1-TARGET-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[F]], i32 0, i32 0
// SIMD-ONLY1-TARGET-NEXT: store ptr [[A_ADDR]], ptr [[TMP3]], align 8
// SIMD-ONLY1-TARGET-NEXT: call void @"_ZZ6nestediENK3$_0clEv"(ptr noundef nonnull align 8 dereferenceable(8) [[F]])
// SIMD-ONLY1-TARGET-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4
// SIMD-ONLY1-TARGET-NEXT: ret i32 [[TMP4]]
//
//
// SIMD-ONLY1-TARGET-X86-LABEL: define dso_local noundef i32 @_Z6nestedi
// SIMD-ONLY1-TARGET-X86-SAME: (i32 noundef [[A:%.*]]) #[[ATTR0:[0-9]+]] {
// SIMD-ONLY1-TARGET-X86-NEXT: entry:
// SIMD-ONLY1-TARGET-X86-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD-ONLY1-TARGET-X86-NEXT: [[I:%.*]] = alloca i32, align 4
// SIMD-ONLY1-TARGET-X86-NEXT: [[F:%.*]] = alloca [[CLASS_ANON:%.*]], align 4
// SIMD-ONLY1-TARGET-X86-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
// SIMD-ONLY1-TARGET-X86-NEXT: store i32 0, ptr [[I]], align 4
// SIMD-ONLY1-TARGET-X86-NEXT: br label [[FOR_COND:%.*]]
// SIMD-ONLY1-TARGET-X86: for.cond:
// SIMD-ONLY1-TARGET-X86-NEXT: [[TMP0:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY1-TARGET-X86-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP0]], 10
// SIMD-ONLY1-TARGET-X86-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
// SIMD-ONLY1-TARGET-X86: for.body:
// SIMD-ONLY1-TARGET-X86-NEXT: [[TMP1:%.*]] = load i32, ptr [[A_ADDR]], align 4
// SIMD-ONLY1-TARGET-X86-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
// SIMD-ONLY1-TARGET-X86-NEXT: store i32 [[INC]], ptr [[A_ADDR]], align 4
// SIMD-ONLY1-TARGET-X86-NEXT: br label [[FOR_INC:%.*]]
// SIMD-ONLY1-TARGET-X86: for.inc:
// SIMD-ONLY1-TARGET-X86-NEXT: [[TMP2:%.*]] = load i32, ptr [[I]], align 4
// SIMD-ONLY1-TARGET-X86-NEXT: [[INC1:%.*]] = add nsw i32 [[TMP2]], 1
// SIMD-ONLY1-TARGET-X86-NEXT: store i32 [[INC1]], ptr [[I]], align 4
// SIMD-ONLY1-TARGET-X86-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
// SIMD-ONLY1-TARGET-X86: for.end:
// SIMD-ONLY1-TARGET-X86-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[CLASS_ANON]], ptr [[F]], i32 0, i32 0
// SIMD-ONLY1-TARGET-X86-NEXT: store ptr [[A_ADDR]], ptr [[TMP3]], align 4
// SIMD-ONLY1-TARGET-X86-NEXT: call void @"_ZZ6nestediENK3$_0clEv"(ptr noundef nonnull align 4 dereferenceable(4) [[F]])
// SIMD-ONLY1-TARGET-X86-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR]], align 4
// SIMD-ONLY1-TARGET-X86-NEXT: ret i32 [[TMP4]]
//